diff --git a/example/codeql-db/baseline-info.json b/example/codeql-db/baseline-info.json
new file mode 100644
index 0000000000000000000000000000000000000000..585d815a215e169f5d1f0b490586307130b184b9
--- /dev/null
+++ b/example/codeql-db/baseline-info.json
@@ -0,0 +1 @@
+{"languages":{"python":{"files":["main.py"],"linesOfCode":2}}}
\ No newline at end of file
diff --git a/example/codeql-db/codeql-database.yml b/example/codeql-db/codeql-database.yml
new file mode 100644
index 0000000000000000000000000000000000000000..deecbdfccce1166fc4f2b03a1ba7799106cbbf97
--- /dev/null
+++ b/example/codeql-db/codeql-database.yml
@@ -0,0 +1,10 @@
+---
+sourceLocationPrefix: /Users/pwntester/src/github.com/github/codeql-jupyter-kernel/example/src
+baselineLinesOfCode: 2
+unicodeNewlines: false
+columnKind: utf32
+primaryLanguage: python
+creationMetadata:
+  cliVersion: 2.13.4
+  creationTime: 2023-07-19T09:05:02.644931Z
+finalised: true
diff --git a/example/codeql-db/db-python/default/containerparent.rel b/example/codeql-db/db-python/default/containerparent.rel
new file mode 100644
index 0000000000000000000000000000000000000000..1ae2f32d66d7f07460f55f4f1591009775c70a90
Binary files /dev/null and b/example/codeql-db/db-python/default/containerparent.rel differ
diff --git a/example/codeql-db/db-python/default/containerparent.rel.checksum b/example/codeql-db/db-python/default/containerparent.rel.checksum
new file mode 100644
index 0000000000000000000000000000000000000000..e1911ccd0823ab9c856c088935ff22948087150f
Binary files /dev/null and b/example/codeql-db/db-python/default/containerparent.rel.checksum differ
diff --git a/example/codeql-db/db-python/default/ext_argreturn.rel b/example/codeql-db/db-python/default/ext_argreturn.rel
new file mode 100644
index 0000000000000000000000000000000000000000..0e87ba60e7d24d78ac8f4197b3423837a3dcddf7
Binary files /dev/null and b/example/codeql-db/db-python/default/ext_argreturn.rel differ
diff --git a/example/codeql-db/db-python/default/ext_argreturn.rel.checksum b/example/codeql-db/db-python/default/ext_argreturn.rel.checksum
new file mode 100644
index 0000000000000000000000000000000000000000..4fe1babab0edcab9100a7e87de1330b8f575473a
Binary files /dev/null and b/example/codeql-db/db-python/default/ext_argreturn.rel.checksum differ
diff --git a/example/codeql-db/db-python/default/ext_argtype.rel b/example/codeql-db/db-python/default/ext_argtype.rel
new file mode 100644
index 0000000000000000000000000000000000000000..edf0753453f5f355a66f7a5eabfc1713fe29395d
Binary files /dev/null and b/example/codeql-db/db-python/default/ext_argtype.rel differ
diff --git a/example/codeql-db/db-python/default/ext_argtype.rel.checksum b/example/codeql-db/db-python/default/ext_argtype.rel.checksum
new file mode 100644
index 0000000000000000000000000000000000000000..f25fc6c342b6db701ea4c2d55b36396e05a92429
Binary files /dev/null and b/example/codeql-db/db-python/default/ext_argtype.rel.checksum differ
diff --git a/example/codeql-db/db-python/default/ext_proptype.rel b/example/codeql-db/db-python/default/ext_proptype.rel
new file mode 100644
index 0000000000000000000000000000000000000000..29d2774373acbf93271da7929d4714241774e675
Binary files /dev/null and b/example/codeql-db/db-python/default/ext_proptype.rel differ
diff --git a/example/codeql-db/db-python/default/ext_proptype.rel.checksum b/example/codeql-db/db-python/default/ext_proptype.rel.checksum
new file mode 100644
index 0000000000000000000000000000000000000000..61b9472f0fce23d82e686f798d3067495253bf78
Binary files /dev/null and b/example/codeql-db/db-python/default/ext_proptype.rel.checksum differ
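For context: a database with this layout is normally produced by the CodeQL CLI rather than assembled by hand. A minimal sketch of how the example database could be (re)built, assuming the paths implied by this diff (example/src containing main.py) and the CLI version recorded in codeql-database.yml (2.13.4):

    # Sketch: build the example Python database from the example sources.
    # Output directory and source root are taken from the paths in this diff.
    codeql database create example/codeql-db \
        --language=python \
        --source-root=example/src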
diff --git a/example/codeql-db/db-python/default/ext_rettype.rel b/example/codeql-db/db-python/default/ext_rettype.rel
new file mode 100644
index 0000000000000000000000000000000000000000..be070927a6026b1348d3c4933a97fb2838b3b4e0
Binary files /dev/null and b/example/codeql-db/db-python/default/ext_rettype.rel differ
diff --git a/example/codeql-db/db-python/default/ext_rettype.rel.checksum b/example/codeql-db/db-python/default/ext_rettype.rel.checksum
new file mode 100644
index 0000000000000000000000000000000000000000..450943b9b20854d40b474b1346a4052b85a5aa95
Binary files /dev/null and b/example/codeql-db/db-python/default/ext_rettype.rel.checksum differ
diff --git a/example/codeql-db/db-python/default/files.rel b/example/codeql-db/db-python/default/files.rel
new file mode 100644
index 0000000000000000000000000000000000000000..8056e2bb979b6b2738c23773cd4217cc36b3ed1e
Binary files /dev/null and b/example/codeql-db/db-python/default/files.rel differ
diff --git a/example/codeql-db/db-python/default/files.rel.checksum b/example/codeql-db/db-python/default/files.rel.checksum
new file mode 100644
index 0000000000000000000000000000000000000000..66637fb516173afbe7b9259eb0fb6c7e491bea37
Binary files /dev/null and b/example/codeql-db/db-python/default/files.rel.checksum differ
diff --git a/example/codeql-db/db-python/default/folders.rel b/example/codeql-db/db-python/default/folders.rel
new file mode 100644
index 0000000000000000000000000000000000000000..3ce9b664b1d8a47abd7202fbaaf11e33cd734278
Binary files /dev/null and b/example/codeql-db/db-python/default/folders.rel differ
diff --git a/example/codeql-db/db-python/default/folders.rel.checksum b/example/codeql-db/db-python/default/folders.rel.checksum
new file mode 100644
index 0000000000000000000000000000000000000000..6a36248334c53d23749df4e71281b5320f6206fa
Binary files /dev/null and b/example/codeql-db/db-python/default/folders.rel.checksum differ
diff --git a/example/codeql-db/db-python/default/locations_ast.rel b/example/codeql-db/db-python/default/locations_ast.rel
new file mode 100644
index 0000000000000000000000000000000000000000..f43a16c30b6abb8c6b8a138b22faf96e3ea692a8
--- /dev/null
+++ b/example/codeql-db/db-python/default/locations_ast.rel
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a5aea4ba6c3d3bf3a515cf9ce5a84f552d3ee22eabf4e8615158f89000498af
+size 10993056
diff --git a/example/codeql-db/db-python/default/locations_ast.rel.checksum b/example/codeql-db/db-python/default/locations_ast.rel.checksum
new file mode 100644
index 0000000000000000000000000000000000000000..f35c4d8f4baadb155ea374d5a86d2cf25e5704d0
Binary files /dev/null and b/example/codeql-db/db-python/default/locations_ast.rel.checksum differ
diff --git a/example/codeql-db/db-python/default/numlines.rel b/example/codeql-db/db-python/default/numlines.rel
new file mode 100644
index 0000000000000000000000000000000000000000..72301bbd3027b8a08a7f4cea27576853adf5eee2
Binary files /dev/null and b/example/codeql-db/db-python/default/numlines.rel differ
diff --git a/example/codeql-db/db-python/default/numlines.rel.checksum b/example/codeql-db/db-python/default/numlines.rel.checksum
new file mode 100644
index 0000000000000000000000000000000000000000..07c59f65f043829608df40a6792cc06228fe2c37
Binary files /dev/null and b/example/codeql-db/db-python/default/numlines.rel.checksum differ
diff --git a/example/codeql-db/db-python/default/pools/0/buckets/info b/example/codeql-db/db-python/default/pools/0/buckets/info
new file mode 100644
index 0000000000000000000000000000000000000000..98f04a3c64f24017af654b569cd9ab2a9cb45ce8
Binary files /dev/null and b/example/codeql-db/db-python/default/pools/0/buckets/info differ
diff --git a/example/codeql-db/db-python/default/pools/0/buckets/page-000000 b/example/codeql-db/db-python/default/pools/0/buckets/page-000000
new file mode 100644
index 0000000000000000000000000000000000000000..8c3ef5891fed2f46df1eac7faf759f4c60695fbe
--- /dev/null
+++ b/example/codeql-db/db-python/default/pools/0/buckets/page-000000
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d6bf79bb01d6fa0b19225b0a046dc69c70cba076f702dcf26f4413252f07cb7
+size 1048576
diff --git a/example/codeql-db/db-python/default/pools/0/info b/example/codeql-db/db-python/default/pools/0/info
new file mode 100644
index 0000000000000000000000000000000000000000..51be0b6492a4b14196a4e70e40bf906f2379ad21
Binary files /dev/null and b/example/codeql-db/db-python/default/pools/0/info differ
diff --git a/example/codeql-db/db-python/default/pools/0/metadata/info b/example/codeql-db/db-python/default/pools/0/metadata/info
new file mode 100644
index 0000000000000000000000000000000000000000..48c2d1ef1e6ca821f7d609bfc6e1a25b29603f98
Binary files /dev/null and b/example/codeql-db/db-python/default/pools/0/metadata/info differ
diff --git a/example/codeql-db/db-python/default/pools/0/metadata/page-000000 b/example/codeql-db/db-python/default/pools/0/metadata/page-000000
new file mode 100644
index 0000000000000000000000000000000000000000..5987d5980ad3047217a2557c6b8ec4da3e2e83fc
--- /dev/null
+++ b/example/codeql-db/db-python/default/pools/0/metadata/page-000000
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb096256d33c24a4eef628205927573c4a24a83228d543bb6eddb41d7718f64f
+size 2097152
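Note that the larger relation and pool-page files above (locations_ast.rel, buckets/page-000000, metadata/page-000000) are committed as Git LFS pointers (the "version https://git-lfs.github.com/spec/v1" stubs), so a plain checkout holds the pointers, not the data. A minimal sketch for fetching the real contents, assuming the whole example/codeql-db tree is LFS-tracked:

    # Sketch: materialize the LFS-tracked database files after cloning.
    git lfs install
    git lfs pull --include="example/codeql-db/**"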
diff --git a/example/codeql-db/db-python/default/pools/0/pageDump/page-000000000 b/example/codeql-db/db-python/default/pools/0/pageDump/page-000000000
new file mode 100644
index 0000000000000000000000000000000000000000..0db34818423119ce5313cd698511fb6785cf4ec9
--- /dev/null
+++ b/example/codeql-db/db-python/default/pools/0/pageDump/page-000000000
@@ -0,0 +1,15593 @@
[15,593 lines of raw string-pool page data omitted: interned paths of Python 3.8 standard-library files and of example/src/main.py, extractor and interpreter metadata (extractor_python_version, sys.flags, float/int info, extractor options, sys.path, language.version), builtin and stdlib docstrings, builtin and exception names, and fragments of xml/etree/ElementPath.py source]
There's usually no reason# to import this module directly; the ElementTree does this for# you, if needed.# Same as '*', but no comments or processing instructions.# It can be a surprise that '*' includes those, but there is no# justification for '{*}*' doing the same.# Any tag that is not in a namespace.# The tag in any (or no) namespace.# '}name'# Any tag in the given namespace.# '{}tag' == 'tag'# FIXME: raise error if .. is applied at toplevel?# FIXME: replace with real parser!!! refs:# http://effbot.org/zone/simple-iterator-parser.htm# http://javascript.crockford.com/tdop/tdop.html# ignore whitespace# use signature to determine predicate type# [@attribute] predicate# [@attribute='value']# [tag]# [.='value'] or [tag='value']# [index] or [last()] or [last()-index]# [index]# FIXME: what if the selector is "*" ?# Generate all matching objects.# compile selector pattern# implicit all (FIXME: keep this?)# execute selector pattern# Find first matching object.# Find all matching objects.# Find text for first matching object.b'('[^']*'|\"[^\"]*\"|::|//?|\.\.|\(\)|[/.*:\[\]\(\)@=])|((?:\{[^}]+\})?[^/\[\]\(\)@=\s]+)|\s+'u'('[^']*'|\"[^\"]*\"|::|//?|\.\.|\(\)|[/.*:\[\]\(\)@=])|((?:\{[^}]+\})?[^/\[\]\(\)@=\s]+)|\s+'b''b'{'u'{'b':'u':'b'{%s}%s'u'{%s}%s'b'prefix %r not found in prefix map'u'prefix %r not found in prefix map'b'@'u'@'b'{*}'u'{*}'b'}*'u'}*'b'{*}*'u'{*}*'b'{}*'u'{}*'b'internal parser error, got 'u'internal parser error, got 'b'{}'u'{}'b'*'u'*'b'invalid descendant'u'invalid descendant'b']'u']'b''"'u''"'b'''u'''b'-'u'-'b'@-'u'@-'b'@-=''u'@-=''b'\-?\d+$'u'\-?\d+$'b'.=''u'.=''b'-=''u'-=''b'-()'u'-()'b'-()-'u'-()-'b'XPath position >= 1 expected'u'XPath position >= 1 expected'b'last'u'last'b'unsupported function'u'unsupported function'b'unsupported expression'u'unsupported expression'b'XPath offset from last() must be negative'u'XPath offset from last() must be negative'b'invalid predicate'u'invalid predicate'b'.'u'.'b'..'u'..'b'//'u'//'b'['u'['b'/'u'/'b'cannot use absolute path on element'u'cannot use absolute path on element'b'invalid path'u'invalid path'u'xml.etree.ElementPath'u'xml.etree'u'xml'u'etree.ElementPath'u'etree'u'ElementPath'Lightweight XML support for Python. + + XML is an inherently hierarchical data format, and the most natural way to + represent it is with a tree. This module has two classes for this purpose: + + 1. ElementTree represents the whole XML document as a tree and + + 2. Element represents a single node in this tree. + + Interactions with the whole document (reading and writing to/from files) are + usually done on the ElementTree level. Interactions with a single XML element + and its sub-elements are done on the Element level. + + Element is a flexible container object designed to store hierarchical data + structures in memory. It can be described as a cross between a list and a + dictionary. Each Element has a number of properties associated with it: + + 'tag' - a string containing the element's name. + + 'attributes' - a Python dictionary storing the element's attributes. + + 'text' - a string containing the element's text content. + + 'tail' - an optional string containing text after the element's end tag. + + And a number of child elements stored in a Python sequence. + + To create an element instance, use the Element constructor, + or the SubElement factory function. + + You can also use the ElementTree class to wrap an element structure + and convert it to and from XML. 
+ +"""CommentdumpElementElementTreefromstringfromstringlistiselementiterparseparseParseErrorPIProcessingInstructionQNameSubElementtostringtostringlistTreeBuilderVERSIONXMLXMLIDXMLParserXMLPullParserregister_namespacecanonicalizeC14NWriterTarget__all__1.3.0warningsiocollectionscollections.abccontextlibElementPathAn error when parsing an XML document. + + In addition to its exception value, a ParseError contains + two extra attributes: + 'code' - the specific exception code + 'position' - the line and column of the error + + elementReturn True if *element* appears to be an Element.An XML element. + + This class is the reference implementation of the Element interface. + + An element's length is its number of subelements. That means if you + want to check if an element is truly empty, you should check BOTH + its length AND its text attribute. + + The element tag, attribute names, and attribute values can be either + bytes or strings. + + *tag* is the element name. *attrib* is an optional dictionary containing + element attributes. *extra* are additional element attributes given as + keyword arguments. + + Example form: + text...tail + + The element's name.attribDictionary of the element's attributes. + Text before first subelement. This is either a string or the value None. + Note that if there is no text, this attribute may be either + None or the empty string, depending on the parser. + + tail + Text after this element's end tag, but before the next sibling element's + start tag. This is either a string or the value None. Note that if there + was no text, this attribute may be either None or an empty string, + depending on the parser. + + extraattrib must be dict, not %s_children<%s %r at %#x>makeelementCreate a new element with the same type. + + *tag* is a string containing the element name. + *attrib* is a dictionary containing the element attributes. + + Do not call this method, use the SubElement factory function instead. + + Return copy of current element. + + This creates a shallow copy. Subelements will be shared with the + original tree. + + warnThe behavior of this method will change in future versions. Use specific 'len(elem)' or 'elem is not None' test instead."The behavior of this method will change in future versions. ""Use specific 'len(elem)' or 'elem is not None' test instead."stacklevelelt_assert_is_elementsubelementAdd *subelement* to the end of this element. + + The new element will appear in document order after the last existing + subelement (or directly after the text, if it's the first subelement), + but before the end tag for this element. + + elementsAppend subelements from a sequence. + + *elements* is a sequence with zero or more elements. + + Insert *subelement* at position *index*._Element_Pyexpected an Element, not %sRemove matching subelement. + + Unlike the find methods, this method compares elements based on + identity, NOT ON tag value or contents. To remove subelements by + other means, the easiest way is to use a list comprehension to + select what elements to keep, and then use slice assignment to update + the parent element. + + ValueError is raised if a matching element could not be found. + + getchildren(Deprecated) Return all subelements. + + Elements are returned in document order. + + This method will be removed in future versions. Use 'list(elem)' or iteration over elem instead."This method will be removed in future versions. ""Use 'list(elem)' or iteration over elem instead."Find first matching element by tag name or path. 
+ + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return the first matching element, or None if no element was found. + + Find text for first matching element by tag name or path. + + *path* is a string having either an element tag or an XPath, + *default* is the value to return if the element was not found, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return text content of first matching element, or default value if + none was found. Note that if an element is found having no text + content, the empty string is returned. + + Find all matching subelements by tag name or path. + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Returns list containing all matching elements in document order. + + Find all matching subelements by tag name or path. + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return an iterable yielding all matching elements in document order. + + Reset element. + + This function removes all subelements, clears all attributes, and sets + the text and tail attributes to None. + + Get element attribute. + + Equivalent to attrib.get, but some implementations may handle this a + bit more efficiently. *key* is what attribute to look for, and + *default* is what to return if the attribute was not found. + + Returns a string containing the attribute value, or the default if + attribute was not found. + + Set element attribute. + + Equivalent to attrib[key] = value, but some implementations may handle + this a bit more efficiently. *key* is what attribute to set, and + *value* is the attribute value to set it to. + + Get list of attribute names. + + Names are returned in an arbitrary order, just like an ordinary + Python dict. Equivalent to attrib.keys() + + Get element attributes as a sequence. + + The attributes are returned in arbitrary order. Equivalent to + attrib.items(). + + Return a list of (name, value) tuples. + + Create tree iterator. + + The iterator loops over the element and all subelements in document + order, returning all elements with a matching tag. + + If the tree structure is modified during iteration, new or removed + elements may or may not be included. To get a stable set, use the + list() function on the iterator, and loop over the resulting list. + + *tag* is what tags to look for (default is to return all elements) + + Return an iterator containing all the matching elements. + + getiteratorThis method will be removed in future versions. Use 'elem.iter()' or 'list(elem.iter())' instead."Use 'elem.iter()' or 'list(elem.iter())' instead."Create text iterator. + + The iterator loops over the element and all subelements in document + order, returning all inner text. + + tSubelement factory which creates an element instance, and appends it + to an existing parent. + + The element tag, attribute names, and attribute values can be either + bytes or Unicode strings. + + *parent* is the parent element, *tag* is the subelements name, *attrib* is + an optional directory containing element attributes, *extra* are + additional attributes given as keyword arguments. + + Comment element factory. + + This function creates a special element which the standard serializer + serializes as an XML comment. + + *text* is a string containing the comment string. 
+ + targetProcessing Instruction element factory. + + This function creates a special element which the standard serializer + serializes as an XML comment. + + *target* is a string containing the processing instruction, *text* is a + string containing the processing instruction contents, if any. + + Qualified name wrapper. + + This class can be used to wrap a QName attribute value in order to get + proper namespace handing on output. + + *text_or_uri* is a string containing the QName value either in the form + {uri}local, or if the tag argument is given, the URI part of a QName. + + *tag* is an optional argument which if given, will make the first + argument (text_or_uri) be interpreted as a URI, and this argument (tag) + be interpreted as a local name. + + text_or_uri<%s %r>otherAn XML element hierarchy. + + This class also provides support for serialization to and from + standard XML. + + *element* is an optional root element node, + *file* is an optional file handle or file name of an XML file whose + contents will be used to initialize the tree with. + + file_rootgetrootReturn root element of this tree._setrootReplace root element of this tree. + + This will discard the current contents of the tree and replace it + with the given element. Use with care! + + sourceparserLoad external XML document into element tree. + + *source* is a file name or file object, *parser* is an optional parser + instance that defaults to XMLParser. + + ParseError is raised if the parser fails to parse the document. + + Returns the root element of the given source document. + + close_sourcerb_parse_whole65536datafeedCreate and return tree iterator for the root element. + + The iterator loops over all elements in this tree, in document order. + + *tag* is a string with the tag name to iterate over + (default is to return all elements). + + This method will be removed in future versions. Use 'tree.iter()' or 'list(tree.iter())' instead."Use 'tree.iter()' or 'list(tree.iter())' instead."Find first matching element by tag name or path. + + Same as getroot().find(path), which is Element.find() + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return the first matching element, or None if no element was found. + + This search is broken in 1.3 and earlier, and will be fixed in a future version. If you rely on the current behaviour, change it to %r"This search is broken in 1.3 and earlier, and will be ""fixed in a future version. If you rely on the current ""behaviour, change it to %r"Find first matching element by tag name or path. + + Same as getroot().findtext(path), which is Element.findtext() + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return the first matching element, or None if no element was found. + + Find all matching subelements by tag name or path. + + Same as getroot().findall(path), which is Element.findall(). + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return list containing all matching elements in document order. + + Find all matching subelements by tag name or path. + + Same as getroot().iterfind(path), which is element.iterfind() + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. 
+ + Return an iterable yielding all matching elements in document order. + + file_or_filenamexml_declarationshort_empty_elementsWrite element tree to a file as XML. + + Arguments: + *file_or_filename* -- file name or a file object opened for writing + + *encoding* -- the output encoding (default: US-ASCII) + + *xml_declaration* -- bool indicating if an XML declaration should be + added to the output. If None, an XML declaration + is added if encoding IS NOT either of: + US-ASCII, UTF-8, or Unicode + + *default_namespace* -- sets the default XML namespace (for "xmlns") + + *method* -- either "xml" (default), "html, "text", or "c14n" + + *short_empty_elements* -- controls the formatting of elements + that contain no content. If True (default) + they are emitted as a single self-closed + tag, otherwise they are emitted as a pair + of start/end tags + + xml_serializeunknown method %rc14nutf-8us-asciienc_lower_get_writerdeclared_encodinglocalegetpreferredencoding +_serialize_text_namespacesqnamesserializewrite_c14ncontextmanagerExitStackstackBufferedIOBaseRawIOBaseBufferedWritercallbacklambdaTextIOWrapperxmlcharrefreplace +newlinewadd_qnameqname}_namespace_mapns%d%s:%scannot use non-qualified names with default_namespace option"cannot use non-qualified names with ""default_namespace option"_raise_serialization_error_serialize_xmlkwargs_escape_cdataareabasebasefontbrcolframehrimgisindexlinkmetaparamHTML_EMPTY_serialize_html_escape_attrib_htmlltagscriptstyleparthtmlRegister a namespace prefix. + + The registry is global, and any existing mapping for either the + given prefix or the namespace URI will be removed. + + *prefix* is the namespace prefix, *uri* is a namespace uri. Tags and + attributes in this namespace will be serialized with prefix if possible. + + ValueError is raised if prefix is reserved or is invalid. + + ns\d+$Prefix format reserved for internal usehttp://www.w3.org/XML/1998/namespacehttp://www.w3.org/1999/xhtmlrdfhttp://www.w3.org/1999/02/22-rdf-syntax-ns#wsdlhttp://schemas.xmlsoap.org/wsdl/xshttp://www.w3.org/2001/XMLSchemaxsihttp://www.w3.org/2001/XMLSchema-instancedchttp://purl.org/dc/elements/1.1/cannot serialize %r (type %s)&&<>" + Generate string representation of XML element. + + All subelements are included. If encoding is "unicode", a string + is returned. Otherwise a bytestring is returned. + + *element* is an Element instance, *encoding* is an optional output + encoding defaulting to US-ASCII, *method* is an optional output which can + be one of "xml" (default), "html", "text" or "c14n", *default_namespace* + sets the default XML namespace (for "xmlns"). + + Returns an (optionally) encoded string containing the XML data. + + StringIOBytesIOstreamgetvalue_ListDataStreamAn auxiliary stream accumulating into a list reference.lstbWrite element tree or element structure to sys.stdout. + + This function should be used for debugging only. + + *elem* is either an ElementTree, or a single Element. The exact output + format is implementation dependent. In this version, it's written as an + ordinary XML file. + + Parse XML document into element tree. + + *source* is a filename or file object containing XML data, + *parser* is an optional parser instance defaulting to XMLParser. + + Return an ElementTree instance. + + treeeventsIncrementally parse XML document into ElementTree. + + This class also reports what's going on to the user based on the + *events* it is initialized with. 
The supported events are the strings + "start", "end", "start-ns" and "end-ns" (the "ns" events are used to get + detailed namespace information). If *events* is omitted, only + "end" events are reported. + + *source* is a filename or file object containing XML data, *events* is + a list of events to report back, *parser* is an optional parser instance. + + Returns an iterator providing (event, elem) pairs. + + _parserpullparseriteratorread_events16_close_and_return_rootitIterParseIteratorabcIteratordeque_events_queue_seteventsFeed encoded data to parser.feed() called after end of streamexcFinish feeding data to parser. + + Unlike XMLParser, does not return the root element. Use + read_events() to consume elements from XMLPullParser. + Return an iterator over currently available (event, elem) pairs. + + Events are consumed from the internal event queue as they are + retrieved from the iterator. + poplefteventParse XML document from string constant. + + This function can be used to embed "XML Literals" in Python code. + + *text* is a string containing XML data, *parser* is an + optional parser instance, defaulting to the standard XMLParser. + + Returns an Element instance. + + Parse XML document from string constant for its IDs. + + *text* is a string containing XML data, *parser* is an + optional parser instance, defaulting to the standard XMLParser. + + Returns an (Element, dict) tuple, in which the + dict maps element id:s to elements. + + idssequenceParse XML document from sequence of string fragments. + + *sequence* is a list of other sequence, *parser* is an optional parser + instance, defaulting to the standard XMLParser. + + Returns an Element instance. + + Generic element structure builder. + + This builder converts a sequence of start, data, and end method + calls to a well-formed element structure. + + You can use this class to build an element structure using a custom XML + parser, or a parser for some other XML-like format. + + *element_factory* is an optional element factory which is called + to create new Element instances, as necessary. + + *comment_factory* is a factory to create comments to be used instead of + the standard factory. If *insert_comments* is false (the default), + comments will not be inserted into the tree. + + *pi_factory* is a factory to create processing instructions to be used + instead of the standard factory. If *insert_pis* is false (the default), + processing instructions will not be inserted into the tree. + element_factorycomment_factorypi_factoryinsert_commentsinsert_pis_data_elem_last_tail_comment_factory_pi_factory_factoryFlush builder buffers and return toplevel document Element.missing end tagsmissing toplevel element_flushinternal error (tail)internal error (text)Add text to current element.attrsOpen new element and return it. + + *tag* is the element name, *attrs* is a dict containing element + attributes. + + Close and return current Element. + + *tag* is the element name. + + end tag mismatch (expected %s, got %s)commentCreate a comment using the comment_factory. + + *text* is the text of the comment. + _handle_singlepiCreate a processing instruction using the pi_factory. + + *target* is the target name of the processing instruction. + *text* is the data of the processing instruction, or ''. + factoryElement structure builder for XML source data based on the expat parser. 
+ + *target* is an optional target object which defaults to an instance of the + standard TreeBuilder class, *encoding* is an optional encoding string + which if given, overrides the encoding specified in the XML file: + http://www.iana.org/assignments/character-sets + + xml.parsersexpatpyexpatNo module named expat; use SimpleXMLTreeBuilder insteadParserCreate_targeterror_error_names_defaultDefaultHandlerExpand_startStartElementHandler_endEndElementHandlerstart_ns_start_nsStartNamespaceDeclHandlerend_ns_end_nsEndNamespaceDeclHandlerCharacterDataHandlerCommentHandlerProcessingInstructionHandlerbuffer_textordered_attributesspecified_attributes_doctypeentityExpat %d.%d.%devents_queueevents_to_reportevent_namehandlerattrib_instart-nsend-nspi_targetunknown event %r_raiseerrorerrposition_fixnameattr_listfixnameidata_handlerundefined entity %s: line %d, column %dErrorLineNumberErrorColumnNumber _elementtree_set_factories#---------------------------------------------------------------------# Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved.# Copyright (c) 1999-2008 by Fredrik Lundh# public symbols# emulate old behaviour, for now# Need to refer to the actual Python implementation, not the# shadowing C implementation.# assert iselement(element)# compatibility# assert element is None or iselement(element)# first node# If no parser was specified, create a default XMLParser# The default XMLParser, when it comes from an accelerator,# can define an internal _parse_whole API for efficiency.# It can be used to parse the whole source without feeding# it with chunks.# assert self._root is not None# Retrieve the default encoding for the xml declaration# lxml.etree compatibility. use output method instead# serialization support# returns text write method and release all resources after using# file_or_filename is a file name# file_or_filename is a file-like object# encoding determines if it is a text or binary writer# use a text writer as is# wrap a binary writer with TextIOWrapper# Keep the original file open when the BufferedWriter is# destroyed# This is to handle passed objects that aren't in the# IOBase hierarchy, but just have a write method# TextIOWrapper uses this methods to determine# if BOM (for UTF-16, etc) should be added# Keep the original file open when the TextIOWrapper is# identify namespaces used in this tree# maps qnames to *encoded* prefix:local names# maps uri:s to prefixes# calculate serialized qname representation# default element# FIXME: can this be handled in XML 1.0?# populate qname and namespaces table# sort on prefix# FIXME: handle boolean attributes# this optional method is imported at the end of the module# "c14n": _serialize_c14n,# "well-known" namespace prefixes# xml schema# dublin core# For tests and troubleshooting# escape character data# it's worth avoiding do-nothing calls for strings that are# shorter than 500 characters, or so. assume that's, by far,# the most common case in most applications.# escape attribute value# The following business with carriage returns is to satisfy# Section 2.11 of the XML specification, stating that# CR or CR LN should be replaced with just LN# http://www.w3.org/TR/REC-xml/#sec-line-ends#The following four lines are issue 17582# debugging# parsing# Use the internal, undocumented _parser argument for now; When the# parser argument of iterparse is removed, this can be killed.# load event buffer# The _parser argument is for internal use only and must not be relied# upon in user code. 
It will be removed in a future release.# See http://bugs.python.org/issue17741 for more details.# wire up the parser for event reporting# iterparse needs this to set its root attribute properly :(# Parse XML document from string constant. Alias for XML().# data collector# element stack# last element# root element# true if we're after an end tag# also see ElementTree and TreeBuilder# underscored names are provided for compatibility only# name memo cache# main callbacks# miscellaneous callbacks# Configure pyexpat: buffering, new-style attribute handling.# unknown# Internal API for XMLPullParser# events_to_report: a list of events to report during parsing (same as# the *events* of XMLPullParser's constructor.# events_queue: a list of actual parsing events that will be populated# by the underlying parser.# TreeBuilder does not implement .start_ns()# TreeBuilder does not implement .end_ns()# expand qname, and convert name string to ascii, if possible# Handler for expat's StartElementHandler. Since ordered_attributes# is set, the attributes are reported as a list of alternating# attribute name,value.# deal with undefined entities# XML_ERROR_UNDEFINED_ENTITY# inside a doctype declaration# parse doctype contents# end of data# get rid of circular references# C14N 2.0# Stack with globally and newly declared namespaces as (uri, prefix) pairs.# Stack with user declared namespace prefixes as (uri, prefix) pairs.# almost no element declares new namespaces# Not declared yet => add new declaration.# No default namespace declared => no prefix needed.# As soon as a default namespace is defined,# anything that has no namespace (and thus, no prefix) goes there.# we may have to resolve qnames in text content# Need to parse text first to see if it requires a prefix declaration.# Resolve prefixes in attribute and tag text.# Assign prefixes in lexicographical order of used URIs.# Write namespace declarations in prefix order ...# almost always empty# ... followed by attributes in URI+name order# No prefix for attributes in default ('') namespace.# Honour xml:space attributes.# Write the tag.# Write the resolved qname text content.# shorter than 500 character, or so. assume that's, by far,# Import the C accelerators# Element is going to be shadowed by the C implementation. We need to keep# the Python version of it accessible for some "creative" by external code# (see tests)# Element, SubElement, ParseError, TreeBuilder, XMLParser, _set_factoriesb'Lightweight XML support for Python. + + XML is an inherently hierarchical data format, and the most natural way to + represent it is with a tree. This module has two classes for this purpose: + + 1. ElementTree represents the whole XML document as a tree and + + 2. Element represents a single node in this tree. + + Interactions with the whole document (reading and writing to/from files) are + usually done on the ElementTree level. Interactions with a single XML element + and its sub-elements are done on the Element level. + + Element is a flexible container object designed to store hierarchical data + structures in memory. It can be described as a cross between a list and a + dictionary. Each Element has a number of properties associated with it: + + 'tag' - a string containing the element's name. + + 'attributes' - a Python dictionary storing the element's attributes. + + 'text' - a string containing the element's text content. + + 'tail' - an optional string containing text after the element's end tag. + + And a number of child elements stored in a Python sequence. 
+ + To create an element instance, use the Element constructor, + or the SubElement factory function. + + You can also use the ElementTree class to wrap an element structure + and convert it to and from XML. + +'u'Lightweight XML support for Python. + + XML is an inherently hierarchical data format, and the most natural way to + represent it is with a tree. This module has two classes for this purpose: + + 1. ElementTree represents the whole XML document as a tree and + + 2. Element represents a single node in this tree. + + Interactions with the whole document (reading and writing to/from files) are + usually done on the ElementTree level. Interactions with a single XML element + and its sub-elements are done on the Element level. + + Element is a flexible container object designed to store hierarchical data + structures in memory. It can be described as a cross between a list and a + dictionary. Each Element has a number of properties associated with it: + + 'tag' - a string containing the element's name. + + 'attributes' - a Python dictionary storing the element's attributes. + + 'text' - a string containing the element's text content. + + 'tail' - an optional string containing text after the element's end tag. + + And a number of child elements stored in a Python sequence. + + To create an element instance, use the Element constructor, + or the SubElement factory function. + + You can also use the ElementTree class to wrap an element structure + and convert it to and from XML. + +'b'Comment'u'Comment'b'dump'u'dump'b'Element'u'Element'b'ElementTree'u'ElementTree'b'fromstring'u'fromstring'b'fromstringlist'u'fromstringlist'b'iselement'u'iselement'b'iterparse'u'iterparse'b'parse'u'parse'b'ParseError'u'ParseError'b'PI'u'PI'b'ProcessingInstruction'u'ProcessingInstruction'b'QName'u'QName'b'SubElement'u'SubElement'b'tostring'u'tostring'b'tostringlist'u'tostringlist'b'TreeBuilder'u'TreeBuilder'b'VERSION'u'VERSION'b'XML'u'XML'b'XMLID'u'XMLID'b'XMLParser'u'XMLParser'b'XMLPullParser'u'XMLPullParser'b'register_namespace'u'register_namespace'b'canonicalize'u'canonicalize'b'C14NWriterTarget'u'C14NWriterTarget'b'1.3.0'u'1.3.0'b'An error when parsing an XML document. + + In addition to its exception value, a ParseError contains + two extra attributes: + 'code' - the specific exception code + 'position' - the line and column of the error + + 'u'An error when parsing an XML document. + + In addition to its exception value, a ParseError contains + two extra attributes: + 'code' - the specific exception code + 'position' - the line and column of the error + + 'b'Return True if *element* appears to be an Element.'u'Return True if *element* appears to be an Element.'b'tag'u'tag'b'An XML element. + + This class is the reference implementation of the Element interface. + + An element's length is its number of subelements. That means if you + want to check if an element is truly empty, you should check BOTH + its length AND its text attribute. + + The element tag, attribute names, and attribute values can be either + bytes or strings. + + *tag* is the element name. *attrib* is an optional dictionary containing + element attributes. *extra* are additional element attributes given as + keyword arguments. + + Example form: + text...tail + + 'u'An XML element. + + This class is the reference implementation of the Element interface. + + An element's length is its number of subelements. That means if you + want to check if an element is truly empty, you should check BOTH + its length AND its text attribute. 
+ + The element tag, attribute names, and attribute values can be either + bytes or strings. + + *tag* is the element name. *attrib* is an optional dictionary containing + element attributes. *extra* are additional element attributes given as + keyword arguments. + + Example form: + text...tail + + 'b'The element's name.'u'The element's name.'b'Dictionary of the element's attributes.'u'Dictionary of the element's attributes.'b' + Text before first subelement. This is either a string or the value None. + Note that if there is no text, this attribute may be either + None or the empty string, depending on the parser. + + 'u' + Text before first subelement. This is either a string or the value None. + Note that if there is no text, this attribute may be either + None or the empty string, depending on the parser. + + 'b' + Text after this element's end tag, but before the next sibling element's + start tag. This is either a string or the value None. Note that if there + was no text, this attribute may be either None or an empty string, + depending on the parser. + + 'u' + Text after this element's end tag, but before the next sibling element's + start tag. This is either a string or the value None. Note that if there + was no text, this attribute may be either None or an empty string, + depending on the parser. + + 'b'attrib must be dict, not %s'u'attrib must be dict, not %s'b'<%s %r at %#x>'u'<%s %r at %#x>'b'Create a new element with the same type. + + *tag* is a string containing the element name. + *attrib* is a dictionary containing the element attributes. + + Do not call this method, use the SubElement factory function instead. + + 'u'Create a new element with the same type. + + *tag* is a string containing the element name. + *attrib* is a dictionary containing the element attributes. + + Do not call this method, use the SubElement factory function instead. + + 'b'Return copy of current element. + + This creates a shallow copy. Subelements will be shared with the + original tree. + + 'u'Return copy of current element. + + This creates a shallow copy. Subelements will be shared with the + original tree. + + 'b'The behavior of this method will change in future versions. Use specific 'len(elem)' or 'elem is not None' test instead.'u'The behavior of this method will change in future versions. Use specific 'len(elem)' or 'elem is not None' test instead.'b'Add *subelement* to the end of this element. + + The new element will appear in document order after the last existing + subelement (or directly after the text, if it's the first subelement), + but before the end tag for this element. + + 'u'Add *subelement* to the end of this element. + + The new element will appear in document order after the last existing + subelement (or directly after the text, if it's the first subelement), + but before the end tag for this element. + + 'b'Append subelements from a sequence. + + *elements* is a sequence with zero or more elements. + + 'u'Append subelements from a sequence. + + *elements* is a sequence with zero or more elements. + + 'b'Insert *subelement* at position *index*.'u'Insert *subelement* at position *index*.'b'expected an Element, not %s'u'expected an Element, not %s'b'Remove matching subelement. + + Unlike the find methods, this method compares elements based on + identity, NOT ON tag value or contents. To remove subelements by + other means, the easiest way is to use a list comprehension to + select what elements to keep, and then use slice assignment to update + the parent element. 
+ + ValueError is raised if a matching element could not be found. + + 'u'Remove matching subelement. + + Unlike the find methods, this method compares elements based on + identity, NOT ON tag value or contents. To remove subelements by + other means, the easiest way is to use a list comprehension to + select what elements to keep, and then use slice assignment to update + the parent element. + + ValueError is raised if a matching element could not be found. + + 'b'(Deprecated) Return all subelements. + + Elements are returned in document order. + + 'u'(Deprecated) Return all subelements. + + Elements are returned in document order. + + 'b'This method will be removed in future versions. Use 'list(elem)' or iteration over elem instead.'u'This method will be removed in future versions. Use 'list(elem)' or iteration over elem instead.'b'Find first matching element by tag name or path. + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return the first matching element, or None if no element was found. + + 'u'Find first matching element by tag name or path. + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return the first matching element, or None if no element was found. + + 'b'Find text for first matching element by tag name or path. + + *path* is a string having either an element tag or an XPath, + *default* is the value to return if the element was not found, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return text content of first matching element, or default value if + none was found. Note that if an element is found having no text + content, the empty string is returned. + + 'u'Find text for first matching element by tag name or path. + + *path* is a string having either an element tag or an XPath, + *default* is the value to return if the element was not found, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return text content of first matching element, or default value if + none was found. Note that if an element is found having no text + content, the empty string is returned. + + 'b'Find all matching subelements by tag name or path. + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Returns list containing all matching elements in document order. + + 'u'Find all matching subelements by tag name or path. + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Returns list containing all matching elements in document order. + + 'b'Find all matching subelements by tag name or path. + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return an iterable yielding all matching elements in document order. + + 'u'Find all matching subelements by tag name or path. + + *path* is a string having either an element tag or an XPath, + *namespaces* is an optional mapping from namespace prefix to full name. + + Return an iterable yielding all matching elements in document order. + + 'b'Reset element. + + This function removes all subelements, clears all attributes, and sets + the text and tail attributes to None. + + 'u'Reset element. 
+ + This function removes all subelements, clears all attributes, and sets + the text and tail attributes to None. + + 'b'Get element attribute. + + Equivalent to attrib.get, but some implementations may handle this a + bit more efficiently. *key* is what attribute to look for, and + *default* is what to return if the attribute was not found. + + Returns a string containing the attribute value, or the default if + attribute was not found. + + 'u'Get element attribute. + + Equivalent to attrib.get, but some implementations may handle this a + bit more efficiently. *key* is what attribute to look for, and + *default* is what to return if the attribute was not found. + + Returns a string containing the attribute value, or the default if + attribute was not found. + + 'b'Set element attribute. + + Equivalent to attrib[key] = value, but some implementations may handle + this a bit more efficiently. *key* is what attribute to set, and + *value* is the attribute value to set it to. + + 'u'Set element attribute. + + Equivalent to attrib[key] = value, but some implementations may handle + this a bit more efficiently. *key* is what attribute to set, and + *value* is the attribute value to set it to. + + 'b'Get list of attribute names. + + Names are returned in an arbitrary order, just like an ordinary + Python dict. Equivalent to attrib.keys() + + 'u'Get list of attribute names. + + Names are returned in an arbitrary order, just like an ordinary + Python dict. Equivalent to attrib.keys() + + 'b'Get element attributes as a sequence. + + The attributes are returned in arbitrary order. Equivalent to + attrib.items(). + + Return a list of (name, value) tuples. + + 'u'Get element attributes as a sequence. + + The attributes are returned in arbitrary order. Equivalent to + attrib.items(). + + Return a list of (name, value) tuples. + + 'b'Create tree iterator. + + The iterator loops over the element and all subelements in document + order, returning all elements with a matching tag. + + If the tree structure is modified during iteration, new or removed + elements may or may not be included. To get a stable set, use the + list() function on the iterator, and loop over the resulting list. + + *tag* is what tags to look for (default is to return all elements) + + Return an iterator containing all the matching elements. + + 'u'Create tree iterator. + + The iterator loops over the element and all subelements in document + order, returning all elements with a matching tag. + + If the tree structure is modified during iteration, new or removed + elements may or may not be included. To get a stable set, use the + list() function on the iterator, and loop over the resulting list. + + *tag* is what tags to look for (default is to return all elements) + + Return an iterator containing all the matching elements. + + 'b'This method will be removed in future versions. Use 'elem.iter()' or 'list(elem.iter())' instead.'u'This method will be removed in future versions. Use 'elem.iter()' or 'list(elem.iter())' instead.'b'Create text iterator. + + The iterator loops over the element and all subelements in document + order, returning all inner text. + + 'u'Create text iterator. + + The iterator loops over the element and all subelements in document + order, returning all inner text. + + 'b'Subelement factory which creates an element instance, and appends it + to an existing parent. + + The element tag, attribute names, and attribute values can be either + bytes or Unicode strings. 
+ + *parent* is the parent element, *tag* is the subelements name, *attrib* is + an optional directory containing element attributes, *extra* are + additional attributes given as keyword arguments. + + 'u'Subelement factory which creates an element instance, and appends it + to an existing parent. + + The element tag, attribute names, and attribute values can be either + bytes or Unicode strings. + + *parent* is the parent element, *tag* is the subelements name, *attrib* is + an optional directory containing element attributes, *extra* are + additional attributes given as keyword arguments. + + 'b'Comment element factory. + + This function creates a special element which the standard serializer + serializes as an XML comment. + + *text* is a string containing the comment string. + + 'u'Comment element factory. + + This function creates a special element which the standard serializer + serializes as an XML comment. + + *text* is a string containing the comment string. + + 'b'Processing Instruction element factory. + + This function creates a special element which the standard serializer + serializes as an XML comment. + + *target* is a string containing the processing instruction, *text* is a + string containing the processing instruction contents, if any. + + 'u'Processing Instruction element factory. + + This function creates a special element which the standard serializer + serializes as an XML comment. + + *target* is a string containing the processing instruction, *text* is a + string containing the processing instruction contents, if any. + + 'b' 'u' 'b'Qualified name wrapper. + + This class can be used to wrap a QName attribute value in order to get + proper namespace handing on output. + + *text_or_uri* is a string containing the QName value either in the form + {uri}local, or if the tag argument is given, the URI part of a QName. + + *tag* is an optional argument which if given, will make the first + argument (text_or_uri) be interpreted as a URI, and this argument (tag) + be interpreted as a local name. + + 'u'Qualified name wrapper. + + This class can be used to wrap a QName attribute value in order to get + proper namespace handing on output. + + *text_or_uri* is a string containing the QName value either in the form + {uri}local, or if the tag argument is given, the URI part of a QName. + + *tag* is an optional argument which if given, will make the first + argument (text_or_uri) be interpreted as a URI, and this argument (tag) + be interpreted as a local name. + + 'b'<%s %r>'u'<%s %r>'b'An XML element hierarchy. + + This class also provides support for serialization to and from + standard XML. + + *element* is an optional root element node, + *file* is an optional file handle or file name of an XML file whose + contents will be used to initialize the tree with. + + 'u'An XML element hierarchy. + + This class also provides support for serialization to and from + standard XML. + + *element* is an optional root element node, + *file* is an optional file handle or file name of an XML file whose + contents will be used to initialize the tree with. + + 'b'Return root element of this tree.'u'Return root element of this tree.'b'Replace root element of this tree. + + This will discard the current contents of the tree and replace it + with the given element. Use with care! + + 'u'Replace root element of this tree. + + This will discard the current contents of the tree and replace it + with the given element. Use with care! + + 'b'Load external XML document into element tree. 
[CodeQL Python string-pool contents: interned string constants and docstrings extracted from Python standard-library modules, including xml.etree.ElementTree, __future__, importlib, ctypes.test, http (HTTPStatus), ctypes.macholib, multiprocessing, and tkinter.]
toplevel windows will be treated like frame + windows once they are no longer managed by wm, however, the menu + option configuration will be remembered and the menus will return + once the widget is managed again.forgetwm_frameReturn identifier for decorative frame of this widget if present.wm_geometrynewGeometrySet geometry to NEWGEOMETRY of the form =widthxheight+x+y. Return + current value if None is given.wm_gridbaseWidthbaseHeightwidthIncheightIncInstruct the window manager that this widget shall only be + resized on grid boundaries. WIDTHINC and HEIGHTINC are the width and + height of a grid unit in pixels. BASEWIDTH and BASEHEIGHT are the + number of grid units requested in Tk_GeometryRequest.wm_grouppathNameSet the group leader widgets for related widgets to PATHNAME. Return + the group leader of this widget if None is given.groupwm_iconbitmapbitmapSet bitmap for the iconified widget to BITMAP. Return + the bitmap if None is given. + + Under Windows, the DEFAULT parameter can be used to set the icon + for the widget and any descendents that don't have an icon set + explicitly. DEFAULT can be the relative path to a .ico file + (example: root.iconbitmap(default='myicon.ico') ). See Tk + documentation for more information.iconbitmap-defaultwm_iconifyDisplay widget as icon.iconifywm_iconmaskSet mask for the icon bitmap of this widget. Return the + mask if None is given.iconmaskwm_iconnamenewNameSet the name of the icon for this widget. Return the name if + None is given.iconnamewm_iconphotoSets the titlebar icon for this window based on the named photo + images passed through args. If default is True, this is applied to + all future created toplevels as well. + + The data in the images is taken as a snapshot at the time of + invocation. If the images are later changed, this is not reflected + to the titlebar icons. Multiple images are accepted to allow + different images sizes to be provided. The window manager may scale + provided icons to an appropriate size. + + On Windows, the images are packed into a Windows icon structure. + This will override an icon specified to wm_iconbitmap, and vice + versa. + + On X, the images are arranged into the _NET_WM_ICON X property, + which most modern window managers support. An icon specified by + wm_iconbitmap may exist simultaneously. + + On Macintosh, this currently does nothing.iconphotowm_iconpositionSet the position of the icon of this widget to X and Y. Return + a tuple of the current values of X and X if None is given.iconpositionwm_iconwindowSet widget PATHNAME to be displayed instead of icon. Return the current + value if None is given.iconwindowwm_manageThe widget specified will become a stand alone top-level window. + The window will be decorated with the window managers title bar, + etc.managewm_maxsizeSet max WIDTH and HEIGHT for this widget. If the window is gridded + the values are given in grid units. Return the current values if None + is given.wm_minsizeSet min WIDTH and HEIGHT for this widget. If the window is gridded + the values are given in grid units. Return the current values if None + is given.minsizewm_overrideredirectInstruct the window manager to ignore this widget + if BOOLEAN is given with 1. Return the current value if None + is given.overrideredirectwm_positionfromwhoInstruct the window manager that the position of this widget shall + be defined by the user if WHO is "user", and by its own policy if WHO is + "program".positionfromwm_protocolBind function FUNC to command NAME for this widget. 
+ Return the function bound to NAME if None is given. NAME could be + e.g. "WM_SAVE_YOURSELF" or "WM_DELETE_WINDOW".protocolwm_resizableInstruct the window manager whether this width can be resized + in WIDTH or HEIGHT. Both values are boolean values.resizablewm_sizefromInstruct the window manager that the size of this widget shall + be defined by the user if WHO is "user", and by its own policy if WHO is + "program".sizefromwm_statenewstateQuery or set the state of this widget as one of normal, icon, + iconic (see wm_iconwindow), withdrawn, or zoomed (Windows only).wm_titleSet the title of this widget.wm_transientInstruct the window manager that this widget is transient + with regard to widget MASTER.transientwm_withdrawWithdraw this widget from the screen such that it is unmapped + and forgotten by the window manager. Re-draw it with wm_deiconify.withdrawToplevel widget of Tk which represents mostly the main window + of an application. It has an associated Tcl interpreter.screenNamebaseNameuseTksyncuseReturn a new Toplevel widget on screen SCREENNAME. A new Tcl interpreter will + be created. BASENAME will be used for the identification of the profile file (see + readprofile). + It is constructed from sys.argv[0] without extensions if None is given. CLASSNAME + is the name of the widget class._tkloadedbasenamesplitextext.py.pyccreate_loadtkreadprofileloadtktk_versiontk.h version (%s) doesn't match libtk.a version (%s)tcl_versiontcl.h version (%s) doesn't match libtcl.a version (%s)tkerrorWM_DELETE_WINDOWDestroy this and all descendants widgets. This will + end the application of this Tcl interpreter.Internal function. It reads BASENAME.tcl and CLASSNAME.tcl into + the Tcl Interpreter and calls exec on the contents of BASENAME.py and + CLASSNAME.py if such a file exists in the home directory.HOMEenvironhomecurdir.%s.tclclass_tcl.%s.pyclass_pybase_tclbase_pyfrom tkinter import *isfileReport callback exception on sys.stderr. + + Applications may want to override this internal function, and + should when sys.stderr is None.tracebackException in Tkinter callbacklast_typelast_valuelast_tracebackprint_exception__getattr__attrDelegate attribute access to the interpreter objectTclPackGeometry manager Pack. + + Base class to use the methods pack_* in every widget.pack_configurePack a widget in the parent widget. Use as options: + after=widget - pack it after you have packed widget + anchor=NSEW (or subset) - position widget according to + given direction + before=widget - pack it before you will pack widget + expand=bool - expand widget if parent size grows + fill=NONE or X or Y or BOTH - fill widget if widget grows + in=master - use master to contain this widget + in_=master - see 'in' option description + ipadx=amount - add internal padding in x direction + ipady=amount - add internal padding in y direction + padx=amount - add padding in x direction + pady=amount - add padding in y direction + side=TOP or BOTTOM or LEFT or RIGHT - where to add this widget. + pack_forgetUnmap this widget and do not use it for the packing order.pack_infoReturn information about the packing options + for this widget.dinPlaceGeometry manager Place. + + Base class to use the methods place_* in every widget.place_configurePlace a widget in the parent widget. 
Use as options: + in=master - master relative to which the widget is placed + in_=master - see 'in' option description + x=amount - locate anchor of this widget at position x of master + y=amount - locate anchor of this widget at position y of master + relx=amount - locate anchor of this widget between 0.0 and 1.0 + relative to width of master (1.0 is right edge) + rely=amount - locate anchor of this widget between 0.0 and 1.0 + relative to height of master (1.0 is bottom edge) + anchor=NSEW (or subset) - position anchor according to given direction + width=amount - width of this widget in pixel + height=amount - height of this widget in pixel + relwidth=amount - width of this widget between 0.0 and 1.0 + relative to width of master (1.0 is the same width + as the master) + relheight=amount - height of this widget between 0.0 and 1.0 + relative to height of master (1.0 is the same + height as the master) + bordermode="inside" or "outside" - whether to take border width of + master widget into account + place_forgetUnmap this widget.place_infoReturn information about the placing options + for this widget.GridGeometry manager Grid. + + Base class to use the methods grid_* in every widget.grid_configurePosition a widget in the parent widget in a grid. Use as options: + column=number - use cell identified with given column (starting with 0) + columnspan=number - this widget will span several columns + in=master - use master to contain this widget + in_=master - see 'in' option description + ipadx=amount - add internal padding in x direction + ipady=amount - add internal padding in y direction + padx=amount - add padding in x direction + pady=amount - add padding in y direction + row=number - use cell identified with given row (starting with 0) + rowspan=number - this widget will span several rows + sticky=NSEW - if cell is larger on which sides will this + widget stick to the cell boundary + grid_forgetgrid_removeUnmap this widget but remember the grid options.grid_infoReturn information about the options + for positioning this widget in a grid.BaseWidgetInternal class.Internal function. Sets up information about children.!%s!%s%dwidgetNameConstruct a widget with the parent widget MASTER, a name WIDGETNAME + and appropriate options.classesDestroy this and all descendants widgets._doWidgetInternal class. + + Base class for a widget which can be positioned with the geometry managers + Pack, Place or Grid.ToplevelToplevel widget, e.g. for dialogs.Construct a toplevel widget with the parent MASTER. + + Valid resource names: background, bd, bg, borderwidth, class, + colormap, container, cursor, height, highlightbackground, + highlightcolor, highlightthickness, menu, relief, screen, takefocus, + use, visual, width.wmkeyclass_colormapoptButton widget.Construct a button widget with the parent MASTER. + + STANDARD OPTIONS + + activebackground, activeforeground, anchor, + background, bitmap, borderwidth, cursor, + disabledforeground, font, foreground + highlightbackground, highlightcolor, + highlightthickness, image, justify, + padx, pady, relief, repeatdelay, + repeatinterval, takefocus, text, + textvariable, underline, wraplength + + WIDGET-SPECIFIC OPTIONS + + command, compound, default, height, + overrelief, state, width + buttonflashFlash the button. + + This is accomplished by redisplaying + the button several times, alternating between active and + normal colors. At the end of the flash the button is left + in the same normal/active state as when the command was + invoked. 
This command is ignored if the button's state is + disabled. + invokeInvoke the command associated with the button. + + The return value is the return value from the command, + or an empty string if there is no command associated with + the button. This command is ignored if the button's state + is disabled. + CanvasCanvas widget to display graphical elements like lines or text.Construct a canvas widget with the parent MASTER. + + Valid resource names: background, bd, bg, borderwidth, closeenough, + confine, cursor, height, highlightbackground, highlightcolor, + highlightthickness, insertbackground, insertborderwidth, + insertofftime, insertontime, insertwidth, offset, relief, + scrollregion, selectbackground, selectborderwidth, selectforeground, + state, takefocus, width, xscrollcommand, xscrollincrement, + yscrollcommand, yscrollincrement.canvasaddtagaddtag_abovenewtagtagOrIdAdd tag NEWTAG to all items above TAGORID.aboveaddtag_allAdd tag NEWTAG to all items.addtag_belowAdd tag NEWTAG to all items below TAGORID.belowaddtag_closesthaloAdd tag NEWTAG to item which is closest to pixel at X, Y. + If several match take the top-most. + All items closer than HALO are considered overlapping (all are + closests). If START is specified the next below this tag is taken.closestaddtag_enclosedx1y1x2y2Add tag NEWTAG to all items in the rectangle defined + by X1,Y1,X2,Y2.enclosedaddtag_overlappingAdd tag NEWTAG to all items which overlap the rectangle + defined by X1,Y1,X2,Y2.overlappingaddtag_withtagAdd tag NEWTAG to all items with TAGORID.withtagReturn a tuple of X1,Y1,X2,Y2 coordinates for a rectangle + which encloses all items with tags specified as arguments.tag_unbindUnbind for all items with TAGORID for event SEQUENCE the + function identified with FUNCID.tag_bindBind to all items with TAGORID at event SEQUENCE a call to function FUNC. + + An additional boolean parameter ADD specifies whether FUNC will be + called additionally to the other bound function or whether it will + replace the previous function. See bind for the return value.canvasxscreenxgridspacingReturn the canvas x coordinate of pixel position SCREENX rounded + to nearest multiple of GRIDSPACING units.canvasyscreenyReturn the canvas y coordinate of pixel position SCREENY rounded + to nearest multiple of GRIDSPACING units.coordsReturn a list of coordinates for the item given in ARGS._createitemTypecreate_arcCreate arc shaped region with coordinates x1,y1,x2,y2.arccreate_bitmapCreate bitmap with coordinates x1,y1.create_imageCreate image item with coordinates x1,y1.create_lineCreate line with coordinates x1,y1,...,xn,yn.linecreate_ovalCreate oval with coordinates x1,y1,x2,y2.ovalcreate_polygonCreate polygon with coordinates x1,y1,...,xn,yn.polygoncreate_rectangleCreate rectangle with coordinates x1,y1,x2,y2.rectanglecreate_textCreate text with coordinates x1,y1.create_windowCreate window with coordinates x1,y1,x2,y2.dcharsDelete characters of text items identified by tag or id in ARGS (possibly + several times) from FIRST to LAST character (including).Delete items identified by all tag or ids contained in ARGS.dtagDelete tag or id given as last arguments in ARGS from items + identified by first argument in ARGS.find_aboveReturn items above TAGORID.find_allReturn all items.find_belowReturn all items below TAGORID.find_closestReturn item which is closest to pixel at X, Y. + If several match take the top-most. + All items closer than HALO are considered overlapping (all are + closest). 
If START is specified the next below this tag is taken.find_enclosedReturn all items in rectangle defined + by X1,Y1,X2,Y2.find_overlappingReturn all items which overlap the rectangle + defined by X1,Y1,X2,Y2.find_withtagReturn all items with TAGORID.Set focus to the first item specified in ARGS.gettagsReturn tags associated with the first item specified in ARGS.icursorSet cursor at position POS in the item identified by TAGORID. + In ARGS TAGORID must be first.Return position of cursor as integer in item specified in ARGS.Insert TEXT in item TAGORID at position POS. ARGS must + be TAGORID POS TEXT.itemcgetReturn the resource value for an OPTION for item TAGORID.itemconfigureConfigure resources of an item TAGORID. + + The values for resources are specified as keyword + arguments. To get an overview about + the allowed keyword arguments call the method without arguments. + itemconfigtag_lowerLower an item TAGORID given in ARGS + (optional below another item).moveMove an item TAGORID given in ARGS.Move the items given by TAGORID in the canvas coordinate + space so that the first coordinate pair of the bottommost + item with tag TAGORID is located at position (X,Y). + X and Y may be the empty string, in which case the + corresponding coordinate will be unchanged. All items matching + TAGORID remain in the same positions relative to each other.postscriptPrint the contents of the canvas to a postscript + file. Valid options: colormap, colormode, file, fontmap, + height, pageanchor, pageheight, pagewidth, pagex, pagey, + rotate, width, x, y.tag_raiseRaise an item TAGORID given in ARGS + (optional above another item).scaleScale item TAGORID with XORIGIN, YORIGIN, XSCALE, YSCALE.scan_markRemember the current X, Y coordinates.scanmarkscan_dragtogainAdjust the view of the canvas to GAIN times the + difference between X and Y and the coordinates given in + scan_mark.dragtoselect_adjustAdjust the end of the selection near the cursor of an item TAGORID to index.adjustselect_clearClear the selection if it is in this widget.select_fromSet the fixed end of a selection in item TAGORID to INDEX.fromselect_itemReturn the item which has the selection.select_toSet the variable end of a selection in item TAGORID to INDEX.toReturn the type of the item TAGORID.CheckbuttonCheckbutton widget which is either in on- or off-state.Construct a checkbutton widget with the parent MASTER. + + Valid resource names: activebackground, activeforeground, anchor, + background, bd, bg, bitmap, borderwidth, command, cursor, + disabledforeground, fg, font, foreground, height, + highlightbackground, highlightcolor, highlightthickness, image, + indicatoron, justify, offvalue, onvalue, padx, pady, relief, + selectcolor, selectimage, state, takefocus, text, textvariable, + underline, variable, width, wraplength.checkbuttondeselectPut the button in off-state.Flash the button.Toggle the button and invoke a command if given as resource.Put the button in on-state.toggleToggle the button.EntryEntry widget which allows displaying simple text.Construct an entry widget with the parent MASTER. 
+ + Valid resource names: background, bd, bg, borderwidth, cursor, + exportselection, fg, font, foreground, highlightbackground, + highlightcolor, highlightthickness, insertbackground, + insertborderwidth, insertofftime, insertontime, insertwidth, + invalidcommand, invcmd, justify, relief, selectbackground, + selectborderwidth, selectforeground, show, state, takefocus, + textvariable, validate, validatecommand, vcmd, width, + xscrollcommand.entryfirstDelete text from FIRST to LAST (not included).Return the text.Insert cursor at INDEX.Return position of cursor.Insert STRING at INDEX.Adjust the view of the canvas to 10 times the + difference between X and Y and the coordinates given in + scan_mark.selection_adjustAdjust the end of the selection near the cursor to INDEX.selection_fromSet the fixed end of a selection to INDEX.selection_presentReturn True if there are characters selected in the entry, False + otherwise.presentselect_presentselection_rangeSet the selection from START to END (not included).select_rangeselection_toSet the variable end of a selection to INDEX.FrameFrame widget which may contain other widgets and can have a 3D border.Construct a frame widget with the parent MASTER. + + Valid resource names: background, bd, bg, borderwidth, class, + colormap, container, cursor, height, highlightbackground, + highlightcolor, highlightthickness, relief, takefocus, visual, width.-classLabelLabel widget which can display text and bitmaps.Construct a label widget with the parent MASTER. + + STANDARD OPTIONS + + activebackground, activeforeground, anchor, + background, bitmap, borderwidth, cursor, + disabledforeground, font, foreground, + highlightbackground, highlightcolor, + highlightthickness, image, justify, + padx, pady, relief, takefocus, text, + textvariable, underline, wraplength + + WIDGET-SPECIFIC OPTIONS + + height, state, width + + labelListboxListbox widget which can display a list of strings.Construct a listbox widget with the parent MASTER. + + Valid resource names: background, bd, bg, borderwidth, cursor, + exportselection, fg, font, foreground, height, highlightbackground, + highlightcolor, highlightthickness, relief, selectbackground, + selectborderwidth, selectforeground, selectmode, setgrid, takefocus, + width, xscrollcommand, yscrollcommand, listvariable.listboxactivateActivate item identified by INDEX.Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle + which encloses the item identified by the given index.curselectionReturn the indices of currently selected item.Delete items from FIRST to LAST (included).Get list of items from FIRST to LAST (included).Return index of item identified with INDEX.Insert ELEMENTS at INDEX.nearestGet index of item which is nearest to y coordinate Y.Adjust the view of the listbox to 10 times the + difference between X and Y and the coordinates given in + scan_mark.seeScroll such that INDEX is visible.selection_anchorSet the fixed end oft the selection to INDEX.select_anchorClear the selection from FIRST to LAST (included).selection_includesReturn True if INDEX is part of the selection.includesselect_includesselection_setSet the selection from FIRST to LAST (included) without + changing the currently selected elements.select_setReturn the number of elements in the listbox.Return the resource value for an ITEM and an OPTION.Configure resources of an ITEM. + + The values for resources are specified as keyword arguments. + To get an overview about the allowed keyword arguments + call the method without arguments. 
+ Valid resource names: background, bg, foreground, fg, + selectbackground, selectforeground.MenuMenu widget which allows displaying menu bars, pull-down menus and pop-up menus.Construct menu widget with the parent MASTER. + + Valid resource names: activebackground, activeborderwidth, + activeforeground, background, bd, bg, borderwidth, cursor, + disabledforeground, fg, font, foreground, postcommand, relief, + selectcolor, takefocus, tearoff, tearoffcommand, title, type.menutk_popupPost the menu at position X,Y with entry ENTRY.Activate entry at INDEX.add_cascadeAdd hierarchical menu item.cascadeadd_checkbuttonAdd checkbutton menu item.add_commandAdd command menu item.add_radiobuttonAddd radio menu item.radiobuttonadd_separatorAdd separator.separatorinsert_cascadeAdd hierarchical menu item at INDEX.insert_checkbuttonAdd checkbutton menu item at INDEX.insert_commandAdd command menu item at INDEX.insert_radiobuttonAddd radio menu item at INDEX.insert_separatorAdd separator at INDEX.index1index2Delete menu items between INDEX1 and INDEX2 (included).num_index1num_index2entryconfigentrycgetReturn the resource value of a menu item for OPTION at INDEX.entryconfigureConfigure a menu item at INDEX.Return the index of a menu item identified by INDEX.Invoke a menu item identified by INDEX and execute + the associated command.postDisplay a menu at position X,Y.Return the type of the menu item at INDEX.unpostUnmap a menu.xpositionReturn the x-position of the leftmost pixel of the menu item + at INDEX.ypositionReturn the y-position of the topmost pixel of the menu item at INDEX.MenubuttonMenubutton widget, obsolete since Tk8.0.menubuttonMessageMessage widget to display multiline text. Obsolete since Label does it too.messageRadiobuttonRadiobutton widget which shows only one of several buttons in on-state.Construct a radiobutton widget with the parent MASTER. + + Valid resource names: activebackground, activeforeground, anchor, + background, bd, bg, bitmap, borderwidth, command, cursor, + disabledforeground, fg, font, foreground, height, + highlightbackground, highlightcolor, highlightthickness, image, + indicatoron, justify, padx, pady, relief, selectcolor, selectimage, + state, takefocus, text, textvariable, underline, value, variable, + width, wraplength.ScaleScale widget which can display a numerical scale.Construct a scale widget with the parent MASTER. + + Valid resource names: activebackground, background, bigincrement, bd, + bg, borderwidth, command, cursor, digits, fg, font, foreground, from, + highlightbackground, highlightcolor, highlightthickness, label, + length, orient, relief, repeatdelay, repeatinterval, resolution, + showvalue, sliderlength, sliderrelief, state, takefocus, + tickinterval, to, troughcolor, variable, width.Get the current value as integer or float.Set the value to VALUE.Return a tuple (X,Y) of the point along the centerline of the + trough that corresponds to VALUE or the current value if None is + given.identifyReturn where the point X,Y lies. Valid return values are "slider", + "though1" and "though2".ScrollbarScrollbar widget which displays a slider at a certain position.Construct a scrollbar widget with the parent MASTER. + + Valid resource names: activebackground, activerelief, + background, bd, bg, borderwidth, command, cursor, + elementborderwidth, highlightbackground, + highlightcolor, highlightthickness, jump, orient, + relief, repeatdelay, repeatinterval, takefocus, + troughcolor, width.scrollbarMarks the element indicated by index as active. 
+ The only index values understood by this method are "arrow1", + "slider", or "arrow2". If any other value is specified then no + element of the scrollbar will be active. If index is not specified, + the method returns the name of the element that is currently active, + or None if no element is active.deltaxdeltayReturn the fractional change of the scrollbar setting if it + would be moved by DELTAX or DELTAY pixels.Return the fractional value which corresponds to a slider + position of X,Y.Return the element under position X,Y as one of + "arrow1","slider","arrow2" or "".Return the current fractional values (upper and lower end) + of the slider position.Set the fractional values of the slider position (upper and + lower ends as value between 0 and 1).TextText widget which can display text in various forms.Construct a text widget with the parent MASTER. + + STANDARD OPTIONS + + background, borderwidth, cursor, + exportselection, font, foreground, + highlightbackground, highlightcolor, + highlightthickness, insertbackground, + insertborderwidth, insertofftime, + insertontime, insertwidth, padx, pady, + relief, selectbackground, + selectborderwidth, selectforeground, + setgrid, takefocus, + xscrollcommand, yscrollcommand, + + WIDGET-SPECIFIC OPTIONS + + autoseparators, height, maxundo, + spacing1, spacing2, spacing3, + state, tabs, undo, width, wrap, + + Return a tuple of (x,y,width,height) which gives the bounding + box of the visible part of the character at the given index.compareopReturn whether between index INDEX1 and index INDEX2 the + relation OP is satisfied. OP is one of <, <=, ==, >=, >, or !=.Counts the number of relevant things between the two indices. + If index1 is after index2, the result will be a negative number + (and this holds for each of the possible options). + + The actual items which are counted depends on the options given by + args. The result is a list of integers, one for the result of each + counting option given. Valid counting options are "chars", + "displaychars", "displayindices", "displaylines", "indices", + "lines", "xpixels" and "ypixels". There is an additional possible + option "update", which if given then all subsequent options ensure + that any possible out of date information is recalculated.argTurn on the internal consistency checks of the B-Tree inside the text + widget according to BOOLEAN.Delete the characters between INDEX1 and INDEX2 (not included).dlineinfoReturn tuple (x,y,width,height,baseline) giving the bounding box + and baseline position of the visible part of the line containing + the character at INDEX.Return the contents of the widget between index1 and index2. + + The type of contents returned in filtered based on the keyword + parameters; if 'all', 'image', 'mark', 'tag', 'text', or 'window' are + given and true, then the corresponding items are returned. The result + is a list of triples of the form (key, value, index). If none of the + keywords are true then 'all' is used by default. + + If the 'command' argument is given, it is called once for each element + of the list of triples, with the values of each triple serving as the + arguments to the function. In this case the list is not returned.func_nameappend_triple-commandeditInternal method + + This method controls the undo mechanism and + the modified flag. The exact behavior of the + command depends on the option argument that + follows the edit argument. 
The following forms + of the command are currently supported: + + edit_modified, edit_redo, edit_reset, edit_separator + and edit_undo + + edit_modifiedGet or Set the modified flag + + If arg is not specified, returns the modified + flag of the widget. The insert, delete, edit undo and + edit redo commands or the user can set or clear the + modified flag. If boolean is specified, sets the + modified flag of the widget to arg. + modifiededit_redoRedo the last undone edit + + When the undo option is true, reapplies the last + undone edits provided no other edits were done since + then. Generates an error when the redo stack is empty. + Does nothing when the undo option is false. + redoedit_resetClears the undo and redo stacks + resetedit_separatorInserts a separator (boundary) on the undo stack. + + Does nothing when the undo option is false + edit_undoUndoes the last edit action + + If the undo option is true. An edit action is defined + as all the insert and delete commands that are recorded + on the undo stack in between two separators. Generates + an error when the undo stack is empty. Does nothing + when the undo option is false + undoReturn the text from INDEX1 to INDEX2 (not included).image_cgetReturn the value of OPTION of an embedded image at INDEX.image_configureConfigure an embedded image at INDEX.image_createCreate an embedded image at INDEX.Return all names of embedded images in this widget.Return the index in the form line.char for INDEX.charsInsert CHARS before the characters at INDEX. An additional + tag can be given in ARGS. Additional CHARS and tags can follow in ARGS.mark_gravitymarkNamedirectionChange the gravity of a mark MARKNAME to DIRECTION (LEFT or RIGHT). + Return the current value if None is given for DIRECTION.gravitymark_namesReturn all mark names.mark_setSet mark MARKNAME before the character at INDEX.mark_unsetmarkNamesDelete all marks in MARKNAMES.unsetmark_nextReturn the name of the next mark after INDEX.mark_previousReturn the name of the previous mark before INDEX.previouspeer_createnewPathNameCreates a peer text widget with the given newPathName, and any + optional standard configuration options. By default the peer will + have the same start and end line as the parent widget, but + these can be overridden with the standard configuration options.peerpeer_namesReturns a list of peers of this widget (this does not include + the widget itself).Replaces the range of characters between index1 and index2 with + the given characters and tags specified by args. + + See the method insert for some more information about args, and the + method delete for information about the indices.Adjust the view of the text to 10 times the + difference between X and Y and the coordinates given in + scan_mark.stopindexforwardsbackwardsexactregexpnocaseelideSearch PATTERN beginning from INDEX until STOPINDEX. + Return the index of the first character of a match or an + empty string.-forwards-backwards-exact-regexp-nocase-elide-countScroll such that the character at INDEX is visible.tag_addtagNameAdd tag TAGNAME to all characters between INDEX1 and index2 in ARGS. + Additional pairs of indices may follow in ARGS.Unbind for all characters with TAGNAME for event SEQUENCE the + function identified with FUNCID.Bind to all characters with TAGNAME at event SEQUENCE a call to function FUNC. + + An additional boolean parameter ADD specifies whether FUNC will be + called additionally to the other bound function or whether it will + replace the previous function. 
See bind for the return value.tag_cgetReturn the value of OPTION for tag TAGNAME.tag_configureConfigure a tag TAGNAME.tag_configtag_deletetagNamesDelete all tags in TAGNAMES.Change the priority of tag TAGNAME such that it is lower + than the priority of BELOWTHIS.tag_namesReturn a list of all tag names.tag_nextrangeReturn a list of start and end index for the first sequence of + characters between INDEX1 and INDEX2 which all have tag TAGNAME. + The text is searched forward from INDEX1.nextrangetag_prevrangeReturn a list of start and end index for the first sequence of + characters between INDEX1 and INDEX2 which all have tag TAGNAME. + The text is searched backwards from INDEX1.prevrangeChange the priority of tag TAGNAME such that it is higher + than the priority of ABOVETHIS.tag_rangesReturn a list of ranges of text which have tag TAGNAME.rangestag_removeRemove tag TAGNAME from all characters between INDEX1 and INDEX2.window_cgetReturn the value of OPTION of an embedded window at INDEX.window_configureConfigure an embedded window at INDEX.window_configwindow_createCreate a window at INDEX.window_namesReturn all names of embedded windows in this widget.yview_pickplaceObsolete function, use see.-pickplace_setitInternal class. It wraps the command in the widget OptionMenu.var__value__var__callbackOptionMenuOptionMenu which allows the user to select a value from a menu.Construct an optionmenu widget with the parent MASTER, with + the resource textvariable set to VARIABLE, the initially selected + value VALUE, the other menu values VALUES and an additional + keyword argument command.borderwidthtextvariableindicatoronRAISEDreliefhighlightthicknesstk_optionMenutearoff__menumenunameunknown option -Destroy this widget and the associated menu.ImageBase class for images._last_idimgtypecreate imagepyimage%rConfigure the image.Return the height of the image.Return the type of the image, e.g. "photo" or "bitmap".Return the width of the image.PhotoImageWidget which can display images in PGM, PPM, GIF, PNG format.Create an image with NAME. + + Valid resource names: data, format, file, gamma, height, palette, + width.photoblankDisplay a transparent image.Return the value of OPTION.Return a new PhotoImage with the same image as this widget.destImagezoomReturn a new PhotoImage with the same image as this widget + but zoom it with a factor of x in the X direction and y in the Y + direction. If y is not given, the default value is the same as x. + -zoomsubsampleReturn a new PhotoImage based on the same image as this widget + but use only every Xth or Yth pixel. If y is not given, the + default value is the same as x. + -subsampleReturn the color (red, green, blue) of the pixel at X,Y.putPut row formatted colors to image starting from + position TO, e.g. image.put("{red green} {blue yellow}", to=(4,6))-tofrom_coordsWrite image to file FILENAME in FORMAT starting from + position FROM_COORDS.-format-fromtransparency_getReturn True if the pixel at x,y is transparent.transparencytransparency_setSet the transparency of the pixel at x,y.BitmapImageWidget which can display images in XBM format.Create a bitmap with NAME. + + Valid resource names: background, data, file, foreground, maskdata, maskfile.use image_names()use image_types()Spinboxspinbox widget.Construct a spinbox widget with the parent MASTER. 
+ + STANDARD OPTIONS + + activebackground, background, borderwidth, + cursor, exportselection, font, foreground, + highlightbackground, highlightcolor, + highlightthickness, insertbackground, + insertborderwidth, insertofftime, + insertontime, insertwidth, justify, relief, + repeatdelay, repeatinterval, + selectbackground, selectborderwidth + selectforeground, takefocus, textvariable + xscrollcommand. + + WIDGET-SPECIFIC OPTIONS + + buttonbackground, buttoncursor, + buttondownrelief, buttonuprelief, + command, disabledbackground, + disabledforeground, format, from, + invalidcommand, increment, + readonlybackground, state, to, + validate, validatecommand values, + width, wrap, + spinboxReturn a tuple of X1,Y1,X2,Y2 coordinates for a + rectangle which encloses the character given by index. + + The first two elements of the list give the x and y + coordinates of the upper-left corner of the screen + area covered by the character (in pixels relative + to the widget) and the last two elements give the + width and height of the character, in pixels. The + bounding box may refer to a region outside the + visible area of the window. + Delete one or more elements of the spinbox. + + First is the index of the first character to delete, + and last is the index of the character just after + the last one to delete. If last isn't specified it + defaults to first+1, i.e. a single character is + deleted. This command returns an empty string. + Returns the spinbox's stringAlter the position of the insertion cursor. + + The insertion cursor will be displayed just before + the character given by index. Returns an empty string + Returns the name of the widget at position x, y + + Return value is one of: none, buttondown, buttonup, entry + Returns the numerical index corresponding to index + Insert string s at index + + Returns an empty string. + Causes the specified element to be invoked + + The element could be buttondown or buttonup + triggering the action associated with it. + Records x and the current view in the spinbox window; + + used in conjunction with later scan dragto commands. + Typically this command is associated with a mouse button + press in the widget. It returns an empty string. + Compute the difference between the given x argument + and the x argument to the last scan mark command + + It then adjusts the view left or right by 10 times the + difference in x-coordinates. This command is typically + associated with mouse motion events in the widget, to + produce the effect of dragging the spinbox at high speed + through the window. The return value is an empty string. + Locate the end of the selection nearest to the character + given by index, + + Then adjust that end of the selection to be at index + (i.e including but not going beyond index). The other + end of the selection is made the anchor point for future + select to commands. If the selection isn't currently in + the spinbox, then a new selection is created to include + the characters between index and the most recent selection + anchor point, inclusive. + Clear the selection + + If the selection isn't in this widget then the + command has no effect. + selection_elementSets or gets the currently selected element. + + If a spinbutton element is specified, it will be + displayed depressed. + Return True if there are characters selected in the spinbox, False + otherwise.LabelFramelabelframe widget.Construct a labelframe widget with the parent MASTER. 
+ + STANDARD OPTIONS + + borderwidth, cursor, font, foreground, + highlightbackground, highlightcolor, + highlightthickness, padx, pady, relief, + takefocus, text + + WIDGET-SPECIFIC OPTIONS + + background, class, colormap, container, + height, labelanchor, labelwidget, + visual, width + labelframePanedWindowpanedwindow widget.Construct a panedwindow widget with the parent MASTER. + + STANDARD OPTIONS + + background, borderwidth, cursor, height, + orient, relief, width + + WIDGET-SPECIFIC OPTIONS + + handlepad, handlesize, opaqueresize, + sashcursor, sashpad, sashrelief, + sashwidth, showhandle, + panedwindowAdd a child widget to the panedwindow in a new pane. + + The child argument is the name of the child widget + followed by pairs of arguments that specify how to + manage the windows. The possible options and values + are the ones accepted by the paneconfigure method. + Remove the pane containing child from the panedwindow + + All geometry management options for child will be forgotten. + Identify the panedwindow component at point x, y + + If the point is over a sash or a sash handle, the result + is a two element list containing the index of the sash or + handle, and a word indicating whether it is over a sash + or a handle, such as {0 sash} or {2 handle}. If the point + is over any other part of the panedwindow, the result is + an empty list. + proxyproxy_coordReturn the x and y pair of the most recent proxy location + coordproxy_forgetRemove the proxy from the display. + proxy_placePlace the proxy at the given x and y coordinates. + sashsash_coordReturn the current x and y pair for the sash given by index. + + Index must be an integer between 0 and 1 less than the + number of panes in the panedwindow. The coordinates given are + those of the top left corner of the region containing the sash. + pathName sash dragto index x y This command computes the + difference between the given coordinates and the coordinates + given to the last sash coord command for the given sash. It then + moves that sash the computed difference. The return value is the + empty string. + sash_markRecords x and y for the sash given by index; + + Used in conjunction with later dragto commands to move the sash. + sash_placePlace the sash given by index at the given coordinates + panecgetQuery a management option for window. + + Option may be any value allowed by the paneconfigure subcommand + paneconfigureQuery or modify the management options for window. + + If no option is specified, returns a list describing all + of the available options for pathName. If option is + specified with no value, then the command returns a list + describing the one named option (this list will be identical + to the corresponding sublist of the value returned if no + option is specified). If one or more option-value pairs are + specified, then the command modifies the given widget + option(s) to have the given value(s); in this case the + command returns an empty string. The following options + are supported: + + after window + Insert the window after the window specified. window + should be the name of a window already managed by pathName. + before window + Insert the window before the window specified. window + should be the name of a window already managed by pathName. + height size + Specify a height for the window. The height will be the + outer dimension of the window including its border, if + any. 
If size is an empty string, or if -height is not + specified, then the height requested internally by the + window will be used initially; the height may later be + adjusted by the movement of sashes in the panedwindow. + Size may be any value accepted by Tk_GetPixels. + minsize n + Specifies that the size of the window cannot be made + less than n. This constraint only affects the size of + the widget in the paned dimension -- the x dimension + for horizontal panedwindows, the y dimension for + vertical panedwindows. May be any value accepted by + Tk_GetPixels. + padx n + Specifies a non-negative value indicating how much + extra space to leave on each side of the window in + the X-direction. The value may have any of the forms + accepted by Tk_GetPixels. + pady n + Specifies a non-negative value indicating how much + extra space to leave on each side of the window in + the Y-direction. The value may have any of the forms + accepted by Tk_GetPixels. + sticky style + If a window's pane is larger than the requested + dimensions of the window, this option may be used + to position (or stretch) the window within its pane. + Style is a string that contains zero or more of the + characters n, s, e or w. The string can optionally + contains spaces or commas, but they are ignored. Each + letter refers to a side (north, south, east, or west) + that the window will "stick" to. If both n and s + (or e and w) are specified, the window will be + stretched to fill the entire height (or width) of + its cavity. + width size + Specify a width for the window. The width will be + the outer dimension of the window including its + border, if any. If size is an empty string, or + if -width is not specified, then the width requested + internally by the window will be used initially; the + width may later be adjusted by the movement of sashes + in the panedwindow. Size may be any value accepted by + Tk_GetPixels. + + paneconfigpanesReturns an ordered list of the child panes._testThis is Tcl/Tk version %s +This should be a cedilla: çClick me![%s]QUIT# If this fails your Python may not be configured for Tk# add '\' before special characters and spaces# undocumented# widget usually is known# serial and time are not very interesting# keysym_num duplicates keysym# x_root and y_root mostly duplicate x and y# Delete, so any use of _default_root will immediately raise an exception.# Rebind before deletion, so repeated calls will not fail.# check for type of NAME parameter to override weird error message# raised from Modules/_tkinter.c:SetVar like:# TypeError: setvar() takes exactly 3 arguments (2 given)#print '- Tkinter: deleted command', name# TODO: Add deprecation warning# Methods defined on both toplevel and interior widgets# used for generating child widget names# XXX font command?# XXX b/w compat# XXX b/w compat?# I'd rather use time.sleep(ms*0.001)# Clipboard handling:# XXX grab current w/o window argument# Tcl sometimes returns extra windows, e.g. 
[Omitted: machine-generated CodeQL string-pool data. This hunk dumps a database pool/bucket file as flattened text; it appears to intern source text encountered during extraction — comments and docstrings that evidently come from the Python standard library's tkinter module — with each string stored twice, once as a bytes literal (b'...') and once as a unicode literal (u'...'). The data is binary-style database content and is not meaningful to review line by line.]
Use as options: + in=master - master relative to which the widget is placed + in_=master - see 'in' option description + x=amount - locate anchor of this widget at position x of master + y=amount - locate anchor of this widget at position y of master + relx=amount - locate anchor of this widget between 0.0 and 1.0 + relative to width of master (1.0 is right edge) + rely=amount - locate anchor of this widget between 0.0 and 1.0 + relative to height of master (1.0 is bottom edge) + anchor=NSEW (or subset) - position anchor according to given direction + width=amount - width of this widget in pixel + height=amount - height of this widget in pixel + relwidth=amount - width of this widget between 0.0 and 1.0 + relative to width of master (1.0 is the same width + as the master) + relheight=amount - height of this widget between 0.0 and 1.0 + relative to height of master (1.0 is the same + height as the master) + bordermode="inside" or "outside" - whether to take border width of + master widget into account + 'b'Unmap this widget.'u'Unmap this widget.'b'Return information about the placing options + for this widget.'u'Return information about the placing options + for this widget.'b'Geometry manager Grid. + + Base class to use the methods grid_* in every widget.'u'Geometry manager Grid. + + Base class to use the methods grid_* in every widget.'b'Position a widget in the parent widget in a grid. Use as options: + column=number - use cell identified with given column (starting with 0) + columnspan=number - this widget will span several columns + in=master - use master to contain this widget + in_=master - see 'in' option description + ipadx=amount - add internal padding in x direction + ipady=amount - add internal padding in y direction + padx=amount - add padding in x direction + pady=amount - add padding in y direction + row=number - use cell identified with given row (starting with 0) + rowspan=number - this widget will span several rows + sticky=NSEW - if cell is larger on which sides will this + widget stick to the cell boundary + 'u'Position a widget in the parent widget in a grid. Use as options: + column=number - use cell identified with given column (starting with 0) + columnspan=number - this widget will span several columns + in=master - use master to contain this widget + in_=master - see 'in' option description + ipadx=amount - add internal padding in x direction + ipady=amount - add internal padding in y direction + padx=amount - add padding in x direction + pady=amount - add padding in y direction + row=number - use cell identified with given row (starting with 0) + rowspan=number - this widget will span several rows + sticky=NSEW - if cell is larger on which sides will this + widget stick to the cell boundary + 'b'Unmap this widget but remember the grid options.'u'Unmap this widget but remember the grid options.'b'Return information about the options + for positioning this widget in a grid.'u'Return information about the options + for positioning this widget in a grid.'b'Internal class.'u'Internal class.'b'Internal function. Sets up information about children.'u'Internal function. Sets up information about children.'b'!%s'u'!%s'b'!%s%d'u'!%s%d'b'Construct a widget with the parent widget MASTER, a name WIDGETNAME + and appropriate options.'u'Construct a widget with the parent widget MASTER, a name WIDGETNAME + and appropriate options.'b'Destroy this and all descendants widgets.'u'Destroy this and all descendants widgets.'b'Internal class. 
+ + Base class for a widget which can be positioned with the geometry managers + Pack, Place or Grid.'u'Internal class. + + Base class for a widget which can be positioned with the geometry managers + Pack, Place or Grid.'b'Toplevel widget, e.g. for dialogs.'u'Toplevel widget, e.g. for dialogs.'b'Construct a toplevel widget with the parent MASTER. + + Valid resource names: background, bd, bg, borderwidth, class, + colormap, container, cursor, height, highlightbackground, + highlightcolor, highlightthickness, menu, relief, screen, takefocus, + use, visual, width.'u'Construct a toplevel widget with the parent MASTER. + + Valid resource names: background, bd, bg, borderwidth, class, + colormap, container, cursor, height, highlightbackground, + highlightcolor, highlightthickness, menu, relief, screen, takefocus, + use, visual, width.'b'class_'u'class_'b'colormap'u'colormap'b'Button widget.'u'Button widget.'b'Construct a button widget with the parent MASTER. + + STANDARD OPTIONS + + activebackground, activeforeground, anchor, + background, bitmap, borderwidth, cursor, + disabledforeground, font, foreground + highlightbackground, highlightcolor, + highlightthickness, image, justify, + padx, pady, relief, repeatdelay, + repeatinterval, takefocus, text, + textvariable, underline, wraplength + + WIDGET-SPECIFIC OPTIONS + + command, compound, default, height, + overrelief, state, width + 'u'Construct a button widget with the parent MASTER. + + STANDARD OPTIONS + + activebackground, activeforeground, anchor, + background, bitmap, borderwidth, cursor, + disabledforeground, font, foreground + highlightbackground, highlightcolor, + highlightthickness, image, justify, + padx, pady, relief, repeatdelay, + repeatinterval, takefocus, text, + textvariable, underline, wraplength + + WIDGET-SPECIFIC OPTIONS + + command, compound, default, height, + overrelief, state, width + 'b'button'u'button'b'Flash the button. + + This is accomplished by redisplaying + the button several times, alternating between active and + normal colors. At the end of the flash the button is left + in the same normal/active state as when the command was + invoked. This command is ignored if the button's state is + disabled. + 'u'Flash the button. + + This is accomplished by redisplaying + the button several times, alternating between active and + normal colors. At the end of the flash the button is left + in the same normal/active state as when the command was + invoked. This command is ignored if the button's state is + disabled. + 'b'flash'u'flash'b'Invoke the command associated with the button. + + The return value is the return value from the command, + or an empty string if there is no command associated with + the button. This command is ignored if the button's state + is disabled. + 'u'Invoke the command associated with the button. + + The return value is the return value from the command, + or an empty string if there is no command associated with + the button. This command is ignored if the button's state + is disabled. + 'b'invoke'u'invoke'b'Canvas widget to display graphical elements like lines or text.'u'Canvas widget to display graphical elements like lines or text.'b'Construct a canvas widget with the parent MASTER. 
+ + Valid resource names: background, bd, bg, borderwidth, closeenough, + confine, cursor, height, highlightbackground, highlightcolor, + highlightthickness, insertbackground, insertborderwidth, + insertofftime, insertontime, insertwidth, offset, relief, + scrollregion, selectbackground, selectborderwidth, selectforeground, + state, takefocus, width, xscrollcommand, xscrollincrement, + yscrollcommand, yscrollincrement.'u'Construct a canvas widget with the parent MASTER. + + Valid resource names: background, bd, bg, borderwidth, closeenough, + confine, cursor, height, highlightbackground, highlightcolor, + highlightthickness, insertbackground, insertborderwidth, + insertofftime, insertontime, insertwidth, offset, relief, + scrollregion, selectbackground, selectborderwidth, selectforeground, + state, takefocus, width, xscrollcommand, xscrollincrement, + yscrollcommand, yscrollincrement.'b'canvas'u'canvas'b'addtag'u'addtag'b'Add tag NEWTAG to all items above TAGORID.'u'Add tag NEWTAG to all items above TAGORID.'b'above'u'above'b'Add tag NEWTAG to all items.'u'Add tag NEWTAG to all items.'b'Add tag NEWTAG to all items below TAGORID.'u'Add tag NEWTAG to all items below TAGORID.'b'below'u'below'b'Add tag NEWTAG to item which is closest to pixel at X, Y. + If several match take the top-most. + All items closer than HALO are considered overlapping (all are + closests). If START is specified the next below this tag is taken.'u'Add tag NEWTAG to item which is closest to pixel at X, Y. + If several match take the top-most. + All items closer than HALO are considered overlapping (all are + closests). If START is specified the next below this tag is taken.'b'closest'u'closest'b'Add tag NEWTAG to all items in the rectangle defined + by X1,Y1,X2,Y2.'u'Add tag NEWTAG to all items in the rectangle defined + by X1,Y1,X2,Y2.'b'enclosed'u'enclosed'b'Add tag NEWTAG to all items which overlap the rectangle + defined by X1,Y1,X2,Y2.'u'Add tag NEWTAG to all items which overlap the rectangle + defined by X1,Y1,X2,Y2.'b'overlapping'u'overlapping'b'Add tag NEWTAG to all items with TAGORID.'u'Add tag NEWTAG to all items with TAGORID.'b'withtag'u'withtag'b'Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle + which encloses all items with tags specified as arguments.'u'Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle + which encloses all items with tags specified as arguments.'b'Unbind for all items with TAGORID for event SEQUENCE the + function identified with FUNCID.'u'Unbind for all items with TAGORID for event SEQUENCE the + function identified with FUNCID.'b'Bind to all items with TAGORID at event SEQUENCE a call to function FUNC. + + An additional boolean parameter ADD specifies whether FUNC will be + called additionally to the other bound function or whether it will + replace the previous function. See bind for the return value.'u'Bind to all items with TAGORID at event SEQUENCE a call to function FUNC. + + An additional boolean parameter ADD specifies whether FUNC will be + called additionally to the other bound function or whether it will + replace the previous function. 
See bind for the return value.'b'Return the canvas x coordinate of pixel position SCREENX rounded + to nearest multiple of GRIDSPACING units.'u'Return the canvas x coordinate of pixel position SCREENX rounded + to nearest multiple of GRIDSPACING units.'b'canvasx'u'canvasx'b'Return the canvas y coordinate of pixel position SCREENY rounded + to nearest multiple of GRIDSPACING units.'u'Return the canvas y coordinate of pixel position SCREENY rounded + to nearest multiple of GRIDSPACING units.'b'canvasy'u'canvasy'b'Return a list of coordinates for the item given in ARGS.'u'Return a list of coordinates for the item given in ARGS.'b'coords'u'coords'b'create'u'create'b'Create arc shaped region with coordinates x1,y1,x2,y2.'u'Create arc shaped region with coordinates x1,y1,x2,y2.'b'arc'u'arc'b'Create bitmap with coordinates x1,y1.'u'Create bitmap with coordinates x1,y1.'b'bitmap'u'bitmap'b'Create image item with coordinates x1,y1.'u'Create image item with coordinates x1,y1.'b'Create line with coordinates x1,y1,...,xn,yn.'u'Create line with coordinates x1,y1,...,xn,yn.'b'line'u'line'b'Create oval with coordinates x1,y1,x2,y2.'u'Create oval with coordinates x1,y1,x2,y2.'b'oval'u'oval'b'Create polygon with coordinates x1,y1,...,xn,yn.'u'Create polygon with coordinates x1,y1,...,xn,yn.'b'polygon'u'polygon'b'Create rectangle with coordinates x1,y1,x2,y2.'u'Create rectangle with coordinates x1,y1,x2,y2.'b'rectangle'u'rectangle'b'Create text with coordinates x1,y1.'u'Create text with coordinates x1,y1.'b'Create window with coordinates x1,y1,x2,y2.'u'Create window with coordinates x1,y1,x2,y2.'b'Delete characters of text items identified by tag or id in ARGS (possibly + several times) from FIRST to LAST character (including).'u'Delete characters of text items identified by tag or id in ARGS (possibly + several times) from FIRST to LAST character (including).'b'dchars'u'dchars'b'Delete items identified by all tag or ids contained in ARGS.'u'Delete items identified by all tag or ids contained in ARGS.'b'Delete tag or id given as last arguments in ARGS from items + identified by first argument in ARGS.'u'Delete tag or id given as last arguments in ARGS from items + identified by first argument in ARGS.'b'dtag'u'dtag'b'find'u'find'b'Return items above TAGORID.'u'Return items above TAGORID.'b'Return all items.'u'Return all items.'b'Return all items below TAGORID.'u'Return all items below TAGORID.'b'Return item which is closest to pixel at X, Y. + If several match take the top-most. + All items closer than HALO are considered overlapping (all are + closest). If START is specified the next below this tag is taken.'u'Return item which is closest to pixel at X, Y. + If several match take the top-most. + All items closer than HALO are considered overlapping (all are + closest). If START is specified the next below this tag is taken.'b'Return all items in rectangle defined + by X1,Y1,X2,Y2.'u'Return all items in rectangle defined + by X1,Y1,X2,Y2.'b'Return all items which overlap the rectangle + defined by X1,Y1,X2,Y2.'u'Return all items which overlap the rectangle + defined by X1,Y1,X2,Y2.'b'Return all items with TAGORID.'u'Return all items with TAGORID.'b'Set focus to the first item specified in ARGS.'u'Set focus to the first item specified in ARGS.'b'Return tags associated with the first item specified in ARGS.'u'Return tags associated with the first item specified in ARGS.'b'gettags'u'gettags'b'Set cursor at position POS in the item identified by TAGORID. 
+ In ARGS TAGORID must be first.'u'Set cursor at position POS in the item identified by TAGORID. + In ARGS TAGORID must be first.'b'icursor'u'icursor'b'Return position of cursor as integer in item specified in ARGS.'u'Return position of cursor as integer in item specified in ARGS.'b'index'u'index'b'Insert TEXT in item TAGORID at position POS. ARGS must + be TAGORID POS TEXT.'u'Insert TEXT in item TAGORID at position POS. ARGS must + be TAGORID POS TEXT.'b'insert'u'insert'b'Return the resource value for an OPTION for item TAGORID.'u'Return the resource value for an OPTION for item TAGORID.'b'itemcget'u'itemcget'b'Configure resources of an item TAGORID. + + The values for resources are specified as keyword + arguments. To get an overview about + the allowed keyword arguments call the method without arguments. + 'u'Configure resources of an item TAGORID. + + The values for resources are specified as keyword + arguments. To get an overview about + the allowed keyword arguments call the method without arguments. + 'b'itemconfigure'u'itemconfigure'b'Lower an item TAGORID given in ARGS + (optional below another item).'u'Lower an item TAGORID given in ARGS + (optional below another item).'b'Move an item TAGORID given in ARGS.'u'Move an item TAGORID given in ARGS.'b'move'u'move'b'Move the items given by TAGORID in the canvas coordinate + space so that the first coordinate pair of the bottommost + item with tag TAGORID is located at position (X,Y). + X and Y may be the empty string, in which case the + corresponding coordinate will be unchanged. All items matching + TAGORID remain in the same positions relative to each other.'u'Move the items given by TAGORID in the canvas coordinate + space so that the first coordinate pair of the bottommost + item with tag TAGORID is located at position (X,Y). + X and Y may be the empty string, in which case the + corresponding coordinate will be unchanged. All items matching + TAGORID remain in the same positions relative to each other.'b'Print the contents of the canvas to a postscript + file. Valid options: colormap, colormode, file, fontmap, + height, pageanchor, pageheight, pagewidth, pagex, pagey, + rotate, width, x, y.'u'Print the contents of the canvas to a postscript + file. 
Valid options: colormap, colormode, file, fontmap, + height, pageanchor, pageheight, pagewidth, pagex, pagey, + rotate, width, x, y.'b'postscript'u'postscript'b'Raise an item TAGORID given in ARGS + (optional above another item).'u'Raise an item TAGORID given in ARGS + (optional above another item).'b'Scale item TAGORID with XORIGIN, YORIGIN, XSCALE, YSCALE.'u'Scale item TAGORID with XORIGIN, YORIGIN, XSCALE, YSCALE.'b'scale'u'scale'b'Remember the current X, Y coordinates.'u'Remember the current X, Y coordinates.'b'scan'u'scan'b'mark'u'mark'b'Adjust the view of the canvas to GAIN times the + difference between X and Y and the coordinates given in + scan_mark.'u'Adjust the view of the canvas to GAIN times the + difference between X and Y and the coordinates given in + scan_mark.'b'dragto'u'dragto'b'Adjust the end of the selection near the cursor of an item TAGORID to index.'u'Adjust the end of the selection near the cursor of an item TAGORID to index.'b'select'u'select'b'adjust'u'adjust'b'Clear the selection if it is in this widget.'u'Clear the selection if it is in this widget.'b'Set the fixed end of a selection in item TAGORID to INDEX.'u'Set the fixed end of a selection in item TAGORID to INDEX.'b'from'u'from'b'Return the item which has the selection.'u'Return the item which has the selection.'b'item'u'item'b'Set the variable end of a selection in item TAGORID to INDEX.'u'Set the variable end of a selection in item TAGORID to INDEX.'b'to'u'to'b'Return the type of the item TAGORID.'u'Return the type of the item TAGORID.'b'Checkbutton widget which is either in on- or off-state.'u'Checkbutton widget which is either in on- or off-state.'b'Construct a checkbutton widget with the parent MASTER. + + Valid resource names: activebackground, activeforeground, anchor, + background, bd, bg, bitmap, borderwidth, command, cursor, + disabledforeground, fg, font, foreground, height, + highlightbackground, highlightcolor, highlightthickness, image, + indicatoron, justify, offvalue, onvalue, padx, pady, relief, + selectcolor, selectimage, state, takefocus, text, textvariable, + underline, variable, width, wraplength.'u'Construct a checkbutton widget with the parent MASTER. + + Valid resource names: activebackground, activeforeground, anchor, + background, bd, bg, bitmap, borderwidth, command, cursor, + disabledforeground, fg, font, foreground, height, + highlightbackground, highlightcolor, highlightthickness, image, + indicatoron, justify, offvalue, onvalue, padx, pady, relief, + selectcolor, selectimage, state, takefocus, text, textvariable, + underline, variable, width, wraplength.'b'checkbutton'u'checkbutton'b'Put the button in off-state.'u'Put the button in off-state.'b'deselect'u'deselect'b'Flash the button.'u'Flash the button.'b'Toggle the button and invoke a command if given as resource.'u'Toggle the button and invoke a command if given as resource.'b'Put the button in on-state.'u'Put the button in on-state.'b'Toggle the button.'u'Toggle the button.'b'toggle'u'toggle'b'Entry widget which allows displaying simple text.'u'Entry widget which allows displaying simple text.'b'Construct an entry widget with the parent MASTER. 
+ + Valid resource names: background, bd, bg, borderwidth, cursor, + exportselection, fg, font, foreground, highlightbackground, + highlightcolor, highlightthickness, insertbackground, + insertborderwidth, insertofftime, insertontime, insertwidth, + invalidcommand, invcmd, justify, relief, selectbackground, + selectborderwidth, selectforeground, show, state, takefocus, + textvariable, validate, validatecommand, vcmd, width, + xscrollcommand.'u'Construct an entry widget with the parent MASTER. + + Valid resource names: background, bd, bg, borderwidth, cursor, + exportselection, fg, font, foreground, highlightbackground, + highlightcolor, highlightthickness, insertbackground, + insertborderwidth, insertofftime, insertontime, insertwidth, + invalidcommand, invcmd, justify, relief, selectbackground, + selectborderwidth, selectforeground, show, state, takefocus, + textvariable, validate, validatecommand, vcmd, width, + xscrollcommand.'b'entry'u'entry'b'Delete text from FIRST to LAST (not included).'u'Delete text from FIRST to LAST (not included).'b'Return the text.'u'Return the text.'b'Insert cursor at INDEX.'u'Insert cursor at INDEX.'b'Return position of cursor.'u'Return position of cursor.'b'Insert STRING at INDEX.'u'Insert STRING at INDEX.'b'Adjust the view of the canvas to 10 times the + difference between X and Y and the coordinates given in + scan_mark.'u'Adjust the view of the canvas to 10 times the + difference between X and Y and the coordinates given in + scan_mark.'b'Adjust the end of the selection near the cursor to INDEX.'u'Adjust the end of the selection near the cursor to INDEX.'b'Set the fixed end of a selection to INDEX.'u'Set the fixed end of a selection to INDEX.'b'Return True if there are characters selected in the entry, False + otherwise.'u'Return True if there are characters selected in the entry, False + otherwise.'b'present'u'present'b'Set the selection from START to END (not included).'u'Set the selection from START to END (not included).'b'range'u'range'b'Set the variable end of a selection to INDEX.'u'Set the variable end of a selection to INDEX.'b'Frame widget which may contain other widgets and can have a 3D border.'u'Frame widget which may contain other widgets and can have a 3D border.'b'Construct a frame widget with the parent MASTER. + + Valid resource names: background, bd, bg, borderwidth, class, + colormap, container, cursor, height, highlightbackground, + highlightcolor, highlightthickness, relief, takefocus, visual, width.'u'Construct a frame widget with the parent MASTER. + + Valid resource names: background, bd, bg, borderwidth, class, + colormap, container, cursor, height, highlightbackground, + highlightcolor, highlightthickness, relief, takefocus, visual, width.'b'-class'u'-class'b'Label widget which can display text and bitmaps.'u'Label widget which can display text and bitmaps.'b'Construct a label widget with the parent MASTER. + + STANDARD OPTIONS + + activebackground, activeforeground, anchor, + background, bitmap, borderwidth, cursor, + disabledforeground, font, foreground, + highlightbackground, highlightcolor, + highlightthickness, image, justify, + padx, pady, relief, takefocus, text, + textvariable, underline, wraplength + + WIDGET-SPECIFIC OPTIONS + + height, state, width + + 'u'Construct a label widget with the parent MASTER. 
+ + STANDARD OPTIONS + + activebackground, activeforeground, anchor, + background, bitmap, borderwidth, cursor, + disabledforeground, font, foreground, + highlightbackground, highlightcolor, + highlightthickness, image, justify, + padx, pady, relief, takefocus, text, + textvariable, underline, wraplength + + WIDGET-SPECIFIC OPTIONS + + height, state, width + + 'b'label'u'label'b'Listbox widget which can display a list of strings.'u'Listbox widget which can display a list of strings.'b'Construct a listbox widget with the parent MASTER. + + Valid resource names: background, bd, bg, borderwidth, cursor, + exportselection, fg, font, foreground, height, highlightbackground, + highlightcolor, highlightthickness, relief, selectbackground, + selectborderwidth, selectforeground, selectmode, setgrid, takefocus, + width, xscrollcommand, yscrollcommand, listvariable.'u'Construct a listbox widget with the parent MASTER. + + Valid resource names: background, bd, bg, borderwidth, cursor, + exportselection, fg, font, foreground, height, highlightbackground, + highlightcolor, highlightthickness, relief, selectbackground, + selectborderwidth, selectforeground, selectmode, setgrid, takefocus, + width, xscrollcommand, yscrollcommand, listvariable.'b'listbox'u'listbox'b'Activate item identified by INDEX.'u'Activate item identified by INDEX.'b'activate'u'activate'b'Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle + which encloses the item identified by the given index.'u'Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle + which encloses the item identified by the given index.'b'Return the indices of currently selected item.'u'Return the indices of currently selected item.'b'curselection'u'curselection'b'Delete items from FIRST to LAST (included).'u'Delete items from FIRST to LAST (included).'b'Get list of items from FIRST to LAST (included).'u'Get list of items from FIRST to LAST (included).'b'Return index of item identified with INDEX.'u'Return index of item identified with INDEX.'b'Insert ELEMENTS at INDEX.'u'Insert ELEMENTS at INDEX.'b'Get index of item which is nearest to y coordinate Y.'u'Get index of item which is nearest to y coordinate Y.'b'nearest'u'nearest'b'Adjust the view of the listbox to 10 times the + difference between X and Y and the coordinates given in + scan_mark.'u'Adjust the view of the listbox to 10 times the + difference between X and Y and the coordinates given in + scan_mark.'b'Scroll such that INDEX is visible.'u'Scroll such that INDEX is visible.'b'see'u'see'b'Set the fixed end oft the selection to INDEX.'u'Set the fixed end oft the selection to INDEX.'b'Clear the selection from FIRST to LAST (included).'u'Clear the selection from FIRST to LAST (included).'b'Return True if INDEX is part of the selection.'u'Return True if INDEX is part of the selection.'b'includes'u'includes'b'Set the selection from FIRST to LAST (included) without + changing the currently selected elements.'u'Set the selection from FIRST to LAST (included) without + changing the currently selected elements.'b'Return the number of elements in the listbox.'u'Return the number of elements in the listbox.'b'Return the resource value for an ITEM and an OPTION.'u'Return the resource value for an ITEM and an OPTION.'b'Configure resources of an ITEM. + + The values for resources are specified as keyword arguments. + To get an overview about the allowed keyword arguments + call the method without arguments. 
+ Valid resource names: background, bg, foreground, fg, + selectbackground, selectforeground.'u'Configure resources of an ITEM. + + The values for resources are specified as keyword arguments. + To get an overview about the allowed keyword arguments + call the method without arguments. + Valid resource names: background, bg, foreground, fg, + selectbackground, selectforeground.'b'Menu widget which allows displaying menu bars, pull-down menus and pop-up menus.'u'Menu widget which allows displaying menu bars, pull-down menus and pop-up menus.'b'Construct menu widget with the parent MASTER. + + Valid resource names: activebackground, activeborderwidth, + activeforeground, background, bd, bg, borderwidth, cursor, + disabledforeground, fg, font, foreground, postcommand, relief, + selectcolor, takefocus, tearoff, tearoffcommand, title, type.'u'Construct menu widget with the parent MASTER. + + Valid resource names: activebackground, activeborderwidth, + activeforeground, background, bd, bg, borderwidth, cursor, + disabledforeground, fg, font, foreground, postcommand, relief, + selectcolor, takefocus, tearoff, tearoffcommand, title, type.'b'menu'u'menu'b'Post the menu at position X,Y with entry ENTRY.'u'Post the menu at position X,Y with entry ENTRY.'b'tk_popup'u'tk_popup'b'Activate entry at INDEX.'u'Activate entry at INDEX.'b'Add hierarchical menu item.'u'Add hierarchical menu item.'b'cascade'u'cascade'b'Add checkbutton menu item.'u'Add checkbutton menu item.'b'Add command menu item.'u'Add command menu item.'b'Addd radio menu item.'u'Addd radio menu item.'b'radiobutton'u'radiobutton'b'Add separator.'u'Add separator.'b'separator'u'separator'b'Add hierarchical menu item at INDEX.'u'Add hierarchical menu item at INDEX.'b'Add checkbutton menu item at INDEX.'u'Add checkbutton menu item at INDEX.'b'Add command menu item at INDEX.'u'Add command menu item at INDEX.'b'Addd radio menu item at INDEX.'u'Addd radio menu item at INDEX.'b'Add separator at INDEX.'u'Add separator at INDEX.'b'Delete menu items between INDEX1 and INDEX2 (included).'u'Delete menu items between INDEX1 and INDEX2 (included).'b'Return the resource value of a menu item for OPTION at INDEX.'u'Return the resource value of a menu item for OPTION at INDEX.'b'entrycget'u'entrycget'b'Configure a menu item at INDEX.'u'Configure a menu item at INDEX.'b'entryconfigure'u'entryconfigure'b'Return the index of a menu item identified by INDEX.'u'Return the index of a menu item identified by INDEX.'b'Invoke a menu item identified by INDEX and execute + the associated command.'u'Invoke a menu item identified by INDEX and execute + the associated command.'b'Display a menu at position X,Y.'u'Display a menu at position X,Y.'b'post'u'post'b'Return the type of the menu item at INDEX.'u'Return the type of the menu item at INDEX.'b'Unmap a menu.'u'Unmap a menu.'b'unpost'u'unpost'b'Return the x-position of the leftmost pixel of the menu item + at INDEX.'u'Return the x-position of the leftmost pixel of the menu item + at INDEX.'b'xposition'u'xposition'b'Return the y-position of the topmost pixel of the menu item at INDEX.'u'Return the y-position of the topmost pixel of the menu item at INDEX.'b'yposition'u'yposition'b'Menubutton widget, obsolete since Tk8.0.'u'Menubutton widget, obsolete since Tk8.0.'b'menubutton'u'menubutton'b'Message widget to display multiline text. Obsolete since Label does it too.'u'Message widget to display multiline text. 
Obsolete since Label does it too.'b'message'u'message'b'Radiobutton widget which shows only one of several buttons in on-state.'u'Radiobutton widget which shows only one of several buttons in on-state.'b'Construct a radiobutton widget with the parent MASTER. + + Valid resource names: activebackground, activeforeground, anchor, + background, bd, bg, bitmap, borderwidth, command, cursor, + disabledforeground, fg, font, foreground, height, + highlightbackground, highlightcolor, highlightthickness, image, + indicatoron, justify, padx, pady, relief, selectcolor, selectimage, + state, takefocus, text, textvariable, underline, value, variable, + width, wraplength.'u'Construct a radiobutton widget with the parent MASTER. + + Valid resource names: activebackground, activeforeground, anchor, + background, bd, bg, bitmap, borderwidth, command, cursor, + disabledforeground, fg, font, foreground, height, + highlightbackground, highlightcolor, highlightthickness, image, + indicatoron, justify, padx, pady, relief, selectcolor, selectimage, + state, takefocus, text, textvariable, underline, value, variable, + width, wraplength.'b'Scale widget which can display a numerical scale.'u'Scale widget which can display a numerical scale.'b'Construct a scale widget with the parent MASTER. + + Valid resource names: activebackground, background, bigincrement, bd, + bg, borderwidth, command, cursor, digits, fg, font, foreground, from, + highlightbackground, highlightcolor, highlightthickness, label, + length, orient, relief, repeatdelay, repeatinterval, resolution, + showvalue, sliderlength, sliderrelief, state, takefocus, + tickinterval, to, troughcolor, variable, width.'u'Construct a scale widget with the parent MASTER. + + Valid resource names: activebackground, background, bigincrement, bd, + bg, borderwidth, command, cursor, digits, fg, font, foreground, from, + highlightbackground, highlightcolor, highlightthickness, label, + length, orient, relief, repeatdelay, repeatinterval, resolution, + showvalue, sliderlength, sliderrelief, state, takefocus, + tickinterval, to, troughcolor, variable, width.'b'Get the current value as integer or float.'u'Get the current value as integer or float.'b'Set the value to VALUE.'u'Set the value to VALUE.'b'Return a tuple (X,Y) of the point along the centerline of the + trough that corresponds to VALUE or the current value if None is + given.'u'Return a tuple (X,Y) of the point along the centerline of the + trough that corresponds to VALUE or the current value if None is + given.'b'Return where the point X,Y lies. Valid return values are "slider", + "though1" and "though2".'u'Return where the point X,Y lies. Valid return values are "slider", + "though1" and "though2".'b'identify'u'identify'b'Scrollbar widget which displays a slider at a certain position.'u'Scrollbar widget which displays a slider at a certain position.'b'Construct a scrollbar widget with the parent MASTER. + + Valid resource names: activebackground, activerelief, + background, bd, bg, borderwidth, command, cursor, + elementborderwidth, highlightbackground, + highlightcolor, highlightthickness, jump, orient, + relief, repeatdelay, repeatinterval, takefocus, + troughcolor, width.'u'Construct a scrollbar widget with the parent MASTER. 
+ + Valid resource names: activebackground, activerelief, + background, bd, bg, borderwidth, command, cursor, + elementborderwidth, highlightbackground, + highlightcolor, highlightthickness, jump, orient, + relief, repeatdelay, repeatinterval, takefocus, + troughcolor, width.'b'scrollbar'u'scrollbar'b'Marks the element indicated by index as active. + The only index values understood by this method are "arrow1", + "slider", or "arrow2". If any other value is specified then no + element of the scrollbar will be active. If index is not specified, + the method returns the name of the element that is currently active, + or None if no element is active.'u'Marks the element indicated by index as active. + The only index values understood by this method are "arrow1", + "slider", or "arrow2". If any other value is specified then no + element of the scrollbar will be active. If index is not specified, + the method returns the name of the element that is currently active, + or None if no element is active.'b'Return the fractional change of the scrollbar setting if it + would be moved by DELTAX or DELTAY pixels.'u'Return the fractional change of the scrollbar setting if it + would be moved by DELTAX or DELTAY pixels.'b'Return the fractional value which corresponds to a slider + position of X,Y.'u'Return the fractional value which corresponds to a slider + position of X,Y.'b'fraction'u'fraction'b'Return the element under position X,Y as one of + "arrow1","slider","arrow2" or "".'u'Return the element under position X,Y as one of + "arrow1","slider","arrow2" or "".'b'Return the current fractional values (upper and lower end) + of the slider position.'u'Return the current fractional values (upper and lower end) + of the slider position.'b'Set the fractional values of the slider position (upper and + lower ends as value between 0 and 1).'u'Set the fractional values of the slider position (upper and + lower ends as value between 0 and 1).'b'Text widget which can display text in various forms.'u'Text widget which can display text in various forms.'b'Construct a text widget with the parent MASTER. + + STANDARD OPTIONS + + background, borderwidth, cursor, + exportselection, font, foreground, + highlightbackground, highlightcolor, + highlightthickness, insertbackground, + insertborderwidth, insertofftime, + insertontime, insertwidth, padx, pady, + relief, selectbackground, + selectborderwidth, selectforeground, + setgrid, takefocus, + xscrollcommand, yscrollcommand, + + WIDGET-SPECIFIC OPTIONS + + autoseparators, height, maxundo, + spacing1, spacing2, spacing3, + state, tabs, undo, width, wrap, + + 'u'Construct a text widget with the parent MASTER. + + STANDARD OPTIONS + + background, borderwidth, cursor, + exportselection, font, foreground, + highlightbackground, highlightcolor, + highlightthickness, insertbackground, + insertborderwidth, insertofftime, + insertontime, insertwidth, padx, pady, + relief, selectbackground, + selectborderwidth, selectforeground, + setgrid, takefocus, + xscrollcommand, yscrollcommand, + + WIDGET-SPECIFIC OPTIONS + + autoseparators, height, maxundo, + spacing1, spacing2, spacing3, + state, tabs, undo, width, wrap, + + 'b'Return a tuple of (x,y,width,height) which gives the bounding + box of the visible part of the character at the given index.'u'Return a tuple of (x,y,width,height) which gives the bounding + box of the visible part of the character at the given index.'b'Return whether between index INDEX1 and index INDEX2 the + relation OP is satisfied. 
OP is one of <, <=, ==, >=, >, or !=.'u'Return whether between index INDEX1 and index INDEX2 the + relation OP is satisfied. OP is one of <, <=, ==, >=, >, or !=.'b'compare'u'compare'b'Counts the number of relevant things between the two indices. + If index1 is after index2, the result will be a negative number + (and this holds for each of the possible options). + + The actual items which are counted depends on the options given by + args. The result is a list of integers, one for the result of each + counting option given. Valid counting options are "chars", + "displaychars", "displayindices", "displaylines", "indices", + "lines", "xpixels" and "ypixels". There is an additional possible + option "update", which if given then all subsequent options ensure + that any possible out of date information is recalculated.'u'Counts the number of relevant things between the two indices. + If index1 is after index2, the result will be a negative number + (and this holds for each of the possible options). + + The actual items which are counted depends on the options given by + args. The result is a list of integers, one for the result of each + counting option given. Valid counting options are "chars", + "displaychars", "displayindices", "displaylines", "indices", + "lines", "xpixels" and "ypixels". There is an additional possible + option "update", which if given then all subsequent options ensure + that any possible out of date information is recalculated.'b'count'u'count'b'Turn on the internal consistency checks of the B-Tree inside the text + widget according to BOOLEAN.'u'Turn on the internal consistency checks of the B-Tree inside the text + widget according to BOOLEAN.'b'debug'u'debug'b'Delete the characters between INDEX1 and INDEX2 (not included).'u'Delete the characters between INDEX1 and INDEX2 (not included).'b'Return tuple (x,y,width,height,baseline) giving the bounding box + and baseline position of the visible part of the line containing + the character at INDEX.'u'Return tuple (x,y,width,height,baseline) giving the bounding box + and baseline position of the visible part of the line containing + the character at INDEX.'b'dlineinfo'u'dlineinfo'b'Return the contents of the widget between index1 and index2. + + The type of contents returned in filtered based on the keyword + parameters; if 'all', 'image', 'mark', 'tag', 'text', or 'window' are + given and true, then the corresponding items are returned. The result + is a list of triples of the form (key, value, index). If none of the + keywords are true then 'all' is used by default. + + If the 'command' argument is given, it is called once for each element + of the list of triples, with the values of each triple serving as the + arguments to the function. In this case the list is not returned.'u'Return the contents of the widget between index1 and index2. + + The type of contents returned in filtered based on the keyword + parameters; if 'all', 'image', 'mark', 'tag', 'text', or 'window' are + given and true, then the corresponding items are returned. The result + is a list of triples of the form (key, value, index). If none of the + keywords are true then 'all' is used by default. + + If the 'command' argument is given, it is called once for each element + of the list of triples, with the values of each triple serving as the + arguments to the function. In this case the list is not returned.'b'-command'u'-command'b'Internal method + + This method controls the undo mechanism and + the modified flag. 
The exact behavior of the + command depends on the option argument that + follows the edit argument. The following forms + of the command are currently supported: + + edit_modified, edit_redo, edit_reset, edit_separator + and edit_undo + + 'u'Internal method + + This method controls the undo mechanism and + the modified flag. The exact behavior of the + command depends on the option argument that + follows the edit argument. The following forms + of the command are currently supported: + + edit_modified, edit_redo, edit_reset, edit_separator + and edit_undo + + 'b'edit'u'edit'b'Get or Set the modified flag + + If arg is not specified, returns the modified + flag of the widget. The insert, delete, edit undo and + edit redo commands or the user can set or clear the + modified flag. If boolean is specified, sets the + modified flag of the widget to arg. + 'u'Get or Set the modified flag + + If arg is not specified, returns the modified + flag of the widget. The insert, delete, edit undo and + edit redo commands or the user can set or clear the + modified flag. If boolean is specified, sets the + modified flag of the widget to arg. + 'b'modified'u'modified'b'Redo the last undone edit + + When the undo option is true, reapplies the last + undone edits provided no other edits were done since + then. Generates an error when the redo stack is empty. + Does nothing when the undo option is false. + 'u'Redo the last undone edit + + When the undo option is true, reapplies the last + undone edits provided no other edits were done since + then. Generates an error when the redo stack is empty. + Does nothing when the undo option is false. + 'b'redo'u'redo'b'Clears the undo and redo stacks + 'u'Clears the undo and redo stacks + 'b'reset'u'reset'b'Inserts a separator (boundary) on the undo stack. + + Does nothing when the undo option is false + 'u'Inserts a separator (boundary) on the undo stack. + + Does nothing when the undo option is false + 'b'Undoes the last edit action + + If the undo option is true. An edit action is defined + as all the insert and delete commands that are recorded + on the undo stack in between two separators. Generates + an error when the undo stack is empty. Does nothing + when the undo option is false + 'u'Undoes the last edit action + + If the undo option is true. An edit action is defined + as all the insert and delete commands that are recorded + on the undo stack in between two separators. Generates + an error when the undo stack is empty. Does nothing + when the undo option is false + 'b'undo'u'undo'b'Return the text from INDEX1 to INDEX2 (not included).'u'Return the text from INDEX1 to INDEX2 (not included).'b'Return the value of OPTION of an embedded image at INDEX.'u'Return the value of OPTION of an embedded image at INDEX.'b'Configure an embedded image at INDEX.'u'Configure an embedded image at INDEX.'b'Create an embedded image at INDEX.'u'Create an embedded image at INDEX.'b'Return all names of embedded images in this widget.'u'Return all names of embedded images in this widget.'b'Return the index in the form line.char for INDEX.'u'Return the index in the form line.char for INDEX.'b'Insert CHARS before the characters at INDEX. An additional + tag can be given in ARGS. Additional CHARS and tags can follow in ARGS.'u'Insert CHARS before the characters at INDEX. An additional + tag can be given in ARGS. Additional CHARS and tags can follow in ARGS.'b'Change the gravity of a mark MARKNAME to DIRECTION (LEFT or RIGHT). 
+ Return the current value if None is given for DIRECTION.'u'Change the gravity of a mark MARKNAME to DIRECTION (LEFT or RIGHT). + Return the current value if None is given for DIRECTION.'b'gravity'u'gravity'b'Return all mark names.'u'Return all mark names.'b'Set mark MARKNAME before the character at INDEX.'u'Set mark MARKNAME before the character at INDEX.'b'Delete all marks in MARKNAMES.'u'Delete all marks in MARKNAMES.'b'unset'u'unset'b'Return the name of the next mark after INDEX.'u'Return the name of the next mark after INDEX.'b'next'u'next'b'Return the name of the previous mark before INDEX.'u'Return the name of the previous mark before INDEX.'b'previous'u'previous'b'Creates a peer text widget with the given newPathName, and any + optional standard configuration options. By default the peer will + have the same start and end line as the parent widget, but + these can be overridden with the standard configuration options.'u'Creates a peer text widget with the given newPathName, and any + optional standard configuration options. By default the peer will + have the same start and end line as the parent widget, but + these can be overridden with the standard configuration options.'b'peer'u'peer'b'Returns a list of peers of this widget (this does not include + the widget itself).'u'Returns a list of peers of this widget (this does not include + the widget itself).'b'Replaces the range of characters between index1 and index2 with + the given characters and tags specified by args. + + See the method insert for some more information about args, and the + method delete for information about the indices.'u'Replaces the range of characters between index1 and index2 with + the given characters and tags specified by args. + + See the method insert for some more information about args, and the + method delete for information about the indices.'b'replace'u'replace'b'Adjust the view of the text to 10 times the + difference between X and Y and the coordinates given in + scan_mark.'u'Adjust the view of the text to 10 times the + difference between X and Y and the coordinates given in + scan_mark.'b'Search PATTERN beginning from INDEX until STOPINDEX. + Return the index of the first character of a match or an + empty string.'u'Search PATTERN beginning from INDEX until STOPINDEX. + Return the index of the first character of a match or an + empty string.'b'search'u'search'b'-forwards'u'-forwards'b'-backwards'u'-backwards'b'-exact'u'-exact'b'-regexp'u'-regexp'b'-nocase'u'-nocase'b'-elide'u'-elide'b'-count'u'-count'b'Scroll such that the character at INDEX is visible.'u'Scroll such that the character at INDEX is visible.'b'Add tag TAGNAME to all characters between INDEX1 and index2 in ARGS. + Additional pairs of indices may follow in ARGS.'u'Add tag TAGNAME to all characters between INDEX1 and index2 in ARGS. + Additional pairs of indices may follow in ARGS.'b'Unbind for all characters with TAGNAME for event SEQUENCE the + function identified with FUNCID.'u'Unbind for all characters with TAGNAME for event SEQUENCE the + function identified with FUNCID.'b'Bind to all characters with TAGNAME at event SEQUENCE a call to function FUNC. + + An additional boolean parameter ADD specifies whether FUNC will be + called additionally to the other bound function or whether it will + replace the previous function. See bind for the return value.'u'Bind to all characters with TAGNAME at event SEQUENCE a call to function FUNC. 
+ + An additional boolean parameter ADD specifies whether FUNC will be + called additionally to the other bound function or whether it will + replace the previous function. See bind for the return value.'b'Return the value of OPTION for tag TAGNAME.'u'Return the value of OPTION for tag TAGNAME.'b'Configure a tag TAGNAME.'u'Configure a tag TAGNAME.'b'Delete all tags in TAGNAMES.'u'Delete all tags in TAGNAMES.'b'Change the priority of tag TAGNAME such that it is lower + than the priority of BELOWTHIS.'u'Change the priority of tag TAGNAME such that it is lower + than the priority of BELOWTHIS.'b'Return a list of all tag names.'u'Return a list of all tag names.'b'Return a list of start and end index for the first sequence of + characters between INDEX1 and INDEX2 which all have tag TAGNAME. + The text is searched forward from INDEX1.'u'Return a list of start and end index for the first sequence of + characters between INDEX1 and INDEX2 which all have tag TAGNAME. + The text is searched forward from INDEX1.'b'nextrange'u'nextrange'b'Return a list of start and end index for the first sequence of + characters between INDEX1 and INDEX2 which all have tag TAGNAME. + The text is searched backwards from INDEX1.'u'Return a list of start and end index for the first sequence of + characters between INDEX1 and INDEX2 which all have tag TAGNAME. + The text is searched backwards from INDEX1.'b'prevrange'u'prevrange'b'Change the priority of tag TAGNAME such that it is higher + than the priority of ABOVETHIS.'u'Change the priority of tag TAGNAME such that it is higher + than the priority of ABOVETHIS.'b'Return a list of ranges of text which have tag TAGNAME.'u'Return a list of ranges of text which have tag TAGNAME.'b'ranges'u'ranges'b'Remove tag TAGNAME from all characters between INDEX1 and INDEX2.'u'Remove tag TAGNAME from all characters between INDEX1 and INDEX2.'b'Return the value of OPTION of an embedded window at INDEX.'u'Return the value of OPTION of an embedded window at INDEX.'b'Configure an embedded window at INDEX.'u'Configure an embedded window at INDEX.'b'Create a window at INDEX.'u'Create a window at INDEX.'b'Return all names of embedded windows in this widget.'u'Return all names of embedded windows in this widget.'b'Obsolete function, use see.'u'Obsolete function, use see.'b'-pickplace'u'-pickplace'b'Internal class. It wraps the command in the widget OptionMenu.'u'Internal class. 
It wraps the command in the widget OptionMenu.'b'OptionMenu which allows the user to select a value from a menu.'u'OptionMenu which allows the user to select a value from a menu.'b'Construct an optionmenu widget with the parent MASTER, with + the resource textvariable set to VARIABLE, the initially selected + value VALUE, the other menu values VALUES and an additional + keyword argument command.'u'Construct an optionmenu widget with the parent MASTER, with + the resource textvariable set to VARIABLE, the initially selected + value VALUE, the other menu values VALUES and an additional + keyword argument command.'b'borderwidth'u'borderwidth'b'textvariable'u'textvariable'b'indicatoron'u'indicatoron'b'relief'u'relief'b'c'u'c'b'highlightthickness'u'highlightthickness'b'tk_optionMenu'u'tk_optionMenu'b'unknown option -'u'unknown option -'b'Destroy this widget and the associated menu.'u'Destroy this widget and the associated menu.'b'Base class for images.'u'Base class for images.'b'create image'u'create image'b'pyimage%r'u'pyimage%r'b'Configure the image.'u'Configure the image.'b'config'u'config'b'Return the height of the image.'u'Return the height of the image.'b'Return the type of the image, e.g. "photo" or "bitmap".'u'Return the type of the image, e.g. "photo" or "bitmap".'b'Return the width of the image.'u'Return the width of the image.'b'Widget which can display images in PGM, PPM, GIF, PNG format.'u'Widget which can display images in PGM, PPM, GIF, PNG format.'b'Create an image with NAME. + + Valid resource names: data, format, file, gamma, height, palette, + width.'u'Create an image with NAME. + + Valid resource names: data, format, file, gamma, height, palette, + width.'b'photo'u'photo'b'Display a transparent image.'u'Display a transparent image.'b'blank'u'blank'b'Return the value of OPTION.'u'Return the value of OPTION.'b'Return a new PhotoImage with the same image as this widget.'u'Return a new PhotoImage with the same image as this widget.'b'copy'u'copy'b'Return a new PhotoImage with the same image as this widget + but zoom it with a factor of x in the X direction and y in the Y + direction. If y is not given, the default value is the same as x. + 'u'Return a new PhotoImage with the same image as this widget + but zoom it with a factor of x in the X direction and y in the Y + direction. If y is not given, the default value is the same as x. + 'b'-zoom'u'-zoom'b'Return a new PhotoImage based on the same image as this widget + but use only every Xth or Yth pixel. If y is not given, the + default value is the same as x. + 'u'Return a new PhotoImage based on the same image as this widget + but use only every Xth or Yth pixel. If y is not given, the + default value is the same as x. + 'b'-subsample'u'-subsample'b'Return the color (red, green, blue) of the pixel at X,Y.'u'Return the color (red, green, blue) of the pixel at X,Y.'b'Put row formatted colors to image starting from + position TO, e.g. image.put("{red green} {blue yellow}", to=(4,6))'u'Put row formatted colors to image starting from + position TO, e.g. 
image.put("{red green} {blue yellow}", to=(4,6))'b'put'u'put'b'-to'u'-to'b'Write image to file FILENAME in FORMAT starting from + position FROM_COORDS.'u'Write image to file FILENAME in FORMAT starting from + position FROM_COORDS.'b'write'u'write'b'-format'u'-format'b'-from'u'-from'b'Return True if the pixel at x,y is transparent.'u'Return True if the pixel at x,y is transparent.'b'transparency'u'transparency'b'Set the transparency of the pixel at x,y.'u'Set the transparency of the pixel at x,y.'b'Widget which can display images in XBM format.'u'Widget which can display images in XBM format.'b'Create a bitmap with NAME. + + Valid resource names: background, data, file, foreground, maskdata, maskfile.'u'Create a bitmap with NAME. + + Valid resource names: background, data, file, foreground, maskdata, maskfile.'b'use image_names()'u'use image_names()'b'use image_types()'u'use image_types()'b'spinbox widget.'u'spinbox widget.'b'Construct a spinbox widget with the parent MASTER. + + STANDARD OPTIONS + + activebackground, background, borderwidth, + cursor, exportselection, font, foreground, + highlightbackground, highlightcolor, + highlightthickness, insertbackground, + insertborderwidth, insertofftime, + insertontime, insertwidth, justify, relief, + repeatdelay, repeatinterval, + selectbackground, selectborderwidth + selectforeground, takefocus, textvariable + xscrollcommand. + + WIDGET-SPECIFIC OPTIONS + + buttonbackground, buttoncursor, + buttondownrelief, buttonuprelief, + command, disabledbackground, + disabledforeground, format, from, + invalidcommand, increment, + readonlybackground, state, to, + validate, validatecommand values, + width, wrap, + 'u'Construct a spinbox widget with the parent MASTER. + + STANDARD OPTIONS + + activebackground, background, borderwidth, + cursor, exportselection, font, foreground, + highlightbackground, highlightcolor, + highlightthickness, insertbackground, + insertborderwidth, insertofftime, + insertontime, insertwidth, justify, relief, + repeatdelay, repeatinterval, + selectbackground, selectborderwidth + selectforeground, takefocus, textvariable + xscrollcommand. + + WIDGET-SPECIFIC OPTIONS + + buttonbackground, buttoncursor, + buttondownrelief, buttonuprelief, + command, disabledbackground, + disabledforeground, format, from, + invalidcommand, increment, + readonlybackground, state, to, + validate, validatecommand values, + width, wrap, + 'b'spinbox'u'spinbox'b'Return a tuple of X1,Y1,X2,Y2 coordinates for a + rectangle which encloses the character given by index. + + The first two elements of the list give the x and y + coordinates of the upper-left corner of the screen + area covered by the character (in pixels relative + to the widget) and the last two elements give the + width and height of the character, in pixels. The + bounding box may refer to a region outside the + visible area of the window. + 'u'Return a tuple of X1,Y1,X2,Y2 coordinates for a + rectangle which encloses the character given by index. + + The first two elements of the list give the x and y + coordinates of the upper-left corner of the screen + area covered by the character (in pixels relative + to the widget) and the last two elements give the + width and height of the character, in pixels. The + bounding box may refer to a region outside the + visible area of the window. + 'b'Delete one or more elements of the spinbox. + + First is the index of the first character to delete, + and last is the index of the character just after + the last one to delete. 
If last isn't specified it + defaults to first+1, i.e. a single character is + deleted. This command returns an empty string. + 'u'Delete one or more elements of the spinbox. + + First is the index of the first character to delete, + and last is the index of the character just after + the last one to delete. If last isn't specified it + defaults to first+1, i.e. a single character is + deleted. This command returns an empty string. + 'b'Returns the spinbox's string'u'Returns the spinbox's string'b'Alter the position of the insertion cursor. + + The insertion cursor will be displayed just before + the character given by index. Returns an empty string + 'u'Alter the position of the insertion cursor. + + The insertion cursor will be displayed just before + the character given by index. Returns an empty string + 'b'Returns the name of the widget at position x, y + + Return value is one of: none, buttondown, buttonup, entry + 'u'Returns the name of the widget at position x, y + + Return value is one of: none, buttondown, buttonup, entry + 'b'Returns the numerical index corresponding to index + 'u'Returns the numerical index corresponding to index + 'b'Insert string s at index + + Returns an empty string. + 'u'Insert string s at index + + Returns an empty string. + 'b'Causes the specified element to be invoked + + The element could be buttondown or buttonup + triggering the action associated with it. + 'u'Causes the specified element to be invoked + + The element could be buttondown or buttonup + triggering the action associated with it. + 'b'Records x and the current view in the spinbox window; + + used in conjunction with later scan dragto commands. + Typically this command is associated with a mouse button + press in the widget. It returns an empty string. + 'u'Records x and the current view in the spinbox window; + + used in conjunction with later scan dragto commands. + Typically this command is associated with a mouse button + press in the widget. It returns an empty string. + 'b'Compute the difference between the given x argument + and the x argument to the last scan mark command + + It then adjusts the view left or right by 10 times the + difference in x-coordinates. This command is typically + associated with mouse motion events in the widget, to + produce the effect of dragging the spinbox at high speed + through the window. The return value is an empty string. + 'u'Compute the difference between the given x argument + and the x argument to the last scan mark command + + It then adjusts the view left or right by 10 times the + difference in x-coordinates. This command is typically + associated with mouse motion events in the widget, to + produce the effect of dragging the spinbox at high speed + through the window. The return value is an empty string. + 'b'Locate the end of the selection nearest to the character + given by index, + + Then adjust that end of the selection to be at index + (i.e including but not going beyond index). The other + end of the selection is made the anchor point for future + select to commands. If the selection isn't currently in + the spinbox, then a new selection is created to include + the characters between index and the most recent selection + anchor point, inclusive. + 'u'Locate the end of the selection nearest to the character + given by index, + + Then adjust that end of the selection to be at index + (i.e including but not going beyond index). The other + end of the selection is made the anchor point for future + select to commands. 
If the selection isn't currently in + the spinbox, then a new selection is created to include + the characters between index and the most recent selection + anchor point, inclusive. + 'b'Clear the selection + + If the selection isn't in this widget then the + command has no effect. + 'u'Clear the selection + + If the selection isn't in this widget then the + command has no effect. + 'b'Sets or gets the currently selected element. + + If a spinbutton element is specified, it will be + displayed depressed. + 'u'Sets or gets the currently selected element. + + If a spinbutton element is specified, it will be + displayed depressed. + 'b'element'u'element'b'Return True if there are characters selected in the spinbox, False + otherwise.'u'Return True if there are characters selected in the spinbox, False + otherwise.'b'labelframe widget.'u'labelframe widget.'b'Construct a labelframe widget with the parent MASTER. + + STANDARD OPTIONS + + borderwidth, cursor, font, foreground, + highlightbackground, highlightcolor, + highlightthickness, padx, pady, relief, + takefocus, text + + WIDGET-SPECIFIC OPTIONS + + background, class, colormap, container, + height, labelanchor, labelwidget, + visual, width + 'u'Construct a labelframe widget with the parent MASTER. + + STANDARD OPTIONS + + borderwidth, cursor, font, foreground, + highlightbackground, highlightcolor, + highlightthickness, padx, pady, relief, + takefocus, text + + WIDGET-SPECIFIC OPTIONS + + background, class, colormap, container, + height, labelanchor, labelwidget, + visual, width + 'b'labelframe'u'labelframe'b'panedwindow widget.'u'panedwindow widget.'b'Construct a panedwindow widget with the parent MASTER. + + STANDARD OPTIONS + + background, borderwidth, cursor, height, + orient, relief, width + + WIDGET-SPECIFIC OPTIONS + + handlepad, handlesize, opaqueresize, + sashcursor, sashpad, sashrelief, + sashwidth, showhandle, + 'u'Construct a panedwindow widget with the parent MASTER. + + STANDARD OPTIONS + + background, borderwidth, cursor, height, + orient, relief, width + + WIDGET-SPECIFIC OPTIONS + + handlepad, handlesize, opaqueresize, + sashcursor, sashpad, sashrelief, + sashwidth, showhandle, + 'b'panedwindow'u'panedwindow'b'Add a child widget to the panedwindow in a new pane. + + The child argument is the name of the child widget + followed by pairs of arguments that specify how to + manage the windows. The possible options and values + are the ones accepted by the paneconfigure method. + 'u'Add a child widget to the panedwindow in a new pane. + + The child argument is the name of the child widget + followed by pairs of arguments that specify how to + manage the windows. The possible options and values + are the ones accepted by the paneconfigure method. + 'b'Remove the pane containing child from the panedwindow + + All geometry management options for child will be forgotten. + 'u'Remove the pane containing child from the panedwindow + + All geometry management options for child will be forgotten. + 'b'Identify the panedwindow component at point x, y + + If the point is over a sash or a sash handle, the result + is a two element list containing the index of the sash or + handle, and a word indicating whether it is over a sash + or a handle, such as {0 sash} or {2 handle}. If the point + is over any other part of the panedwindow, the result is + an empty list. 
+ 'u'Identify the panedwindow component at point x, y + + If the point is over a sash or a sash handle, the result + is a two element list containing the index of the sash or + handle, and a word indicating whether it is over a sash + or a handle, such as {0 sash} or {2 handle}. If the point + is over any other part of the panedwindow, the result is + an empty list. + 'b'proxy'u'proxy'b'Return the x and y pair of the most recent proxy location + 'u'Return the x and y pair of the most recent proxy location + 'b'coord'u'coord'b'Remove the proxy from the display. + 'u'Remove the proxy from the display. + 'b'Place the proxy at the given x and y coordinates. + 'u'Place the proxy at the given x and y coordinates. + 'b'sash'u'sash'b'Return the current x and y pair for the sash given by index. + + Index must be an integer between 0 and 1 less than the + number of panes in the panedwindow. The coordinates given are + those of the top left corner of the region containing the sash. + pathName sash dragto index x y This command computes the + difference between the given coordinates and the coordinates + given to the last sash coord command for the given sash. It then + moves that sash the computed difference. The return value is the + empty string. + 'u'Return the current x and y pair for the sash given by index. + + Index must be an integer between 0 and 1 less than the + number of panes in the panedwindow. The coordinates given are + those of the top left corner of the region containing the sash. + pathName sash dragto index x y This command computes the + difference between the given coordinates and the coordinates + given to the last sash coord command for the given sash. It then + moves that sash the computed difference. The return value is the + empty string. + 'b'Records x and y for the sash given by index; + + Used in conjunction with later dragto commands to move the sash. + 'u'Records x and y for the sash given by index; + + Used in conjunction with later dragto commands to move the sash. + 'b'Place the sash given by index at the given coordinates + 'u'Place the sash given by index at the given coordinates + 'b'Query a management option for window. + + Option may be any value allowed by the paneconfigure subcommand + 'u'Query a management option for window. + + Option may be any value allowed by the paneconfigure subcommand + 'b'panecget'u'panecget'b'Query or modify the management options for window. + + If no option is specified, returns a list describing all + of the available options for pathName. If option is + specified with no value, then the command returns a list + describing the one named option (this list will be identical + to the corresponding sublist of the value returned if no + option is specified). If one or more option-value pairs are + specified, then the command modifies the given widget + option(s) to have the given value(s); in this case the + command returns an empty string. The following options + are supported: + + after window + Insert the window after the window specified. window + should be the name of a window already managed by pathName. + before window + Insert the window before the window specified. window + should be the name of a window already managed by pathName. + height size + Specify a height for the window. The height will be the + outer dimension of the window including its border, if + any. 
If size is an empty string, or if -height is not + specified, then the height requested internally by the + window will be used initially; the height may later be + adjusted by the movement of sashes in the panedwindow. + Size may be any value accepted by Tk_GetPixels. + minsize n + Specifies that the size of the window cannot be made + less than n. This constraint only affects the size of + the widget in the paned dimension -- the x dimension + for horizontal panedwindows, the y dimension for + vertical panedwindows. May be any value accepted by + Tk_GetPixels. + padx n + Specifies a non-negative value indicating how much + extra space to leave on each side of the window in + the X-direction. The value may have any of the forms + accepted by Tk_GetPixels. + pady n + Specifies a non-negative value indicating how much + extra space to leave on each side of the window in + the Y-direction. The value may have any of the forms + accepted by Tk_GetPixels. + sticky style + If a window's pane is larger than the requested + dimensions of the window, this option may be used + to position (or stretch) the window within its pane. + Style is a string that contains zero or more of the + characters n, s, e or w. The string can optionally + contains spaces or commas, but they are ignored. Each + letter refers to a side (north, south, east, or west) + that the window will "stick" to. If both n and s + (or e and w) are specified, the window will be + stretched to fill the entire height (or width) of + its cavity. + width size + Specify a width for the window. The width will be + the outer dimension of the window including its + border, if any. If size is an empty string, or + if -width is not specified, then the width requested + internally by the window will be used initially; the + width may later be adjusted by the movement of sashes + in the panedwindow. Size may be any value accepted by + Tk_GetPixels. + + 'u'Query or modify the management options for window. + + If no option is specified, returns a list describing all + of the available options for pathName. If option is + specified with no value, then the command returns a list + describing the one named option (this list will be identical + to the corresponding sublist of the value returned if no + option is specified). If one or more option-value pairs are + specified, then the command modifies the given widget + option(s) to have the given value(s); in this case the + command returns an empty string. The following options + are supported: + + after window + Insert the window after the window specified. window + should be the name of a window already managed by pathName. + before window + Insert the window before the window specified. window + should be the name of a window already managed by pathName. + height size + Specify a height for the window. The height will be the + outer dimension of the window including its border, if + any. If size is an empty string, or if -height is not + specified, then the height requested internally by the + window will be used initially; the height may later be + adjusted by the movement of sashes in the panedwindow. + Size may be any value accepted by Tk_GetPixels. + minsize n + Specifies that the size of the window cannot be made + less than n. This constraint only affects the size of + the widget in the paned dimension -- the x dimension + for horizontal panedwindows, the y dimension for + vertical panedwindows. May be any value accepted by + Tk_GetPixels. 
+ padx n + Specifies a non-negative value indicating how much + extra space to leave on each side of the window in + the X-direction. The value may have any of the forms + accepted by Tk_GetPixels. + pady n + Specifies a non-negative value indicating how much + extra space to leave on each side of the window in + the Y-direction. The value may have any of the forms + accepted by Tk_GetPixels. + sticky style + If a window's pane is larger than the requested + dimensions of the window, this option may be used + to position (or stretch) the window within its pane. + Style is a string that contains zero or more of the + characters n, s, e or w. The string can optionally + contains spaces or commas, but they are ignored. Each + letter refers to a side (north, south, east, or west) + that the window will "stick" to. If both n and s + (or e and w) are specified, the window will be + stretched to fill the entire height (or width) of + its cavity. + width size + Specify a width for the window. The width will be + the outer dimension of the window including its + border, if any. If size is an empty string, or + if -width is not specified, then the width requested + internally by the window will be used initially; the + width may later be adjusted by the movement of sashes + in the panedwindow. Size may be any value accepted by + Tk_GetPixels. + + 'b'paneconfigure'u'paneconfigure'b'Returns an ordered list of the child panes.'u'Returns an ordered list of the child panes.'b'panes'u'panes'b'This is Tcl/Tk version %s'u'This is Tcl/Tk version %s'b' +This should be a cedilla: ç'u' +This should be a cedilla: ç'b'Click me!'u'Click me!'b'[%s]'u'[%s]'b'QUIT'u'QUIT'u'tkinter.__init__'u'tkinter'Execute computations asynchronously using threads or processes.Brian Quinlan (brian@sweetapp.com)__author__concurrent.futures._baseFIRST_COMPLETEDFIRST_EXCEPTIONALL_COMPLETEDCancelledErrorInvalidStateErrorBrokenExecutorFutureExecutorwaitas_completedProcessPoolExecutorThreadPoolExecutorprocesspethreadtemodule has no attribute # Copyright 2009 Brian Quinlan. All Rights Reserved.b'Execute computations asynchronously using threads or processes.'u'Execute computations asynchronously using threads or processes.'b'Brian Quinlan (brian@sweetapp.com)'u'Brian Quinlan (brian@sweetapp.com)'b'FIRST_COMPLETED'u'FIRST_COMPLETED'b'FIRST_EXCEPTION'u'FIRST_EXCEPTION'b'ALL_COMPLETED'u'ALL_COMPLETED'b'CancelledError'u'CancelledError'b'TimeoutError'u'TimeoutError'b'BrokenExecutor'u'BrokenExecutor'b'Future'u'Future'b'Executor'u'Executor'b'wait'u'wait'b'as_completed'u'as_completed'b'ProcessPoolExecutor'u'ProcessPoolExecutor'b'ThreadPoolExecutor'u'ThreadPoolExecutor'b'__author__'u'__author__'b'__doc__'u'__doc__'b'module 'u'module 'b' has no attribute 'u' has no attribute 'u'concurrent.futures.__init__'u'concurrent.futures'u'concurrent'u'futures.__init__'u'futures'A package for parsing, handling, and generating email messages.base64mimecharsetencodersfeedparserheaderiteratorsmessage_from_filemessage_from_binary_filemessage_from_stringmessage_from_bytesmimequoprimimeutilskwsParse a string into a Message object model. + + Optional _class and strict are passed to the Parser constructor. + email.parserParserparsestrParse a bytes string into a Message object model. + + Optional _class and strict are passed to the Parser constructor. + BytesParserparsebytesfpRead a file and parse its contents into a Message object model. + + Optional _class and strict are passed to the Parser constructor. 
+ Read a binary file and parse its contents into a Message object model. + + Optional _class and strict are passed to the Parser constructor. + # Copyright (C) 2001-2007 Python Software Foundation# Author: Barry Warsaw# Contact: email-sig@python.org# Some convenience routines. Don't import Parser and Message as side-effects# of importing email since those cascadingly import most of the rest of the# email package.b'A package for parsing, handling, and generating email messages.'u'A package for parsing, handling, and generating email messages.'b'base64mime'u'base64mime'b'charset'u'charset'b'encoders'u'encoders'b'errors'u'errors'b'feedparser'u'feedparser'b'generator'u'generator'b'header'u'header'b'iterators'u'iterators'b'message_from_file'u'message_from_file'b'message_from_binary_file'u'message_from_binary_file'b'message_from_string'u'message_from_string'b'message_from_bytes'u'message_from_bytes'b'mime'u'mime'b'parser'u'parser'b'quoprimime'u'quoprimime'b'utils'u'utils'b'Parse a string into a Message object model. + + Optional _class and strict are passed to the Parser constructor. + 'u'Parse a string into a Message object model. + + Optional _class and strict are passed to the Parser constructor. + 'b'Parse a bytes string into a Message object model. + + Optional _class and strict are passed to the Parser constructor. + 'u'Parse a bytes string into a Message object model. + + Optional _class and strict are passed to the Parser constructor. + 'b'Read a file and parse its contents into a Message object model. + + Optional _class and strict are passed to the Parser constructor. + 'u'Read a file and parse its contents into a Message object model. + + Optional _class and strict are passed to the Parser constructor. + 'b'Read a binary file and parse its contents into a Message object model. + + Optional _class and strict are passed to the Parser constructor. + 'u'Read a binary file and parse its contents into a Message object model. + + Optional _class and strict are passed to the Parser constructor. + 'u'email.__init__'u'email'# Dummy file to make this directory a package.distutils + +The main package for the Python Module Distribution Utilities. Normally +used from a setup script as + + from distutils.core import setup + + setup (...) +b'distutils + +The main package for the Python Module Distribution Utilities. Normally +used from a setup script as + + from distutils.core import setup + + setup (...) +'u'distutils + +The main package for the Python Module Distribution Utilities. Normally +used from a setup script as + + from distutils.core import setup + + setup (...) +'u'distutils.__init__'u'distutils'The pgen2 package.# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.b'The pgen2 package.'u'The pgen2 package.'u'lib2to3.pgen2.__init__'u'lib2to3.pgen2'u'lib2to3'u'pgen2.__init__'u'pgen2'#emptyu'lib2to3.__init__' Standard "encodings" Package + + Standard Python encoding modules are stored in this package + directory. + + Codec modules must have names corresponding to normalized encoding + names as defined in the normalize_encoding() function below, e.g. + 'utf-8' must be implemented by the module 'utf_8.py'. + + Each codec module must export the following interface: + + * getregentry() -> codecs.CodecInfo object + The getregentry() API must return a CodecInfo object with encoder, decoder, + incrementalencoder, incrementaldecoder, streamwriter and streamreader + attributes which adhere to the Python Codec Interface Standard. 
+ + In addition, a module may optionally also define the following + APIs which are then used by the package's codec search function: + + * getaliases() -> sequence of encoding name strings to use as aliases + + Alias names returned by getaliases() must be normalized encoding + names as defined by normalize_encoding(). + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +codecsaliases--unknown--_unknown_import_tail_aliasesCodecRegistryErrornormalize_encoding Normalize an encoding name. + + Normalization works as follows: all non-alphanumeric + characters except the dot used for Python package names are + collapsed and replaced with a single underscore, e.g. ' -;#' + becomes '_'. Leading and trailing underscores are removed. + + Note that encoding names should be ASCII only. + + punctsearch_functionnorm_encodingaliased_encodingmodnamesmodnameencodings.fromlistmodgetregentryCodecInfomodule "%s" (%s) failed to registerincompatible codecs in module "%s" (%s)getaliasescodecaliasesaliaswin32_alias_mbcs_winapicp%sGetACPansi_code_pageencodings.mbcsencodingsmbcs#"# Cache lookup# Import the module:# First try to find an alias for the normalized encoding# name and lookup the module using the aliased name, then try to# lookup the module using the standard import scheme, i.e. first# try in the encodings package, then at top-level.# Import is absolute to prevent the possibly malicious import of a# module with side-effects that is not in the 'encodings' package.# ImportError may occur because 'encodings.(modname)' does not exist,# or because it imports a name that does not exist (see mbcs and oem)# Not a codec module# Cache misses# Now ask the module for the registry entry# Cache the codec registry entry# Register its aliases (without overwriting previously registered# aliases)# Return the registry entry# Register the search_function in the Python codec registry# Imports may fail while we are shutting downb' Standard "encodings" Package + + Standard Python encoding modules are stored in this package + directory. + + Codec modules must have names corresponding to normalized encoding + names as defined in the normalize_encoding() function below, e.g. + 'utf-8' must be implemented by the module 'utf_8.py'. + + Each codec module must export the following interface: + + * getregentry() -> codecs.CodecInfo object + The getregentry() API must return a CodecInfo object with encoder, decoder, + incrementalencoder, incrementaldecoder, streamwriter and streamreader + attributes which adhere to the Python Codec Interface Standard. + + In addition, a module may optionally also define the following + APIs which are then used by the package's codec search function: + + * getaliases() -> sequence of encoding name strings to use as aliases + + Alias names returned by getaliases() must be normalized encoding + names as defined by normalize_encoding(). + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +'u' Standard "encodings" Package + + Standard Python encoding modules are stored in this package + directory. + + Codec modules must have names corresponding to normalized encoding + names as defined in the normalize_encoding() function below, e.g. + 'utf-8' must be implemented by the module 'utf_8.py'. 
+ + Each codec module must export the following interface: + + * getregentry() -> codecs.CodecInfo object + The getregentry() API must return a CodecInfo object with encoder, decoder, + incrementalencoder, incrementaldecoder, streamwriter and streamreader + attributes which adhere to the Python Codec Interface Standard. + + In addition, a module may optionally also define the following + APIs which are then used by the package's codec search function: + + * getaliases() -> sequence of encoding name strings to use as aliases + + Alias names returned by getaliases() must be normalized encoding + names as defined by normalize_encoding(). + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +'b'--unknown--'u'--unknown--'b' Normalize an encoding name. + + Normalization works as follows: all non-alphanumeric + characters except the dot used for Python package names are + collapsed and replaced with a single underscore, e.g. ' -;#' + becomes '_'. Leading and trailing underscores are removed. + + Note that encoding names should be ASCII only. + + 'u' Normalize an encoding name. + + Normalization works as follows: all non-alphanumeric + characters except the dot used for Python package names are + collapsed and replaced with a single underscore, e.g. ' -;#' + becomes '_'. Leading and trailing underscores are removed. + + Note that encoding names should be ASCII only. + + 'b'ascii'u'ascii'b'encodings.'u'encodings.'b'module "%s" (%s) failed to register'u'module "%s" (%s) failed to register'b'incompatible codecs in module "%s" (%s)'u'incompatible codecs in module "%s" (%s)'b'win32'u'win32'b'cp%s'u'cp%s'u'encodings.__init__'u'encodings'# This directory is a Python package.u'concurrent.__init__'u'xmlrpc.__init__'u'xmlrpc'# $Id: __init__.py 3375 2008-02-13 08:05:08Z fredrik $# elementtree packageu'xml.etree.__init__'u'etree.__init__' +Logging package for Python. Based on PEP 282 and comments thereto in +comp.lang.python. + +Copyright (C) 2001-2017 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +weakrefTemplateFormatterStrFormatterBASIC_FORMATBufferingFormatterCRITICALDEBUGERRORFATALFileHandlerFilterHandlerINFOLogRecordLoggerLoggerAdapterNOTSETNullHandlerStreamHandlerWARNWARNINGaddLevelNamebasicConfigcaptureWarningscriticaldisableexceptionfatalgetLevelNamegetLoggergetLoggerClasslogmakeLogRecordsetLoggerClassshutdownwarninggetLogRecordFactorysetLogRecordFactorylastResortraiseExceptionsthreadingVinay Sajip production__status__0.5.1.207 February 2010__date___startTimelogThreadslogMultiprocessinglogProcesses5040_levelToName_nameToLevel + Return the textual or numeric representation of logging level 'level'. + + If the level is one of the predefined levels (CRITICAL, ERROR, WARNING, + INFO, DEBUG) then you get the corresponding string. If you have + associated levels with names using addLevelName then the name you have + associated with 'level' is returned. + + If a numeric value corresponding to one of the defined levels is passed + in, the corresponding string representation is returned. + + If a string representation of the level is passed in, the corresponding + numeric value is returned. + + If no matching numeric or string value is passed in, the string + 'Level %s' % level is returned. + Level %slevelName + Associate 'levelName' with 'level'. + + This is used when converting levels to text during message formatting. 
+ _acquireLock_releaseLockcurrentframeReturn the frame object for the caller's stack frame.tb_framef_backnormcaseco_filename_srcfile_checkLevelrvUnknown level: %rLevel not an integer or a valid string: %rRLock_lock + Acquire the module-level lock for serializing access to shared data. + + This should be released with _releaseLock(). + acquire + Release the module-level lock acquired by calling _acquireLock(). + register_at_fork_register_at_fork_reinit_lockinstanceWeakSet_at_fork_reinit_lock_weakset_after_at_fork_child_reinit_lockscreateLockIgnoring exception from logging atfork._reinit_lock() method:beforeafter_in_childafter_in_parent + A LogRecord instance represents an event being logged. + + LogRecord instances are created every time something is logged. They + contain all the information pertinent to the event being logged. The + main information passed in is in msg and args, which are combined + using str(msg) % args to create the message field of the record. The + record also includes information such as when the record was created, + the source line where the logging call was made, and any exception + information to be logged. + sinfo + Initialize a logging record with interesting information. + ctlevelnamelevelnoUnknown moduleexc_textstack_infofuncNamecreated1000msecsrelativeCreatedget_identcurrent_threadthreadNameprocessNameMainProcessmultiprocessingmpcurrent_processgetpidgetMessage + Return the message for this LogRecord. + + Return the message for this LogRecord after merging any user-supplied + arguments with the message. + _logRecordFactory + Set the factory to be used when instantiating a log record. + + :param factory: A callable which will be called to instantiate + a log record. + + Return the factory to be used when instantiating a log record. + + Make a LogRecord whose attributes are defined by the specified dictionary, + This function is useful for converting a logging event received over + a socket connection (which is sent as a dictionary) into a LogRecord + instance. + _str_formatterPercentStyle%(message)sdefault_format%(asctime)sasctime_format%(asctime)asctime_search%\(\w+\)[#0+ -]*(\*|\d+)?(\.(\*|\d+))?[diouxefgcrsa%]Ivalidation_patternfmt_fmtusesTimevalidateValidate the input format, ensure it matches the correct styleInvalid format '%s' for '%s' style_formatrecordFormatting field not found in record: %sStrFormatStyle{message}{asctime}{asctime^(.?[<>=^])?[+ -]?#?0?(\d+|{\w+})?[,_]?(\.(\d+|{\w+}))?[bcdefgnosx%]?$fmt_spec^(\d+|\w+)(\.\w+|\[[^]]+\])*$field_specValidate the input format, ensure it is the correct string formatting stylefieldsfieldnameconversioninvalid field name/expression: %rrsainvalid conversion: %rbad specifier: %rinvalid format: %sinvalid format: no fieldsStringTemplateStyle${message}${asctime}_tpl$asctimefinditergroupdictnamedbracedinvalid format: bare '$' not allowedsubstitute%(levelname)s:%(name)s:%(message)s%{levelname}:{name}:{message}${levelname}:${name}:${message}_STYLES + Formatter instances are used to convert a LogRecord to text. + + Formatters need to know how a LogRecord is constructed. They are + responsible for converting a LogRecord to (usually) a string which can + be interpreted by either a human or an external system. The base Formatter + allows a formatting string to be specified. If none is supplied, the + style-dependent default value, "%(message)s", "{message}", or + "${message}", is used. + + The Formatter can be initialized with a format string which makes use of + knowledge of the LogRecord attributes - e.g. 
the default value mentioned + above makes use of the fact that the user's message and arguments are pre- + formatted into a LogRecord's message attribute. Currently, the useful + attributes in a LogRecord are described by: + + %(name)s Name of the logger (logging channel) + %(levelno)s Numeric logging level for the message (DEBUG, INFO, + WARNING, ERROR, CRITICAL) + %(levelname)s Text logging level for the message ("DEBUG", "INFO", + "WARNING", "ERROR", "CRITICAL") + %(pathname)s Full pathname of the source file where the logging + call was issued (if available) + %(filename)s Filename portion of pathname + %(module)s Module (name portion of filename) + %(lineno)d Source line number where the logging call was issued + (if available) + %(funcName)s Function name + %(created)f Time when the LogRecord was created (time.time() + return value) + %(asctime)s Textual time when the LogRecord was created + %(msecs)d Millisecond portion of the creation time + %(relativeCreated)d Time in milliseconds when the LogRecord was created, + relative to the time the logging module was loaded + (typically at application startup time) + %(thread)d Thread ID (if available) + %(threadName)s Thread name (if available) + %(process)d Process ID (if available) + %(message)s The result of record.getMessage(), computed just as + the record is emitted + localtimeconverterdatefmt + Initialize the formatter with specified format strings. + + Initialize the formatter either with the specified format string, or a + default as described above. Allow for specialized date formatting with + the optional datefmt argument. If datefmt is omitted, you get an + ISO8601-like (or RFC 3339-like) format. + + Use a style parameter of '%', '{' or '$' to specify that you want to + use one of %-formatting, :meth:`str.format` (``{}``) formatting or + :class:`string.Template` formatting in your format string. + + .. versionchanged:: 3.2 + Added the ``style`` parameter. + Style must be one of: %s,_style%Y-%m-%d %H:%M:%Sdefault_time_format%s,%03ddefault_msec_formatformatTime + Return the creation time of the specified LogRecord as formatted text. + + This method should be called from format() by a formatter which + wants to make use of a formatted time. This method can be overridden + in formatters to provide for any specific requirement, but the + basic behaviour is as follows: if datefmt (a string) is specified, + it is used with time.strftime() to format the creation time of the + record. Otherwise, an ISO8601-like (or RFC 3339-like) format is used. + The resulting string is returned. This function uses a user-configurable + function to convert the creation time to a tuple. By default, + time.localtime() is used; to change this for a particular formatter + instance, set the 'converter' attribute to a function with the same + signature as time.localtime() or time.gmtime(). To change it for all + formatters, for example if you want all logging times to be shown in GMT, + set the 'converter' attribute in the Formatter class. + strftimeformatExceptionei + Format and return the specified exception information as a string. + + This default implementation just uses + traceback.print_exception() + + Check if the format uses the creation time of the record. + formatMessageformatStack + This method is provided as an extension point for specialized + formatting of stack information. + + The input data is a string as returned from a call to + :func:`traceback.print_stack`, but with the last trailing newline + removed. 
+ + The base implementation just returns the value passed in. + + Format the specified record as text. + + The record's attribute dictionary is used as the operand to a + string formatting operation which yields the returned string. + Before formatting the dictionary, a couple of preparatory steps + are carried out. The message attribute of the record is computed + using LogRecord.getMessage(). If the formatting string uses the + time (as determined by a call to usesTime(), formatTime() is + called to format the event time. If there is exception information, + it is formatted using formatException() and appended to the message. + asctime_defaultFormatter + A formatter suitable for formatting a number of records. + linefmt + Optionally specify a formatter which will be used to format each + individual record. + formatHeaderrecords + Return the header string for the specified records. + formatFooter + Return the footer string for the specified records. + + Format the specified records and return the result as a string. + + Filter instances are used to perform arbitrary filtering of LogRecords. + + Loggers and Handlers can optionally use Filter instances to filter + records as desired. The base filter class only allows events which are + below a certain point in the logger hierarchy. For example, a filter + initialized with "A.B" will allow events logged by loggers "A.B", + "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If + initialized with the empty string, all events are passed. + + Initialize a filter. + + Initialize with the name of the logger which, together with its + children, will have its events allowed through the filter. If no + name is specified, allow every event. + nlen + Determine if the specified record is to be logged. + + Returns True if the record should be logged, or False otherwise. + If deemed appropriate, the record may be modified in-place. + Filterer + A base class for loggers and handlers which allows them to share + common code. + + Initialize the list of filters to be an empty list. + filtersaddFilter + Add the specified filter to this handler. + removeFilter + Remove the specified filter from this handler. + + Determine if a record is loggable by consulting all the filters. + + The default is to allow the record to be logged; any filter can veto + this and the record is then dropped. Returns a zero value if a record + is to be dropped, else non-zero. + + .. versionchanged:: 3.2 + + Allow filters to be just callables. + WeakValueDictionary_handlers_handlerList_removeHandlerRefwr + Remove a handler reference from the internal cleanup list. + handlers_addHandlerRef + Add a handler to the internal cleanup list using a weak reference. + ref + Handler instances dispatch logging events to specific destinations. + + The base handler class. Acts as a placeholder which defines the Handler + interface. Handlers can optionally use Formatter instances to format + records as desired. By default, no formatter is specified; in this case, + the 'raw' message as determined by record.message is logged. + + Initializes the instance - basically setting the formatter to None + and the filter list to empty. + formatterget_nameset_name + Acquire a thread lock for serializing access to the underlying I/O. + + Acquire the I/O thread lock. + + Release the I/O thread lock. + setLevel + Set the logging level of this handler. level must be an int or a str. + + Format the specified record. + + If a formatter is set, use it. Otherwise, use the default formatter + for the module. 
+ emit + Do whatever it takes to actually log the specified logging record. + + This version is intended to be implemented by subclasses and so + raises a NotImplementedError. + emit must be implemented by Handler subclasses'emit must be implemented ''by Handler subclasses' + Conditionally emit the specified logging record. + + Emission depends on filters which may have been added to the handler. + Wrap the actual emission of the record with acquisition/release of + the I/O thread lock. Returns whether the filter passed the record for + emission. + setFormatter + Set the formatter for this handler. + + Ensure all logging output has been flushed. + + This version does nothing and is intended to be implemented by + subclasses. + + Tidy up any resources used by the handler. + + This version removes the handler from an internal map of handlers, + _handlers, which is used for handler lookup by name. Subclasses + should ensure that this gets called from overridden close() + methods. + handleError + Handle errors which occur during an emit() call. + + This method should be called from handlers when an exception is + encountered during an emit() call. If raiseExceptions is false, + exceptions get silently ignored. This is what is mostly wanted + for a logging system - most users will not care about errors in + the logging system, they are more interested in application errors. + You could, however, replace this with a custom handler if you wish. + The record which was being processed is passed in to this method. + --- Logging error --- +Call stack: +f_codeprint_stackLogged from file %s, line %s +Message: %r +Arguments: %s +'Message: %r\n''Arguments: %s\n'Unable to print the message and arguments - possible formatting error. +Use the traceback above to help find the error. +'Unable to print the message and arguments'' - possible formatting error.\nUse the'' traceback above to help find the error.\n'<%s (%s)> + A handler class which writes logging records, appropriately formatted, + to a stream. Note that this class does not close the stream, as + sys.stdout or sys.stderr may be used. + terminator + Initialize the handler. + + If stream is not specified, sys.stderr is used. + + Flushes the stream. + + Emit a record. + + If a formatter is specified, it is used to format the record. + The record is then written to the stream with a trailing newline. If + exception information is present, it is formatted using + traceback.print_exception and appended to the stream. If the stream + has an 'encoding' attribute, it is used to determine how to do the + output to the stream. + setStream + Sets the StreamHandler's stream to the specified value, + if it is different. + + Returns the old stream, if the stream was changed, or None + if it wasn't. + <%s %s(%s)> + A handler class which writes formatted logging records to disk files. + adelay + Open the specified file and use it as the stream for logging. + fspathabspathbaseFilename_open + Closes the stream. + + Open the current base file with the (original) mode and encoding. + Return the resulting stream. + + Emit a record. + + If the stream was not opened because 'delay' was specified in the + constructor, open it before calling the superclass's emit. + <%s %s (%s)>_StderrHandler + This class is like a StreamHandler using sys.stderr, but always uses + whatever sys.stderr is currently set to rather than the value of + sys.stderr at handler construction time. + + Initialize the handler. 
+ _defaultLastResortPlaceHolder + PlaceHolder instances are used in the Manager logger hierarchy to take + the place of nodes for which no loggers have been defined. This class is + intended for internal use only and not as part of the public API. + alogger + Initialize with the specified logger being a child of this placeholder. + loggerMap + Add the specified logger as a child of this placeholder. + klass + Set the class to be used when instantiating a logger. The class should + define __init__() such that only a name argument is required, and the + __init__() should call Logger.__init__() + logger not derived from logging.Logger: _loggerClass + Return the class to be used when instantiating a logger. + Manager + There is [under normal circumstances] just one Manager instance, which + holds the hierarchy of loggers. + rootnode + Initialize the manager with the root node of the logger hierarchy. + emittedNoHandlerWarningloggerDictloggerClasslogRecordFactory_disable + Get a logger with the specified name (channel name), creating it + if it doesn't yet exist. This name is a dot-separated hierarchical + name, such as "a", "a.b", "a.b.c" or similar. + + If a PlaceHolder existed for the specified name [i.e. the logger + didn't exist but a child of it did], replace it with the created + logger and fix up the parent/child references which pointed to the + placeholder to now point to the logger. + A logger name must be a stringph_fixupChildren_fixupParents + Set the class to be used when instantiating a logger with this Manager. + + Set the factory to be used when instantiating a log record with this + Manager. + + Ensure that there are either loggers or placeholders all the way + from the specified logger to the root of the logger hierarchy. + substr + Ensure that children of the placeholder ph are connected to the + specified logger. + namelen_clear_cache + Clear the cache for all loggers in loggerDict + Called when level changes are made + logger + Instances of the Logger class represent a single logging channel. A + "logging channel" indicates an area of an application. Exactly how an + "area" is defined is up to the application developer. Since an + application can have any number of areas, logging channels are identified + by a unique string. Application areas can be nested (e.g. an area + of "input processing" might include sub-areas "read CSV files", "read + XLS files" and "read Gnumeric files"). To cater for this natural nesting, + channel names are organized into a namespace hierarchy where levels are + separated by periods, much like the Java or Python package namespace. So + in the instance given above, channel names might be "input" for the upper + level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. + There is no arbitrary limit to the depth of nesting. + + Initialize the logger with a name and an optional level. + disabled + Set the logging level of this logger. level must be an int or a str. + + Log 'msg % args' with severity 'DEBUG'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.debug("Houston, we have a %s", "thorny problem", exc_info=1) + isEnabledFor_log + Log 'msg % args' with severity 'INFO'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.info("Houston, we have a %s", "interesting problem", exc_info=1) + + Log 'msg % args' with severity 'WARNING'. 
+ + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1) + The 'warn' method is deprecated, use 'warning' instead"The 'warn' method is deprecated, ""use 'warning' instead" + Log 'msg % args' with severity 'ERROR'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.error("Houston, we have a %s", "major problem", exc_info=1) + + Convenience method for logging an ERROR with exception information. + + Log 'msg % args' with severity 'CRITICAL'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.critical("Houston, we have a %s", "major disaster", exc_info=1) + + Log 'msg % args' with the integer severity 'level'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.log(level, "We have a %s", "mysterious problem", exc_info=1) + level must be an integerfindCaller + Find the stack frame of the caller so that we can note the source + file name, line number and function name. + orig_f(unknown file)(unknown function)coStack (most recent call last): +f_linenoco_namemakeRecordfnlno + A factory method which can be overridden in subclasses to create + specialized LogRecords. + Attempt to overwrite %r in LogRecord + Low-level logging routine which creates a LogRecord and then calls + all the handlers of this logger to handle the record. + + Call the handlers for the specified record. + + This method is used for unpickled records received from a socket, as + well as those created locally. Logger-level filtering is applied. + callHandlersaddHandlerhdlr + Add the specified handler to this logger. + removeHandler + Remove the specified handler from this logger. + hasHandlers + See if this logger has any handlers configured. + + Loop through all handlers for this logger and its parents in the + logger hierarchy. Return True if a handler was found, else False. + Stop searching up the hierarchy whenever a logger with the "propagate" + attribute set to zero is found - that will be the last logger which + is checked for the existence of handlers. + + Pass a record to all relevant handlers. + + Loop through all handlers for this logger and its parents in the + logger hierarchy. If no handler was found, output a one-off error + message to sys.stderr. Stop searching up the hierarchy whenever a + logger with the "propagate" attribute set to zero is found - that + will be the last logger whose handlers are called. + foundNo handlers could be found for logger "%s" +"No handlers could be found for logger"" \"%s\"\n"getEffectiveLevel + Get the effective level for this logger. + + Loop through this logger and its parents in the logger hierarchy, + looking for a non-zero logging level. Return the first one found. + + Is this logger enabled for level 'level'? + is_enabledgetChild + Get a logger which is a descendant to this one. + + This is a convenience method, such that + + logging.getLogger('abc').getChild('def.ghi') + + is the same as + + logging.getLogger('abc.def.ghi') + + It's useful, for example, when the parent logger is named using + __name__ rather than a literal string. + picklePicklingErrorlogger cannot be pickledRootLogger + A root logger is not that different to any other logger, except that + it must have a logging level and there is only one instance of it in + the hierarchy. + + Initialize the logger with the name "root". 
+ + An adapter for loggers which makes it easier to specify contextual + information in logging output. + + Initialize the adapter with a logger and a dict-like object which + provides contextual information. This constructor signature allows + easy stacking of LoggerAdapters, if so desired. + + You can effectively pass keyword arguments as shown in the + following example: + + adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2")) + + Process the logging message and keyword arguments passed in to + a logging call to insert contextual information. You can either + manipulate the message itself, the keyword args or both. Return + the message and kwargs modified (or not) to suit your needs. + + Normally, you'll only need to override this one method in a + LoggerAdapter subclass for your specific needs. + + Delegate a debug call to the underlying logger. + + Delegate an info call to the underlying logger. + + Delegate a warning call to the underlying logger. + + Delegate an error call to the underlying logger. + + Delegate an exception call to the underlying logger. + + Delegate a critical call to the underlying logger. + + Delegate a log call to the underlying logger, after adding + contextual information from this adapter instance. + + Set the specified level on the underlying logger. + + Get the effective level for the underlying logger. + + See if the underlying logger has any handlers. + + Low-level log implementation, proxied to allow nested logger adapters. + + Do basic configuration for the logging system. + + This function does nothing if the root logger already has handlers + configured, unless the keyword argument *force* is set to ``True``. + It is a convenience method intended for use by simple scripts + to do one-shot configuration of the logging package. + + The default behaviour is to create a StreamHandler which writes to + sys.stderr, set a formatter using the BASIC_FORMAT format string, and + add the handler to the root logger. + + A number of optional keyword arguments may be specified, which can alter + the default behaviour. + + filename Specifies that a FileHandler be created, using the specified + filename, rather than a StreamHandler. + filemode Specifies the mode to open the file, if filename is specified + (if filemode is unspecified, it defaults to 'a'). + format Use the specified format string for the handler. + datefmt Use the specified date/time format. + style If a format string is specified, use this to specify the + type of format string (possible values '%', '{', '$', for + %-formatting, :meth:`str.format` and :class:`string.Template` + - defaults to '%'). + level Set the root logger level to the specified level. + stream Use the specified stream to initialize the StreamHandler. Note + that this argument is incompatible with 'filename' - if both + are present, 'stream' is ignored. + handlers If specified, this should be an iterable of already created + handlers, which will be added to the root handler. Any handler + in the list which does not have a formatter assigned will be + assigned the formatter created in this function. + force If this keyword is specified as true, any existing handlers + attached to the root logger are removed and closed, before + carrying out the configuration as specified by the other + arguments. + Note that you could specify a stream created using open(filename, mode) + rather than passing the filename and mode in. 
However, it should be + remembered that StreamHandler does not close its stream (since it may be + using sys.stdout or sys.stderr), whereas FileHandler closes its stream + when the handler is closed. + + .. versionchanged:: 3.8 + Added the ``force`` parameter. + + .. versionchanged:: 3.2 + Added the ``style`` parameter. + + .. versionchanged:: 3.3 + Added the ``handlers`` parameter. A ``ValueError`` is now thrown for + incompatible arguments (e.g. ``handlers`` specified together with + ``filename``/``filemode``, or ``filename``/``filemode`` specified + together with ``stream``, or ``handlers`` specified together with + ``stream``. + force'stream' and 'filename' should not be specified together"'stream' and 'filename' should not be ""specified together"'stream' or 'filename' should not be specified together with 'handlers'"'stream' or 'filename' should not be ""specified together with 'handlers'"filemodedfsfs, Unrecognised argument(s): %s + Return a logger with the specified name, creating it if necessary. + + If no name is specified, return the root logger. + + Log a message with severity 'CRITICAL' on the root logger. If the logger + has no handlers, call basicConfig() to add a console handler with a + pre-defined format. + + Log a message with severity 'ERROR' on the root logger. If the logger has + no handlers, call basicConfig() to add a console handler with a pre-defined + format. + + Log a message with severity 'ERROR' on the root logger, with exception + information. If the logger has no handlers, basicConfig() is called to add + a console handler with a pre-defined format. + + Log a message with severity 'WARNING' on the root logger. If the logger has + no handlers, call basicConfig() to add a console handler with a pre-defined + format. + The 'warn' function is deprecated, use 'warning' instead"The 'warn' function is deprecated, " + Log a message with severity 'INFO' on the root logger. If the logger has + no handlers, call basicConfig() to add a console handler with a pre-defined + format. + + Log a message with severity 'DEBUG' on the root logger. If the logger has + no handlers, call basicConfig() to add a console handler with a pre-defined + format. + + Log 'msg % args' with the integer severity 'level' on the root logger. If + the logger has no handlers, call basicConfig() to add a console handler + with a pre-defined format. + + Disable all logging calls of severity 'level' and below. + handlerList + Perform any cleanup actions in the logging system (e.g. flushing + buffers). + + Should be called at application exit. + atexit + This handler does nothing. It's intended to be used to avoid the + "No handlers could be found for logger XXX" one-off warning. This is + important for library code, which may contain code to log events. If a user + of the library does not configure logging, the one-off warning might be + produced; to avoid this, the library developer simply needs to instantiate + a NullHandler and add it to the top-level logger of the library module or + package. + Stub._warnings_showwarning_showwarningcategory + Implementation of showwarnings which redirects to logging, which will first + check to see if the file parameter is None. If a file is specified, it will + delegate to the original warnings implementation of showwarning. Otherwise, + it will call warnings.formatwarning and will log the resulting string to a + warnings logger named "py.warnings" with level logging.WARNING. 
+ formatwarningpy.warningscapture + If capture is true, redirect all warnings to the logging package. + If capture is False, ensure that warnings are not redirected to logging + but to their original destinations. + showwarning# Copyright 2001-2017 by Vinay Sajip. All Rights Reserved.# Permission to use, copy, modify, and distribute this software and its# documentation for any purpose and without fee is hereby granted,# provided that the above copyright notice appear in all copies and that# both that copyright notice and this permission notice appear in# supporting documentation, and that the name of Vinay Sajip# not be used in advertising or publicity pertaining to distribution# of the software without specific, written prior permission.# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.# The following module attributes are no longer updated.#---------------------------------------------------------------------------# Miscellaneous module data#_startTime is used as the base when calculating the relative time of events#raiseExceptions is used to see if exceptions during handling should be#propagated# If you don't want threading information in the log, set this to zero# If you don't want multiprocessing information in the log, set this to zero# If you don't want process information in the log, set this to zero# Level related stuff# Default levels and level names, these can be replaced with any positive set# of values having corresponding names. There is a pseudo-level, NOTSET, which# is only really there as a lower limit for user-defined levels. Handlers and# loggers are initialized with NOTSET so that they will log all messages, even# at user-defined levels.# See Issues #22386, #27937 and #29220 for why it's this way#unlikely to cause an exception, but you never know...#pragma: no cover# _srcfile is used when walking the stack to check when we've got the first# caller stack frame, by skipping frames whose filename is that of this# module's source. It therefore should contain the filename of this module's# source file.# Ordinarily we would use __file__ for this, but frozen modules don't always# have __file__ set, for some reason (see Issue #21736). Thus, we get the# filename from a handy code object from a function defined in this module.# (There's no particular reason for picking addLevelName.)# _srcfile is only used in conjunction with sys._getframe().# To provide compatibility with older versions of Python, set _srcfile# to None if _getframe() is not available; this value will prevent# findCaller() from being called. You can also do this if you want to avoid# the overhead of fetching caller information, even when _getframe() is# available.#if not hasattr(sys, '_getframe'):# _srcfile = None# Thread-related stuff#_lock is used to serialize access to shared data structures in this module.#This needs to be an RLock because fileConfig() creates and configures#Handlers, and so might arbitrary user threads. Since Handler code updates the#shared dictionary _handlers, it needs to acquire the lock. 
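As a hedged sketch of the warnings-capture behaviour described above (captureWarnings() routing warnings to the "py.warnings" logger); the warning text is made up for illustration:

    import logging
    import warnings

    logging.basicConfig(level=logging.WARNING)
    logging.captureWarnings(True)    # redirect warnings.warn(...) to the "py.warnings" logger
    warnings.warn("illustrative deprecation notice")   # now emitted as a WARNING log record
    logging.captureWarnings(False)   # restore the original warnings destinations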
But if configuring,#the lock would already have been acquired - so we need an RLock.#The same argument applies to Loggers and Manager.loggerDict.# Prevent a held logging lock from blocking a child from logging.# Windows and friends.# no-op when os.register_at_fork does not exist.# A collection of instances with a createLock method (logging.Handler)# to be called in the child after forking. The weakref avoids us keeping# discarded Handler instances alive. A set is used to avoid accumulating# duplicate registrations as createLock() is responsible for registering# a new Handler instance with this set in the first place.# _acquireLock() was called in the parent before forking.# Similar to what PyErr_WriteUnraisable does.# Acquired by os.register_at_fork(before=.# The logging record# The following statement allows passing of a dictionary as a sole# argument, so that you can do something like# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})# Suggested by Stefan Behnel.# Note that without the test for args[0], we get a problem because# during formatting, we test to see if the arg is present using# 'if self.args:'. If the event being logged is e.g. 'Value is %d'# and if the passed arg fails 'if self.args:' then no formatting# is done. For example, logger.warning('Value is %d', 0) would log# 'Value is %d' instead of 'Value is 0'.# For the use case of passing a dictionary, this should not be a# problem.# Issue #21172: a request was made to relax the isinstance check# to hasattr(args[0], '__getitem__'). However, the docs on string# formatting still seem to suggest a mapping object is required.# Thus, while not removing the isinstance check, it does now look# for collections.abc.Mapping rather than, as before, dict.# used to cache the traceback text# pragma: no cover# Errors may occur if multiprocessing has not finished loading# yet - e.g. if a custom import hook causes third-party code# to run when multiprocessing calls import. See issue 8200# for an example# Determine which class to use when instantiating log records.# Formatter classes and functions# See issues #9427, #1553375. Commented out for now.#if getattr(self, 'fullstack', False):# traceback.print_stack(tb.tb_frame.f_back, file=sio)# Cache the traceback text to avoid converting it multiple times# (it's constant anyway)# The default formatter to use when no other is specified# Filter classes and functions# assume callable - will raise if not# Handler classes and functions#map of handler names to handlers# added to allow handlers to be removed in reverse of order initialized# This function can be called during module teardown, when globals are# set to None. It can also be called from another thread. 
So we need to# pre-emptively grab the necessary globals and check if they're None,# to prevent race conditions and failures during interpreter shutdown.# Add the handler to the global _handlerList (for cleanup on shutdown)#get the module data lock, as we're updating a shared structure.#unlikely to raise an exception, but you never know...# see issue 13807# Walk the stack frame up until we're out of logging,# so as to print the calling context.# couldn't find the right stack frame, for some reason# Issue 18671: output logging message and arguments# See issue 36272# see issue 5971# issue 35046: merged two stream.writes into one.# bpo-36015: name can be an int# Issue #27493: add support for Path objects to be passed in#keep the absolute path, otherwise derived classes which use this#may come a cropper when the current directory changes#We don't open the stream, but we still need to call the#Handler constructor to set level, formatter, lock etc.# Issue #19523: call unconditionally to# prevent a handler leak when delay is set# Manager classes and functions# Determine which class to use when instantiating loggers.#The if means ... if not c.parent.name.startswith(nm)# Logger classes and functions#On some versions of IronPython, currentframe() returns None if#IronPython isn't run with -X:Frames.#IronPython doesn't track Python frames, so findCaller raises an#exception on some versions of IronPython. We trap it here so that#IronPython can use logging.#break out# In general, only the root logger will not be accessible via its name.# However, the root logger's class has its own __reduce__ method.# Boilerplate convenience methods# Configuration classes and functions# Add thread safety in case someone mistakenly calls# basicConfig() from multiple threads# Utility functions at module level.# Basically delegate everything to the root logger.#errors might occur, for example, if files are locked#we just ignore them if raiseExceptions is not set# Ignore errors which might be caused# because handlers have been closed but# references to them are still around at# application exit.# ignore everything, as we're shutting down#else, swallow#Let's try and shutdown automatically on application exit...# Null handler# Warnings integrationb' +Logging package for Python. Based on PEP 282 and comments thereto in +comp.lang.python. + +Copyright (C) 2001-2017 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +'u' +Logging package for Python. Based on PEP 282 and comments thereto in +comp.lang.python. + +Copyright (C) 2001-2017 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! 
+'b'BASIC_FORMAT'u'BASIC_FORMAT'b'BufferingFormatter'u'BufferingFormatter'b'CRITICAL'u'CRITICAL'b'DEBUG'u'DEBUG'b'ERROR'u'ERROR'b'FATAL'u'FATAL'b'FileHandler'u'FileHandler'b'Filter'u'Filter'b'Formatter'u'Formatter'b'Handler'u'Handler'b'INFO'u'INFO'b'LogRecord'u'LogRecord'b'Logger'u'Logger'b'LoggerAdapter'u'LoggerAdapter'b'NOTSET'u'NOTSET'b'NullHandler'u'NullHandler'b'StreamHandler'u'StreamHandler'b'WARN'u'WARN'b'WARNING'u'WARNING'b'addLevelName'u'addLevelName'b'basicConfig'u'basicConfig'b'captureWarnings'u'captureWarnings'b'critical'u'critical'b'disable'u'disable'b'error'u'error'b'exception'u'exception'b'fatal'u'fatal'b'getLevelName'u'getLevelName'b'getLogger'u'getLogger'b'getLoggerClass'u'getLoggerClass'b'log'u'log'b'makeLogRecord'u'makeLogRecord'b'setLoggerClass'u'setLoggerClass'b'shutdown'u'shutdown'b'warn'u'warn'b'warning'u'warning'b'getLogRecordFactory'u'getLogRecordFactory'b'setLogRecordFactory'u'setLogRecordFactory'b'lastResort'u'lastResort'b'raiseExceptions'u'raiseExceptions'b'Vinay Sajip 'u'Vinay Sajip 'b'production'u'production'b'0.5.1.2'u'0.5.1.2'b'07 February 2010'u'07 February 2010'b' + Return the textual or numeric representation of logging level 'level'. + + If the level is one of the predefined levels (CRITICAL, ERROR, WARNING, + INFO, DEBUG) then you get the corresponding string. If you have + associated levels with names using addLevelName then the name you have + associated with 'level' is returned. + + If a numeric value corresponding to one of the defined levels is passed + in, the corresponding string representation is returned. + + If a string representation of the level is passed in, the corresponding + numeric value is returned. + + If no matching numeric or string value is passed in, the string + 'Level %s' % level is returned. + 'u' + Return the textual or numeric representation of logging level 'level'. + + If the level is one of the predefined levels (CRITICAL, ERROR, WARNING, + INFO, DEBUG) then you get the corresponding string. If you have + associated levels with names using addLevelName then the name you have + associated with 'level' is returned. + + If a numeric value corresponding to one of the defined levels is passed + in, the corresponding string representation is returned. + + If a string representation of the level is passed in, the corresponding + numeric value is returned. + + If no matching numeric or string value is passed in, the string + 'Level %s' % level is returned. + 'b'Level %s'u'Level %s'b' + Associate 'levelName' with 'level'. + + This is used when converting levels to text during message formatting. + 'u' + Associate 'levelName' with 'level'. + + This is used when converting levels to text during message formatting. + 'b'_getframe'u'_getframe'b'Return the frame object for the caller's stack frame.'u'Return the frame object for the caller's stack frame.'b'Unknown level: %r'u'Unknown level: %r'b'Level not an integer or a valid string: %r'u'Level not an integer or a valid string: %r'b' + Acquire the module-level lock for serializing access to shared data. + + This should be released with _releaseLock(). + 'u' + Acquire the module-level lock for serializing access to shared data. + + This should be released with _releaseLock(). + 'b' + Release the module-level lock acquired by calling _acquireLock(). + 'u' + Release the module-level lock acquired by calling _acquireLock(). 
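The getLevelName()/addLevelName() docstrings above describe a two-way mapping between level numbers and names. A small sketch; the TRACE level is an assumed custom level, not something defined by the module:

    import logging

    TRACE = 5                                  # assumed custom level below DEBUG
    logging.addLevelName(TRACE, "TRACE")

    print(logging.getLevelName(TRACE))         # "TRACE"
    print(logging.getLevelName("TRACE"))       # 5 - the string maps back to the number

    logging.basicConfig(level=TRACE, format="%(levelname)s %(message)s")
    logging.getLogger("demo").log(TRACE, "very fine-grained detail")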
+ 'b'register_at_fork'u'register_at_fork'b'Ignoring exception from logging atfork'u'Ignoring exception from logging atfork'b'._reinit_lock() method:'u'._reinit_lock() method:'b' + A LogRecord instance represents an event being logged. + + LogRecord instances are created every time something is logged. They + contain all the information pertinent to the event being logged. The + main information passed in is in msg and args, which are combined + using str(msg) % args to create the message field of the record. The + record also includes information such as when the record was created, + the source line where the logging call was made, and any exception + information to be logged. + 'u' + A LogRecord instance represents an event being logged. + + LogRecord instances are created every time something is logged. They + contain all the information pertinent to the event being logged. The + main information passed in is in msg and args, which are combined + using str(msg) % args to create the message field of the record. The + record also includes information such as when the record was created, + the source line where the logging call was made, and any exception + information to be logged. + 'b' + Initialize a logging record with interesting information. + 'u' + Initialize a logging record with interesting information. + 'b'Unknown module'u'Unknown module'b'MainProcess'u'MainProcess'b'multiprocessing'b'getpid'u'getpid'b''u''b' + Return the message for this LogRecord. + + Return the message for this LogRecord after merging any user-supplied + arguments with the message. + 'u' + Return the message for this LogRecord. + + Return the message for this LogRecord after merging any user-supplied + arguments with the message. + 'b' + Set the factory to be used when instantiating a log record. + + :param factory: A callable which will be called to instantiate + a log record. + 'u' + Set the factory to be used when instantiating a log record. + + :param factory: A callable which will be called to instantiate + a log record. + 'b' + Return the factory to be used when instantiating a log record. + 'u' + Return the factory to be used when instantiating a log record. + 'b' + Make a LogRecord whose attributes are defined by the specified dictionary, + This function is useful for converting a logging event received over + a socket connection (which is sent as a dictionary) into a LogRecord + instance. + 'u' + Make a LogRecord whose attributes are defined by the specified dictionary, + This function is useful for converting a logging event received over + a socket connection (which is sent as a dictionary) into a LogRecord + instance. 
+ 'b'%(message)s'u'%(message)s'b'%(asctime)s'u'%(asctime)s'b'%(asctime)'u'%(asctime)'b'%\(\w+\)[#0+ -]*(\*|\d+)?(\.(\*|\d+))?[diouxefgcrsa%]'u'%\(\w+\)[#0+ -]*(\*|\d+)?(\.(\*|\d+))?[diouxefgcrsa%]'b'Validate the input format, ensure it matches the correct style'u'Validate the input format, ensure it matches the correct style'b'Invalid format '%s' for '%s' style'u'Invalid format '%s' for '%s' style'b'Formatting field not found in record: %s'u'Formatting field not found in record: %s'b'{message}'u'{message}'b'{asctime}'u'{asctime}'b'{asctime'u'{asctime'b'^(.?[<>=^])?[+ -]?#?0?(\d+|{\w+})?[,_]?(\.(\d+|{\w+}))?[bcdefgnosx%]?$'u'^(.?[<>=^])?[+ -]?#?0?(\d+|{\w+})?[,_]?(\.(\d+|{\w+}))?[bcdefgnosx%]?$'b'^(\d+|\w+)(\.\w+|\[[^]]+\])*$'u'^(\d+|\w+)(\.\w+|\[[^]]+\])*$'b'Validate the input format, ensure it is the correct string formatting style'u'Validate the input format, ensure it is the correct string formatting style'b'invalid field name/expression: %r'u'invalid field name/expression: %r'b'rsa'u'rsa'b'invalid conversion: %r'u'invalid conversion: %r'b'bad specifier: %r'u'bad specifier: %r'b'invalid format: %s'u'invalid format: %s'b'invalid format: no fields'u'invalid format: no fields'b'${message}'u'${message}'b'${asctime}'u'${asctime}'b'$asctime'u'$asctime'b'named'u'named'b'braced'u'braced'b'$'u'$'b'invalid format: bare '$' not allowed'u'invalid format: bare '$' not allowed'b'%(levelname)s:%(name)s:%(message)s'u'%(levelname)s:%(name)s:%(message)s'b'%'u'%'b'{levelname}:{name}:{message}'u'{levelname}:{name}:{message}'b'${levelname}:${name}:${message}'u'${levelname}:${name}:${message}'b' + Formatter instances are used to convert a LogRecord to text. + + Formatters need to know how a LogRecord is constructed. They are + responsible for converting a LogRecord to (usually) a string which can + be interpreted by either a human or an external system. The base Formatter + allows a formatting string to be specified. If none is supplied, the + style-dependent default value, "%(message)s", "{message}", or + "${message}", is used. + + The Formatter can be initialized with a format string which makes use of + knowledge of the LogRecord attributes - e.g. the default value mentioned + above makes use of the fact that the user's message and arguments are pre- + formatted into a LogRecord's message attribute. 
Currently, the useful + attributes in a LogRecord are described by: + + %(name)s Name of the logger (logging channel) + %(levelno)s Numeric logging level for the message (DEBUG, INFO, + WARNING, ERROR, CRITICAL) + %(levelname)s Text logging level for the message ("DEBUG", "INFO", + "WARNING", "ERROR", "CRITICAL") + %(pathname)s Full pathname of the source file where the logging + call was issued (if available) + %(filename)s Filename portion of pathname + %(module)s Module (name portion of filename) + %(lineno)d Source line number where the logging call was issued + (if available) + %(funcName)s Function name + %(created)f Time when the LogRecord was created (time.time() + return value) + %(asctime)s Textual time when the LogRecord was created + %(msecs)d Millisecond portion of the creation time + %(relativeCreated)d Time in milliseconds when the LogRecord was created, + relative to the time the logging module was loaded + (typically at application startup time) + %(thread)d Thread ID (if available) + %(threadName)s Thread name (if available) + %(process)d Process ID (if available) + %(message)s The result of record.getMessage(), computed just as + the record is emitted + 'u' + Formatter instances are used to convert a LogRecord to text. + + Formatters need to know how a LogRecord is constructed. They are + responsible for converting a LogRecord to (usually) a string which can + be interpreted by either a human or an external system. The base Formatter + allows a formatting string to be specified. If none is supplied, the + style-dependent default value, "%(message)s", "{message}", or + "${message}", is used. + + The Formatter can be initialized with a format string which makes use of + knowledge of the LogRecord attributes - e.g. the default value mentioned + above makes use of the fact that the user's message and arguments are pre- + formatted into a LogRecord's message attribute. Currently, the useful + attributes in a LogRecord are described by: + + %(name)s Name of the logger (logging channel) + %(levelno)s Numeric logging level for the message (DEBUG, INFO, + WARNING, ERROR, CRITICAL) + %(levelname)s Text logging level for the message ("DEBUG", "INFO", + "WARNING", "ERROR", "CRITICAL") + %(pathname)s Full pathname of the source file where the logging + call was issued (if available) + %(filename)s Filename portion of pathname + %(module)s Module (name portion of filename) + %(lineno)d Source line number where the logging call was issued + (if available) + %(funcName)s Function name + %(created)f Time when the LogRecord was created (time.time() + return value) + %(asctime)s Textual time when the LogRecord was created + %(msecs)d Millisecond portion of the creation time + %(relativeCreated)d Time in milliseconds when the LogRecord was created, + relative to the time the logging module was loaded + (typically at application startup time) + %(thread)d Thread ID (if available) + %(threadName)s Thread name (if available) + %(process)d Process ID (if available) + %(message)s The result of record.getMessage(), computed just as + the record is emitted + 'b' + Initialize the formatter with specified format strings. + + Initialize the formatter either with the specified format string, or a + default as described above. Allow for specialized date formatting with + the optional datefmt argument. If datefmt is omitted, you get an + ISO8601-like (or RFC 3339-like) format. 
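Given the LogRecord attributes listed above, a minimal Formatter sketch; the particular format string and logger name are assumptions made for illustration:

    import logging

    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(
        fmt="%(asctime)s,%(msecs)03d %(levelname)s %(name)s:%(lineno)d %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",   # asctime is rendered by formatTime(), as these docstrings describe
    ))

    logger = logging.getLogger("formatted")
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    logger.debug("record rendered with an attribute-based format string")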
+ + Use a style parameter of '%', '{' or '$' to specify that you want to + use one of %-formatting, :meth:`str.format` (``{}``) formatting or + :class:`string.Template` formatting in your format string. + + .. versionchanged:: 3.2 + Added the ``style`` parameter. + 'u' + Initialize the formatter with specified format strings. + + Initialize the formatter either with the specified format string, or a + default as described above. Allow for specialized date formatting with + the optional datefmt argument. If datefmt is omitted, you get an + ISO8601-like (or RFC 3339-like) format. + + Use a style parameter of '%', '{' or '$' to specify that you want to + use one of %-formatting, :meth:`str.format` (``{}``) formatting or + :class:`string.Template` formatting in your format string. + + .. versionchanged:: 3.2 + Added the ``style`` parameter. + 'b'Style must be one of: %s'u'Style must be one of: %s'b','u','b'%Y-%m-%d %H:%M:%S'u'%Y-%m-%d %H:%M:%S'b'%s,%03d'u'%s,%03d'b' + Return the creation time of the specified LogRecord as formatted text. + + This method should be called from format() by a formatter which + wants to make use of a formatted time. This method can be overridden + in formatters to provide for any specific requirement, but the + basic behaviour is as follows: if datefmt (a string) is specified, + it is used with time.strftime() to format the creation time of the + record. Otherwise, an ISO8601-like (or RFC 3339-like) format is used. + The resulting string is returned. This function uses a user-configurable + function to convert the creation time to a tuple. By default, + time.localtime() is used; to change this for a particular formatter + instance, set the 'converter' attribute to a function with the same + signature as time.localtime() or time.gmtime(). To change it for all + formatters, for example if you want all logging times to be shown in GMT, + set the 'converter' attribute in the Formatter class. + 'u' + Return the creation time of the specified LogRecord as formatted text. + + This method should be called from format() by a formatter which + wants to make use of a formatted time. This method can be overridden + in formatters to provide for any specific requirement, but the + basic behaviour is as follows: if datefmt (a string) is specified, + it is used with time.strftime() to format the creation time of the + record. Otherwise, an ISO8601-like (or RFC 3339-like) format is used. + The resulting string is returned. This function uses a user-configurable + function to convert the creation time to a tuple. By default, + time.localtime() is used; to change this for a particular formatter + instance, set the 'converter' attribute to a function with the same + signature as time.localtime() or time.gmtime(). To change it for all + formatters, for example if you want all logging times to be shown in GMT, + set the 'converter' attribute in the Formatter class. + 'b' + Format and return the specified exception information as a string. + + This default implementation just uses + traceback.print_exception() + 'u' + Format and return the specified exception information as a string. + + This default implementation just uses + traceback.print_exception() + 'b' + Check if the format uses the creation time of the record. + 'u' + Check if the format uses the creation time of the record. + 'b' + This method is provided as an extension point for specialized + formatting of stack information. 
+ + The input data is a string as returned from a call to + :func:`traceback.print_stack`, but with the last trailing newline + removed. + + The base implementation just returns the value passed in. + 'u' + This method is provided as an extension point for specialized + formatting of stack information. + + The input data is a string as returned from a call to + :func:`traceback.print_stack`, but with the last trailing newline + removed. + + The base implementation just returns the value passed in. + 'b' + Format the specified record as text. + + The record's attribute dictionary is used as the operand to a + string formatting operation which yields the returned string. + Before formatting the dictionary, a couple of preparatory steps + are carried out. The message attribute of the record is computed + using LogRecord.getMessage(). If the formatting string uses the + time (as determined by a call to usesTime(), formatTime() is + called to format the event time. If there is exception information, + it is formatted using formatException() and appended to the message. + 'u' + Format the specified record as text. + + The record's attribute dictionary is used as the operand to a + string formatting operation which yields the returned string. + Before formatting the dictionary, a couple of preparatory steps + are carried out. The message attribute of the record is computed + using LogRecord.getMessage(). If the formatting string uses the + time (as determined by a call to usesTime(), formatTime() is + called to format the event time. If there is exception information, + it is formatted using formatException() and appended to the message. + 'b' + A formatter suitable for formatting a number of records. + 'u' + A formatter suitable for formatting a number of records. + 'b' + Optionally specify a formatter which will be used to format each + individual record. + 'u' + Optionally specify a formatter which will be used to format each + individual record. + 'b' + Return the header string for the specified records. + 'u' + Return the header string for the specified records. + 'b' + Return the footer string for the specified records. + 'u' + Return the footer string for the specified records. + 'b' + Format the specified records and return the result as a string. + 'u' + Format the specified records and return the result as a string. + 'b' + Filter instances are used to perform arbitrary filtering of LogRecords. + + Loggers and Handlers can optionally use Filter instances to filter + records as desired. The base filter class only allows events which are + below a certain point in the logger hierarchy. For example, a filter + initialized with "A.B" will allow events logged by loggers "A.B", + "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If + initialized with the empty string, all events are passed. + 'u' + Filter instances are used to perform arbitrary filtering of LogRecords. + + Loggers and Handlers can optionally use Filter instances to filter + records as desired. The base filter class only allows events which are + below a certain point in the logger hierarchy. For example, a filter + initialized with "A.B" will allow events logged by loggers "A.B", + "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If + initialized with the empty string, all events are passed. + 'b' + Initialize a filter. + + Initialize with the name of the logger which, together with its + children, will have its events allowed through the filter. If no + name is specified, allow every event. 
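A short sketch of the hierarchical Filter behaviour described above ("A.B" passes "A.B", "A.B.C", and so on, but not "A.BB"); the logger names are illustrative:

    import logging

    logging.basicConfig(level=logging.DEBUG, format="%(name)s %(message)s")
    root_handler = logging.getLogger().handlers[0]
    root_handler.addFilter(logging.Filter("A.B"))   # only the "A.B" subtree gets through

    logging.getLogger("A.B.C").info("passes the filter")
    logging.getLogger("A.BB").info("dropped by the filter")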
+ 'u' + Initialize a filter. + + Initialize with the name of the logger which, together with its + children, will have its events allowed through the filter. If no + name is specified, allow every event. + 'b' + Determine if the specified record is to be logged. + + Returns True if the record should be logged, or False otherwise. + If deemed appropriate, the record may be modified in-place. + 'u' + Determine if the specified record is to be logged. + + Returns True if the record should be logged, or False otherwise. + If deemed appropriate, the record may be modified in-place. + 'b' + A base class for loggers and handlers which allows them to share + common code. + 'u' + A base class for loggers and handlers which allows them to share + common code. + 'b' + Initialize the list of filters to be an empty list. + 'u' + Initialize the list of filters to be an empty list. + 'b' + Add the specified filter to this handler. + 'u' + Add the specified filter to this handler. + 'b' + Remove the specified filter from this handler. + 'u' + Remove the specified filter from this handler. + 'b' + Determine if a record is loggable by consulting all the filters. + + The default is to allow the record to be logged; any filter can veto + this and the record is then dropped. Returns a zero value if a record + is to be dropped, else non-zero. + + .. versionchanged:: 3.2 + + Allow filters to be just callables. + 'u' + Determine if a record is loggable by consulting all the filters. + + The default is to allow the record to be logged; any filter can veto + this and the record is then dropped. Returns a zero value if a record + is to be dropped, else non-zero. + + .. versionchanged:: 3.2 + + Allow filters to be just callables. + 'b'filter'u'filter'b' + Remove a handler reference from the internal cleanup list. + 'u' + Remove a handler reference from the internal cleanup list. + 'b' + Add a handler to the internal cleanup list using a weak reference. + 'u' + Add a handler to the internal cleanup list using a weak reference. + 'b' + Handler instances dispatch logging events to specific destinations. + + The base handler class. Acts as a placeholder which defines the Handler + interface. Handlers can optionally use Formatter instances to format + records as desired. By default, no formatter is specified; in this case, + the 'raw' message as determined by record.message is logged. + 'u' + Handler instances dispatch logging events to specific destinations. + + The base handler class. Acts as a placeholder which defines the Handler + interface. Handlers can optionally use Formatter instances to format + records as desired. By default, no formatter is specified; in this case, + the 'raw' message as determined by record.message is logged. + 'b' + Initializes the instance - basically setting the formatter to None + and the filter list to empty. + 'u' + Initializes the instance - basically setting the formatter to None + and the filter list to empty. + 'b' + Acquire a thread lock for serializing access to the underlying I/O. + 'u' + Acquire a thread lock for serializing access to the underlying I/O. + 'b' + Acquire the I/O thread lock. + 'u' + Acquire the I/O thread lock. + 'b' + Release the I/O thread lock. + 'u' + Release the I/O thread lock. + 'b' + Set the logging level of this handler. level must be an int or a str. + 'u' + Set the logging level of this handler. level must be an int or a str. + 'b' + Format the specified record. + + If a formatter is set, use it. Otherwise, use the default formatter + for the module. 
+ 'u' + Format the specified record. + + If a formatter is set, use it. Otherwise, use the default formatter + for the module. + 'b' + Do whatever it takes to actually log the specified logging record. + + This version is intended to be implemented by subclasses and so + raises a NotImplementedError. + 'u' + Do whatever it takes to actually log the specified logging record. + + This version is intended to be implemented by subclasses and so + raises a NotImplementedError. + 'b'emit must be implemented by Handler subclasses'u'emit must be implemented by Handler subclasses'b' + Conditionally emit the specified logging record. + + Emission depends on filters which may have been added to the handler. + Wrap the actual emission of the record with acquisition/release of + the I/O thread lock. Returns whether the filter passed the record for + emission. + 'u' + Conditionally emit the specified logging record. + + Emission depends on filters which may have been added to the handler. + Wrap the actual emission of the record with acquisition/release of + the I/O thread lock. Returns whether the filter passed the record for + emission. + 'b' + Set the formatter for this handler. + 'u' + Set the formatter for this handler. + 'b' + Ensure all logging output has been flushed. + + This version does nothing and is intended to be implemented by + subclasses. + 'u' + Ensure all logging output has been flushed. + + This version does nothing and is intended to be implemented by + subclasses. + 'b' + Tidy up any resources used by the handler. + + This version removes the handler from an internal map of handlers, + _handlers, which is used for handler lookup by name. Subclasses + should ensure that this gets called from overridden close() + methods. + 'u' + Tidy up any resources used by the handler. + + This version removes the handler from an internal map of handlers, + _handlers, which is used for handler lookup by name. Subclasses + should ensure that this gets called from overridden close() + methods. + 'b' + Handle errors which occur during an emit() call. + + This method should be called from handlers when an exception is + encountered during an emit() call. If raiseExceptions is false, + exceptions get silently ignored. This is what is mostly wanted + for a logging system - most users will not care about errors in + the logging system, they are more interested in application errors. + You could, however, replace this with a custom handler if you wish. + The record which was being processed is passed in to this method. + 'u' + Handle errors which occur during an emit() call. + + This method should be called from handlers when an exception is + encountered during an emit() call. If raiseExceptions is false, + exceptions get silently ignored. This is what is mostly wanted + for a logging system - most users will not care about errors in + the logging system, they are more interested in application errors. + You could, however, replace this with a custom handler if you wish. + The record which was being processed is passed in to this method. + 'b'--- Logging error --- +'u'--- Logging error --- +'b'Call stack: +'u'Call stack: +'b'Logged from file %s, line %s +'u'Logged from file %s, line %s +'b'Message: %r +Arguments: %s +'u'Message: %r +Arguments: %s +'b'Unable to print the message and arguments - possible formatting error. +Use the traceback above to help find the error. +'u'Unable to print the message and arguments - possible formatting error. +Use the traceback above to help find the error. 
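Since emit() is the one method the Handler docstrings above require subclasses to implement, here is a hedged sketch of a custom handler; the ListHandler class is hypothetical and only meant to mirror that contract:

    import logging

    class ListHandler(logging.Handler):
        """Hypothetical handler that keeps formatted records in memory."""

        def __init__(self, level=logging.NOTSET):
            super().__init__(level)
            self.records = []

        def emit(self, record):
            try:
                self.records.append(self.format(record))   # uses the set formatter, or the module default
            except Exception:
                self.handleError(record)                    # honours raiseExceptions, as described above

    logger = logging.getLogger("capture")
    logger.addHandler(ListHandler())
    logger.warning("stored in a list instead of being written to a stream")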
+'b'<%s (%s)>'u'<%s (%s)>'b' + A handler class which writes logging records, appropriately formatted, + to a stream. Note that this class does not close the stream, as + sys.stdout or sys.stderr may be used. + 'u' + A handler class which writes logging records, appropriately formatted, + to a stream. Note that this class does not close the stream, as + sys.stdout or sys.stderr may be used. + 'b' + Initialize the handler. + + If stream is not specified, sys.stderr is used. + 'u' + Initialize the handler. + + If stream is not specified, sys.stderr is used. + 'b' + Flushes the stream. + 'u' + Flushes the stream. + 'b'flush'u'flush'b' + Emit a record. + + If a formatter is specified, it is used to format the record. + The record is then written to the stream with a trailing newline. If + exception information is present, it is formatted using + traceback.print_exception and appended to the stream. If the stream + has an 'encoding' attribute, it is used to determine how to do the + output to the stream. + 'u' + Emit a record. + + If a formatter is specified, it is used to format the record. + The record is then written to the stream with a trailing newline. If + exception information is present, it is formatted using + traceback.print_exception and appended to the stream. If the stream + has an 'encoding' attribute, it is used to determine how to do the + output to the stream. + 'b' + Sets the StreamHandler's stream to the specified value, + if it is different. + + Returns the old stream, if the stream was changed, or None + if it wasn't. + 'u' + Sets the StreamHandler's stream to the specified value, + if it is different. + + Returns the old stream, if the stream was changed, or None + if it wasn't. + 'b'<%s %s(%s)>'u'<%s %s(%s)>'b' + A handler class which writes formatted logging records to disk files. + 'u' + A handler class which writes formatted logging records to disk files. + 'b'a'u'a'b' + Open the specified file and use it as the stream for logging. + 'u' + Open the specified file and use it as the stream for logging. + 'b' + Closes the stream. + 'u' + Closes the stream. + 'b'close'u'close'b' + Open the current base file with the (original) mode and encoding. + Return the resulting stream. + 'u' + Open the current base file with the (original) mode and encoding. + Return the resulting stream. + 'b' + Emit a record. + + If the stream was not opened because 'delay' was specified in the + constructor, open it before calling the superclass's emit. + 'u' + Emit a record. + + If the stream was not opened because 'delay' was specified in the + constructor, open it before calling the superclass's emit. + 'b'<%s %s (%s)>'u'<%s %s (%s)>'b' + This class is like a StreamHandler using sys.stderr, but always uses + whatever sys.stderr is currently set to rather than the value of + sys.stderr at handler construction time. + 'u' + This class is like a StreamHandler using sys.stderr, but always uses + whatever sys.stderr is currently set to rather than the value of + sys.stderr at handler construction time. + 'b' + Initialize the handler. + 'u' + Initialize the handler. + 'b' + PlaceHolder instances are used in the Manager logger hierarchy to take + the place of nodes for which no loggers have been defined. This class is + intended for internal use only and not as part of the public API. + 'u' + PlaceHolder instances are used in the Manager logger hierarchy to take + the place of nodes for which no loggers have been defined. 
This class is + intended for internal use only and not as part of the public API. + 'b' + Initialize with the specified logger being a child of this placeholder. + 'u' + Initialize with the specified logger being a child of this placeholder. + 'b' + Add the specified logger as a child of this placeholder. + 'u' + Add the specified logger as a child of this placeholder. + 'b' + Set the class to be used when instantiating a logger. The class should + define __init__() such that only a name argument is required, and the + __init__() should call Logger.__init__() + 'u' + Set the class to be used when instantiating a logger. The class should + define __init__() such that only a name argument is required, and the + __init__() should call Logger.__init__() + 'b'logger not derived from logging.Logger: 'u'logger not derived from logging.Logger: 'b' + Return the class to be used when instantiating a logger. + 'u' + Return the class to be used when instantiating a logger. + 'b' + There is [under normal circumstances] just one Manager instance, which + holds the hierarchy of loggers. + 'u' + There is [under normal circumstances] just one Manager instance, which + holds the hierarchy of loggers. + 'b' + Initialize the manager with the root node of the logger hierarchy. + 'u' + Initialize the manager with the root node of the logger hierarchy. + 'b' + Get a logger with the specified name (channel name), creating it + if it doesn't yet exist. This name is a dot-separated hierarchical + name, such as "a", "a.b", "a.b.c" or similar. + + If a PlaceHolder existed for the specified name [i.e. the logger + didn't exist but a child of it did], replace it with the created + logger and fix up the parent/child references which pointed to the + placeholder to now point to the logger. + 'u' + Get a logger with the specified name (channel name), creating it + if it doesn't yet exist. This name is a dot-separated hierarchical + name, such as "a", "a.b", "a.b.c" or similar. + + If a PlaceHolder existed for the specified name [i.e. the logger + didn't exist but a child of it did], replace it with the created + logger and fix up the parent/child references which pointed to the + placeholder to now point to the logger. + 'b'A logger name must be a string'u'A logger name must be a string'b' + Set the class to be used when instantiating a logger with this Manager. + 'u' + Set the class to be used when instantiating a logger with this Manager. + 'b' + Set the factory to be used when instantiating a log record with this + Manager. + 'u' + Set the factory to be used when instantiating a log record with this + Manager. + 'b' + Ensure that there are either loggers or placeholders all the way + from the specified logger to the root of the logger hierarchy. + 'u' + Ensure that there are either loggers or placeholders all the way + from the specified logger to the root of the logger hierarchy. + 'b' + Ensure that children of the placeholder ph are connected to the + specified logger. + 'u' + Ensure that children of the placeholder ph are connected to the + specified logger. + 'b' + Clear the cache for all loggers in loggerDict + Called when level changes are made + 'u' + Clear the cache for all loggers in loggerDict + Called when level changes are made + 'b' + Instances of the Logger class represent a single logging channel. A + "logging channel" indicates an area of an application. Exactly how an + "area" is defined is up to the application developer. 
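As an illustration of the setLoggerClass() contract quoted above (a Logger subclass that needs only a name argument); AuditLogger and its audit() method are assumptions, not part of the module:

    import logging

    class AuditLogger(logging.Logger):
        # Only the inherited __init__(name) is needed, per the docstring above.
        def audit(self, msg, *args, **kwargs):
            self.log(logging.INFO, "AUDIT: " + msg, *args, **kwargs)

    logging.setLoggerClass(AuditLogger)           # affects loggers created from now on
    logging.basicConfig(level=logging.INFO)

    logger = logging.getLogger("billing.audit")   # dot-separated hierarchical name
    logger.audit("invoice %s issued", "X-123")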
Since an + application can have any number of areas, logging channels are identified + by a unique string. Application areas can be nested (e.g. an area + of "input processing" might include sub-areas "read CSV files", "read + XLS files" and "read Gnumeric files"). To cater for this natural nesting, + channel names are organized into a namespace hierarchy where levels are + separated by periods, much like the Java or Python package namespace. So + in the instance given above, channel names might be "input" for the upper + level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. + There is no arbitrary limit to the depth of nesting. + 'u' + Instances of the Logger class represent a single logging channel. A + "logging channel" indicates an area of an application. Exactly how an + "area" is defined is up to the application developer. Since an + application can have any number of areas, logging channels are identified + by a unique string. Application areas can be nested (e.g. an area + of "input processing" might include sub-areas "read CSV files", "read + XLS files" and "read Gnumeric files"). To cater for this natural nesting, + channel names are organized into a namespace hierarchy where levels are + separated by periods, much like the Java or Python package namespace. So + in the instance given above, channel names might be "input" for the upper + level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. + There is no arbitrary limit to the depth of nesting. + 'b' + Initialize the logger with a name and an optional level. + 'u' + Initialize the logger with a name and an optional level. + 'b' + Set the logging level of this logger. level must be an int or a str. + 'u' + Set the logging level of this logger. level must be an int or a str. + 'b' + Log 'msg % args' with severity 'DEBUG'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.debug("Houston, we have a %s", "thorny problem", exc_info=1) + 'u' + Log 'msg % args' with severity 'DEBUG'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.debug("Houston, we have a %s", "thorny problem", exc_info=1) + 'b' + Log 'msg % args' with severity 'INFO'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.info("Houston, we have a %s", "interesting problem", exc_info=1) + 'u' + Log 'msg % args' with severity 'INFO'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.info("Houston, we have a %s", "interesting problem", exc_info=1) + 'b' + Log 'msg % args' with severity 'WARNING'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1) + 'u' + Log 'msg % args' with severity 'WARNING'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1) + 'b'The 'warn' method is deprecated, use 'warning' instead'u'The 'warn' method is deprecated, use 'warning' instead'b' + Log 'msg % args' with severity 'ERROR'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.error("Houston, we have a %s", "major problem", exc_info=1) + 'u' + Log 'msg % args' with severity 'ERROR'. 
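A minimal sketch of the exc_info usage shown in the debug/info/warning docstrings above; the logger name and messages are illustrative:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("worker")

    try:
        1 / 0
    except ZeroDivisionError:
        # exc_info=True appends the formatted traceback to the record
        logger.error("Houston, we have a %s", "major problem", exc_info=True)
        logger.exception("same event via the ERROR-level convenience method")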
+ + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.error("Houston, we have a %s", "major problem", exc_info=1) + 'b' + Convenience method for logging an ERROR with exception information. + 'u' + Convenience method for logging an ERROR with exception information. + 'b' + Log 'msg % args' with severity 'CRITICAL'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.critical("Houston, we have a %s", "major disaster", exc_info=1) + 'u' + Log 'msg % args' with severity 'CRITICAL'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.critical("Houston, we have a %s", "major disaster", exc_info=1) + 'b' + Log 'msg % args' with the integer severity 'level'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.log(level, "We have a %s", "mysterious problem", exc_info=1) + 'u' + Log 'msg % args' with the integer severity 'level'. + + To pass exception information, use the keyword argument exc_info with + a true value, e.g. + + logger.log(level, "We have a %s", "mysterious problem", exc_info=1) + 'b'level must be an integer'u'level must be an integer'b' + Find the stack frame of the caller so that we can note the source + file name, line number and function name. + 'u' + Find the stack frame of the caller so that we can note the source + file name, line number and function name. + 'b'(unknown file)'u'(unknown file)'b'(unknown function)'u'(unknown function)'b'f_code'u'f_code'b'Stack (most recent call last): +'u'Stack (most recent call last): +'b' + A factory method which can be overridden in subclasses to create + specialized LogRecords. + 'u' + A factory method which can be overridden in subclasses to create + specialized LogRecords. + 'b'asctime'u'asctime'b'Attempt to overwrite %r in LogRecord'u'Attempt to overwrite %r in LogRecord'b' + Low-level logging routine which creates a LogRecord and then calls + all the handlers of this logger to handle the record. + 'u' + Low-level logging routine which creates a LogRecord and then calls + all the handlers of this logger to handle the record. + 'b' + Call the handlers for the specified record. + + This method is used for unpickled records received from a socket, as + well as those created locally. Logger-level filtering is applied. + 'u' + Call the handlers for the specified record. + + This method is used for unpickled records received from a socket, as + well as those created locally. Logger-level filtering is applied. + 'b' + Add the specified handler to this logger. + 'u' + Add the specified handler to this logger. + 'b' + Remove the specified handler from this logger. + 'u' + Remove the specified handler from this logger. + 'b' + See if this logger has any handlers configured. + + Loop through all handlers for this logger and its parents in the + logger hierarchy. Return True if a handler was found, else False. + Stop searching up the hierarchy whenever a logger with the "propagate" + attribute set to zero is found - that will be the last logger which + is checked for the existence of handlers. + 'u' + See if this logger has any handlers configured. + + Loop through all handlers for this logger and its parents in the + logger hierarchy. Return True if a handler was found, else False. 
+ Stop searching up the hierarchy whenever a logger with the "propagate" + attribute set to zero is found - that will be the last logger which + is checked for the existence of handlers. + 'b' + Pass a record to all relevant handlers. + + Loop through all handlers for this logger and its parents in the + logger hierarchy. If no handler was found, output a one-off error + message to sys.stderr. Stop searching up the hierarchy whenever a + logger with the "propagate" attribute set to zero is found - that + will be the last logger whose handlers are called. + 'u' + Pass a record to all relevant handlers. + + Loop through all handlers for this logger and its parents in the + logger hierarchy. If no handler was found, output a one-off error + message to sys.stderr. Stop searching up the hierarchy whenever a + logger with the "propagate" attribute set to zero is found - that + will be the last logger whose handlers are called. + 'b'No handlers could be found for logger "%s" +'u'No handlers could be found for logger "%s" +'b' + Get the effective level for this logger. + + Loop through this logger and its parents in the logger hierarchy, + looking for a non-zero logging level. Return the first one found. + 'u' + Get the effective level for this logger. + + Loop through this logger and its parents in the logger hierarchy, + looking for a non-zero logging level. Return the first one found. + 'b' + Is this logger enabled for level 'level'? + 'u' + Is this logger enabled for level 'level'? + 'b' + Get a logger which is a descendant to this one. + + This is a convenience method, such that + + logging.getLogger('abc').getChild('def.ghi') + + is the same as + + logging.getLogger('abc.def.ghi') + + It's useful, for example, when the parent logger is named using + __name__ rather than a literal string. + 'u' + Get a logger which is a descendant to this one. + + This is a convenience method, such that + + logging.getLogger('abc').getChild('def.ghi') + + is the same as + + logging.getLogger('abc.def.ghi') + + It's useful, for example, when the parent logger is named using + __name__ rather than a literal string. + 'b'logger cannot be pickled'u'logger cannot be pickled'b' + A root logger is not that different to any other logger, except that + it must have a logging level and there is only one instance of it in + the hierarchy. + 'u' + A root logger is not that different to any other logger, except that + it must have a logging level and there is only one instance of it in + the hierarchy. + 'b' + Initialize the logger with the name "root". + 'u' + Initialize the logger with the name "root". + 'b'root'u'root'b' + An adapter for loggers which makes it easier to specify contextual + information in logging output. + 'u' + An adapter for loggers which makes it easier to specify contextual + information in logging output. + 'b' + Initialize the adapter with a logger and a dict-like object which + provides contextual information. This constructor signature allows + easy stacking of LoggerAdapters, if so desired. + + You can effectively pass keyword arguments as shown in the + following example: + + adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2")) + 'u' + Initialize the adapter with a logger and a dict-like object which + provides contextual information. This constructor signature allows + easy stacking of LoggerAdapters, if so desired. 
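Tying together the LoggerAdapter constructor and stacking behaviour described above, a hedged sketch; RequestAdapter and the request_id key are assumptions made for the example:

    import logging

    logging.basicConfig(format="%(message)s", level=logging.INFO)

    class RequestAdapter(logging.LoggerAdapter):
        # process() is the one method these docstrings say a subclass normally overrides.
        def process(self, msg, kwargs):
            return "[request %s] %s" % (self.extra["request_id"], msg), kwargs

    adapter = RequestAdapter(logging.getLogger("web"), {"request_id": "abc123"})
    adapter.info("handling %s", "/index")     # -> "[request abc123] handling /index"

Adapters can themselves be wrapped in further adapters, which is the stacking the constructor docstring refers to.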
[Cached string data from the CodeQL Python database, rendered as text by the diff: duplicated byte/unicode literals and docstrings from the standard library's `logging` package — LoggerAdapter (its process() hook and the delegating debug/info/warning/error/exception/critical/log methods), basicConfig() and its keyword arguments (filename, filemode, format, datefmt, style, level, stream, handlers, force) with the associated version-changed notes and error messages, the module-level convenience functions on the root logger, disable(), shutdown(), NullHandler, and captureWarnings() — followed by the `xml.parsers` package docstring.]
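As a quick illustration of what the pooled `logging` docstrings above describe (it is not part of the database itself), here is a minimal sketch using only the standard library; the RequestAdapter class and the request_id field are invented for the example:

```python
import logging

# One-shot root-logger setup, per the basicConfig() docstring: a handler,
# a formatter built from `format`, and a level; force=True (Python 3.8+)
# replaces any handlers that were already attached to the root logger.
logging.basicConfig(
    format="%(asctime)s %(name)s %(levelname)s: %(message)s",
    level=logging.DEBUG,
    force=True,
)

class RequestAdapter(logging.LoggerAdapter):
    """Hypothetical adapter: override process() to inject contextual info."""
    def process(self, msg, kwargs):
        # self.extra is the dict passed to the LoggerAdapter constructor.
        return "[request %s] %s" % (self.extra["request_id"], msg), kwargs

adapter = RequestAdapter(logging.getLogger("app"), {"request_id": "42"})
adapter.info("handling %s", "/index")   # delegated to the underlying logger
```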
[Continuation of the pooled strings: the remaining `xml.parsers` literals, then the `collections` module — its package docstring, OrderedDict (doubly linked list implementation notes and method docstrings), namedtuple() (the Point doctest, field-name validation messages, the generated __new__/_make/_replace/_asdict helpers), Counter (multiset semantics, most_common(), elements(), update()/subtract(), the arithmetic and in-place operators), ChainMap, and the UserDict/UserList/UserString wrappers, interleaved with the module's own implementation comments.]
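To make the cached `collections` docstrings easier to skim, here is a small sketch of the behaviours they document; the values mirror the doctests quoted in the pool rather than anything specific to this repository:

```python
from collections import ChainMap, Counter, OrderedDict, namedtuple

# namedtuple: tuple subclass with named fields (see the pooled Point doctest).
Point = namedtuple("Point", ["x", "y"])
p = Point(11, y=22)
assert p.x + p.y == 33
assert p._replace(x=100) == Point(100, 22)

# Counter: dict subclass for tallying hashable items; arithmetic keeps
# only positive counts.
c = Counter("abracadabra")
assert c.most_common(2) == [("a", 5), ("b", 2)]
assert (c - Counter("aaaa"))["a"] == 1

# ChainMap: one view over several mappings; lookups search left to right,
# writes and deletions only touch the first mapping.
defaults = {"color": "red", "user": "guest"}
overrides = {"user": "admin"}
cm = ChainMap(overrides, defaults)
assert cm["user"] == "admin" and cm["color"] == "red"

# OrderedDict: remembers insertion order and can reorder with move_to_end().
od = OrderedDict(a=1, b=2)
od.move_to_end("a")
assert list(od) == ["b", "a"]
```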
[Byte/unicode duplicates of the same `collections` docstrings and literals (OrderedDict methods, the namedtuple Point doctest, the Counter doctests and operator docstrings, ChainMap, UserDict/UserList), followed by the `asyncio` package docstring with its submodule list and the opening of the `unittest` package docstring.]
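The pool also carries the UserDict/UserList wrapper docstrings ("wrapper around dictionary objects for easier dict subclassing"); a minimal sketch of the kind of subclass they are meant to ease — LowerDict and its key normalisation are invented for the example:

```python
from collections import UserDict

class LowerDict(UserDict):
    """Hypothetical dict wrapper that normalises keys to lower case.

    Deriving from UserDict (rather than dict) means update(), the
    constructor, and friends all route through __setitem__ below.
    """
    def __setitem__(self, key, value):
        super().__setitem__(key.lower(), value)

    def __getitem__(self, key):
        return super().__getitem__(key.lower())

d = LowerDict(Accept="text/html")
assert d["accept"] == "text/html"
```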
[Remainder of the pooled strings: the `unittest` package docstring (the IntegerArithmeticTestCase usage example, copyright notice and warranty disclaimer) with its public names, the `xml` package docstring (dom, parsers, sax, etree sub-packages), the `html` module (escape()/unescape() docstrings, the invalid-charref and invalid-codepoint tables, the charref regular expression), and the beginning of `test.support` (resource handling, fresh-import helpers, GUI availability checks, and platform-version guards).]
+ + For example, @requires_freebsd_version(7, 2) raises SkipTest if the FreeBSD + version is less than 7.2. + FreeBSDDecorator raising SkipTest if the OS is Linux and the Linux version is + less than `min_version`. + + For example, @requires_linux_version(2, 6, 32) raises SkipTest if the Linux + version is less than 2.6.32. + LinuxDecorator raising SkipTest if the OS is Mac OS X and the OS X + version if less than min_version. + + For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version + is lesser than 10.5. + mac_verMac OS X %s or higher required, not %sdigestnameopensslDecorator raising SkipTest if a hashing algorithm is not available + + The hashing algorithm could be missing or blocked by a strict crypto + policy. + + If 'openssl' is True, then the decorator checks that OpenSSL provides + the algorithm. Otherwise the check falls back to built-in + implementations. + + ValueError: [digital envelope routines: EVP_DigestInit_ex] disabled for FIPS + ValueError: unsupported hash type md4 + hash digest '' is not available.localhost127.0.0.1HOSTv4::1HOSTv6AF_INETSOCK_STREAMfamilysocktypeReturns an unused port that should be suitable for binding. This is + achieved by creating a temporary socket with the same family and type as + the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to + the specified host address (defaults to 0.0.0.0) with the port set to 0, + eliciting an unused ephemeral port from the OS. The temporary socket is + then closed and deleted, and the ephemeral port is returned. + + Either this method or bind_port() should be used for any tests where a + server socket needs to be bound to a particular port for the duration of + the test. Which one to use depends on whether the calling code is creating + a python socket, or if an unused port needs to be provided in a constructor + or passed to an external program (i.e. the -accept argument to openssl's + s_server mode). Always prefer bind_port() over find_unused_port() where + possible. Hard coded ports should *NEVER* be used. As soon as a server + socket is bound to a hard coded port, the ability to run multiple instances + of the test simultaneously on the same host is compromised, which makes the + test a ticking time bomb in a buildbot environment. On Unix buildbots, this + may simply manifest as a failed test, which can be recovered from without + intervention in most cases, but on Windows, the entire python process can + completely and utterly wedge, requiring someone to log in to the buildbot + and manually kill the affected process. + + (This is easy to reproduce on Windows, unfortunately, and can be traced to + the SO_REUSEADDR socket option having different semantics on Windows versus + Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind, + listen and then accept connections on identical host/ports. An EADDRINUSE + OSError will be raised at some point (depending on the platform and + the order bind and listen were called on each socket). + + However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE + will ever be raised when attempting to bind two identical host/ports. When + accept() is called on each socket, the second caller's process will steal + the port from the first caller, leaving them both in an awkwardly wedged + state where they'll no longer respond to any signals or graceful kills, and + must be forcibly killed via OpenProcess()/TerminateProcess(). 
+ + The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option + instead of SO_REUSEADDR, which effectively affords the same semantics as + SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open + Source world compared to Windows ones, this is a common mistake. A quick + look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when + openssl.exe is called with the 's_server' option, for example. See + http://bugs.python.org/issue2550 for more info. The following site also + has a very thorough description about the implications of both REUSEADDR + and EXCLUSIVEADDRUSE on Windows: + http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx) + + XXX: although this approach is a vast improvement on previous attempts to + elicit unused ports, it rests heavily on the assumption that the ephemeral + port returned to us by the OS won't immediately be dished back out to some + other process when we close and delete our temporary socket but before our + calling code has a chance to bind the returned port. We can deal with this + issue if/when we come across it. + tempsockportsockhostBind the socket to a free port and return the port number. Relies on + ephemeral ports in order to ensure we are using an unbound port. This is + important as many tests may be running simultaneously, especially in a + buildbot environment. This method raises an exception if the sock.family + is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR + or SO_REUSEPORT set on it. Tests should *never* set these socket options + for TCP/IP sockets. The only case for setting these options is testing + multicasting via multiple UDP sockets. + + Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e. + on Windows), it will be set on the socket. This will prevent anyone else + from bind()'ing to our host/port for the duration of the test. + SO_REUSEADDRgetsockoptSOL_SOCKETtests should never set the SO_REUSEADDR socket option on TCP/IP sockets!"tests should never set the SO_REUSEADDR ""socket option on TCP/IP sockets!"SO_REUSEPORTtests should never set the SO_REUSEPORT socket option on TCP/IP sockets!"tests should never set the SO_REUSEPORT "SO_EXCLUSIVEADDRUSEsetsockoptgetsocknameaddrBind a unix socket, raising SkipTest if PermissionError is raised.AF_UNIXcannot bind AF_UNIX sockets_is_ipv6_enabledCheck whether IPv6 is enabled on this host.has_ipv6AF_INET6system_must_validate_certSkip the test on TLS certificate validation failures.decCERTIFICATE_VERIFY_FAILEDsystem does not contain necessary certificates"system does not contain ""necessary certificates"SOCK_MAX_SIZEdoubleIEEEtest requires IEEE 754 doublesrequires zlibrequires gziprequires bz2requires lzmajavagetandroidapilevel/system/bin/sh/bin/sh$test@test{}_{}_tmphttp://www.pythontest.netTEST_HTTP_URLFS_NONASCIIæİŁφКא،تก fsdecodefsencode-àòɘŁğTESTFN_UNICODEunicodedatanormalizeNFDTESTFN_ENCODINGTESTFN_UNENCODABLEntgetwindowsversion-共Ł♡ͣ�WARNING: The filename %r CAN be encoded by the filesystem encoding (%s). Unicode filename tests may not be effective'WARNING: The filename %r CAN be encoded by the filesystem encoding (%s). ''Unicode filename tests may not be effective'ÿb'-ÿsurrogateescapeTESTFN_UNDECODABLEçwð®Õí²€b'\xae\xd5'b'\xed\xb2\x80'í´€˜TESTFN_NONASCIIgetcwdPGO_EXTENDEDtemp_dirReturn a context manager that creates a temporary directory. + + Arguments: + + path: the directory to create temporarily. If omitted or None, + defaults to creating a temporary directory using tempfile.mkdtemp. 
+ + quiet: if False (the default), the context manager raises an exception + on error. Otherwise, if the path is specified and cannot be + created, only a warning is issued. + + dir_createdmkdtemprealpathmkdirtests may fail, unable to create temporary directory 'tests may fail, unable to create ''temporary directory ': pidchange_cwdReturn a context manager that changes the current working directory. + + Arguments: + + path: the directory to use as the temporary current working directory. + + quiet: if False (the default), the context manager raises an exception + on error. Otherwise, it issues only a warning and keeps the current + working directory the same. + + saved_dirchdirtests may fail, unable to change the current working directory to 'tests may fail, unable to change the current working ''directory to 'tempcwd + Context manager that temporarily creates and changes the CWD. + + The function temporarily changes the current working directory + after creating a temporary directory in the current directory with + name *name*. If *name* is None, the temporary directory is + created using tempfile.mkdtemp. + + If *quiet* is False (default) and it is not possible to + create or change the CWD, an error is raised. If *quiet* is True, + only a warning is raised and the original CWD is used. + + temp_pathcwd_dirumaskContext manager that temporarily sets the process umask.oldmaskTEST_SUPPORT_DIRTEST_HOME_DIRTEST_DATA_DIRsubdirTry to find a file on sys.path or in the test directory. If it is not + found the argument passed to the function is returned (this does not + necessarily signal failure; could still be the legitimate path). + + Setting *subdir* indicates a relative path to use to find the file + rather than looking directly in the path directories. + isabsdnCreate an empty file. If the file already exists, truncate it.O_WRONLYO_CREATO_TRUNCfdLike repr(dict), but in sorted order.pairreprpairswithcommasmake_bad_fd + Create an invalid file descriptor by opening and closing a file and return + its fd. + wbtestcasestatementerrtextassertRaisesRegexcmassertIsNotNoneassertEqualwarnsalwaysassertTrueassertRegexurlurllib.requesturllib.parsecheckurlparsecheck_valid_fileurlfetch fetching %s ...requestbuild_openeropeneraddheadersAccept-EncodingheadersContent-EncodingGzipFilefileobjinvalid resource %rWarningsRecorderConvenience wrapper for the warnings list returned on + entry to the warnings.catch_warnings() context manager. + warnings_list_warningsWarningMessage_WARNING_DETAILS%r has no attribute %r_filterwarningsCatch the warnings, then check if all the expected + warnings have been raised and re-raise unexpected warnings. + If 'quiet' is True, only re-raise the unexpected warnings. + __warningregistry__registryreraisemissingcatunhandled warning %sfilter (%r, %s) did not catch any warningContext manager to silence warnings. + + Accept 2-tuples as positional arguments: + ("message regexp", WarningCategory) + + Optional argument: + - if 'quiet' is True, it does not fail if a filter catches nothing + (default True without argument, + default False if some filters are defined) + + Without argument, it defaults to: + check_warnings(("", Warning), quiet=True) + force_gcContext manager to check that no warnings are emitted. + + This context manager enables a given warning within its scope + and checks that no warnings are emitted even with that warning + enabled. + + If force_gc is True, a garbage collection is attempted before checking + for warnings. 
This may help to catch warnings emitted when objects + are deleted, such as ResourceWarning. + + Other keyword arguments are passed to warnings.filterwarnings(). + gc_collectContext manager to check that no ResourceWarning is emitted. + + Usage: + + with check_no_resource_warning(self): + f = open(...) + ... + del f + + You must remove the object which may emit ResourceWarning before + the end of the context manager. + Context manager to force import to return a new module reference. + + This is useful for testing module-level behaviours, such as + the emission of a DeprecationWarning on import. + + Use like this: + + with CleanImport("foo"): + importlib.import_module("foo") # new reference + module_namesoriginal_modulesmodule_nameignore_excClass to help protect the environment variable properly. Can be used as + a context manager._environ_changedenvvarDirsOnSysPathContext manager to temporarily add directories to sys.path. + + This makes a copy of sys.path, appends any directories given + as positional arguments, then reverts sys.path to the copied + settings when the context ends. + + Note that *all* sys.path modifications in the body of the + context manager, including replacement of the object, + will be reverted at the end of the block. + pathsoriginal_valueoriginal_objectRaise ResourceDenied if an exception is raised while the context manager + is in effect that matches the specified exception and attributes.type_If type_ is a subclass of self.exc and value has attributes matching + self.attrs, raise ResourceDenied. Otherwise let the exception + propagate (if any).attr_valuean optional resource is not availableETIMEDOUTECONNRESETget_socket_conn_refused_errs + Get the different socket error numbers ('errno') which can be received + when a connection is refused. + ECONNREFUSEDENETUNREACHEADDRNOTAVAILEHOSTUNREACHEAFNOSUPPORT30.0resource_nameerrnosReturn a context manager that raises ResourceDenied when various issues + with the Internet connection manifest themselves as exceptions.11110411311099default_errnosEAI_AGAINEAI_FAILEAI_NONAMEEAI_NODATAWSANO_DATA11004default_gai_errnosResource %r is not availabledeniedcaptured_errnosgai_errnosfilter_errorgaierrorHTTPError599URLErrorgetdefaulttimeoutold_timeoutsetdefaulttimeoutNNTPTemporaryErrorcaptured_outputstream_nameReturn a context manager used by captured_stdout/stdin/stderr + that temporarily replaces the sys stream *stream_name* with a StringIO.orig_stdoutCapture the output of sys.stdout: + + with captured_stdout() as stdout: + print("hello") + self.assertEqual(stdout.getvalue(), "hello\n") + Capture the output of sys.stderr: + + with captured_stderr() as stderr: + print("hello", file=sys.stderr) + self.assertEqual(stderr.getvalue(), "hello\n") + Capture the input to sys.stdin: + + with captured_stdin() as stdin: + stdin.write('hello\n') + stdin.seek(0) + # call test code that consumes from sys.stdin + captured = input() + self.assertEqual(captured, "hello") + Force as many objects as possible to be collected. + + In non-CPython implementations of Python, this is needed because timely + deallocation is not guaranteed by the garbage collector. (Even in CPython + this can be the case in case of reference cycles.) This means that __del__ + methods may be called later than expected and weakrefs may remain alive for + longer than expected. This function tries its best to force all garbage + objects to disappear. 
+ collect0.1disable_gcisenabledhave_gcenablepython_is_optimizedFind if Python was built with optimizations.get_config_varPY_CFLAGScflagsfinal_opt-O-O0-OgnP_header0n_aligngetobjects2P0P_vheadercalcobjsizecalcsizecalcvobjsize_TPFLAGS_HAVE_GC_TPFLAGS_HEAPTYPEcheck_sizeofo_testcapiSIZEOF_PYGC_HEADwrong size for %s: got %d, expected %dcatstrlocalesinnersetlocaleorig_localeloctztzsettzset requiredTZorig_tz_1M_1G_2G_4GMAX_Py_ssize_tlimitgsizes(\d+(\.\d+)?) (K|M|G|T)b?$IGNORECASEVERBOSEInvalid memory limit %rmemlimitMemory limit %r too low to be useful_MemoryWatchdogAn object which periodically watches the process' memory consumption + and prints it out. + /proc/{pid}/statmprocfilestartedr/proc not available for stats: {}memory_watchdog.pywatchdog_scriptPopenDEVNULLmem_watchdogterminatememusedry_runDecorator for bigmem tests. + + 'size' is a requested size for the test (in arbitrary, test-interpreted + units.) 'memuse' is the number of bytes per unit for the test, or a good + estimate of it. For example, a test that needs two byte buffers, of 4 GiB + each, could be decorated with @bigmemtest(size=_4G, memuse=2). + + The 'size' argument is normally passed to the decorated test method as an + extra argument. If 'dry_run' is true, the value passed to the test method + may be less than the requested value. If 'dry_run' is false, it means the + test doesn't support dummy runs when -M is not specified. + 5147not enough memory: %.1fG minimum needed ... expected peak memory use: {peak:.1f}GpeakwatchdogDecorator for tests that fill the address space.63not enough memory: try a 32-bit build insteadrun_idrequires_resourceresource {0!r} is not enabled + Decorator for tests only applicable on CPython. + impl_detailcpythonguards_parse_guardsguardnamesimplementation detail not available on {0}implementation detail specific to {0} or is_trueThis function returns True or False depending on the host platform. + Examples: + if check_impl_detail(): # only on CPython (default) + if check_impl_detail(jython=True): # only on Jython + if check_impl_detail(cpython=False): # everywhere except on CPython + python_implementationno_tracingDecorator to temporarily turn off tracing for the duration of a test.original_tracerefcount_testDecorator for tests which involve reference counting. + + To start, the decorator does not run the test if is not run by CPython. + After that, any trace function is unset during the test to prevent + unexpected refcounts caused by the trace function. + + _filter_suitepredRecursively filter test cases in a suite based on a predicate.newtests_tests_run_suiteRun tests from a unittest.TestSuite-derived class.verbositycapture_outputget_xml_elementtestsRunskippedwasSuccessfulfailuresmultiple errors occurred; run in verbose mode for details_match_test_func_accept_test_patterns_ignore_test_patternsmatch_test_is_full_match_test[?*\[\]]set_match_testsaccept_patternsignore_patternsaccept_funcignore_func_compile_match_functionmatch_functiontest_idacceptpatternsregexregex_matchmatch_test_regexRun tests from unittest.TestCase-derived classes.valid_typesaddTeststr arguments must be keys in sys.modules_check_docstringsJust used to check if docstrings are enabledWITH_DOC_STRINGSMISSING_C_DOCSTRINGSHAVE_DOCSTRINGStest requires docstringsrequires_docstringsoptionflagsRun doctest on the given module. Return (#failures, #tests). + + If optional argument verbosity is not specified (or is None), pass + support's belief about verbosity on to doctest. Else doctest's + usual behavior is used (it searches sys.argv for -v). 
+ doctesttestmod%d of %d doctests faileddoctest (%s) ... %d tests with zero failuresprint_warningWarning -- modules_setupmodules_cleanupoldmodulesenvironment_altered_count_danglingoriginal_values_MAX_COUNTdangling_threadsthreading_cleanup() failed to cleanup "threading_cleanup() failed to cleanup " threads (count: " threads ""(count: ", dangling: ", ""dangling: "Dangling thread: 0.01Use this function when threads are being used. This will + ensure that the threads are cleaned up even when the test fails. + wait_threads_exit60.0 + bpo-31234: Context manager to wait until all threads created in the with + statement exit. + + Use _thread.count() to check if threads exited. Indirectly, wait until + threads exit the internal t_bootstrap() C function of the _thread module. + + threading_setup() and threading_cleanup() are designed to emit a warning + if a test leaves running threads in the background. This context manager + is designed to cleanup threads started by the _thread.start_new_thread() + which doesn't allow to wait for thread exit, whereas thread.Thread has a + join() method. + old_countmonotonicstart_timedeadlinedtwait_threads() failed to cleanup threads after " ""threads after " seconds (count: " seconds ", old count: 0.010join_threadJoin a thread. Raise an AssertionError if the thread is still alive + after timeout seconds. + is_alivefailed to join the thread in secondsUse this function at the end of test_main() whenever sub-processes + are started. This will help ensure that no extra children (zombies) + stick around to hog resources and create problems when looking + for refleaks. + waitpidWNOHANGreap_children() reaped child process threadsunlockCan't start %d threads, only %d threads startedendtimestarttime60Unable to join %d threads during a period of %d minutes'Unable to join %d threads during a period of ''%d minutes'dump_tracebackUnable to join %d threadsnew_valTemporary swap out an attribute with a new object. + + Usage: + with swap_attr(obj, "attr", 5): + ... + + This will set obj.attr to 5 for the duration of the with: block, + restoring the old value at the end of the block. If `attr` doesn't + exist on `obj`, it will be created and then deleted at the end of the + block. + + The old value (or None if it doesn't exist) will be assigned to the + target of the "as" clause, if there is one. + real_valTemporary swap out an item with a new object. + + Usage: + with swap_item(obj, "item", 5): + ... + + This will set obj["item"] to 5 for the duration of the with: block, + restoring the old value at the end of the block. If `item` doesn't + exist on `obj`, it will be created and then deleted at the end of the + block. + + The old value (or None if it doesn't exist) will be assigned to the + target of the "as" clause, if there is one. + strip_python_stderrStrip the stderr of a Python process from potential debug output + emitted by the interpreter. + + This will typically be run on the result of the communicate() method + of a subprocess.Popen object. 
+ \[\d+ refs, \d+ blocks\]\r?\n?br"b"getcountstypes are immortal if COUNT_ALLOCS is definedrequires_type_collectingargs_from_interpreter_flagsReturn a list of command-line arguments reproducing the current + settings in sys.flags and sys.warnoptions._args_from_interpreter_flagsoptim_args_from_interpreter_flagsReturn a list of command-line arguments reproducing the current + optimization settings in sys.flags._optim_args_from_interpreter_flagsBufferingHandlermatchershouldFlushmatches + Look for a saved dict whose keys/values match the supplied arguments. + _partial_matches + Try to match a single dict with the supplied arguments. + + Keys whose values are strings and which are in self._partial_matches + will be checked for partial (i.e. substring) matches. You can extend + this scheme to (for example) do regular expression matching, etc. + dvmatch_value + Try to match a single stored value (dv) with a supplied value (v). + _can_symlinksymlink_pathsymlinkcanSkip decorator for tests that require functional symlinkokRequires functional symlink implementation_buggy_ucrt + Skip decorator for tests that use buggy strptime/strftime + + If the UCRT bugs are present time.localtime().tm_zone will be + an empty string, otherwise we assume the UCRT bugs are fixed + + See bpo-37552 [Windows] strptime/strftime return invalid + results with UCRT version 17763.615 + getdefaultlocalecp65001tm_zonebuggy MSVC UCRT strptime/strftimePythonSymlinkCreates a symlink for the current Python executable_linked_also_link_env_platform_specificlexistsGetModuleFileNamedllhandlesrc_dirdest_dirruntimevcruntime*.dllgetenvPYTHONHOMEis_python_buildPYTHONPATHexc_typeexc_valueexc_tbexfailed to clean up {}: {}_callpythonenvreturncodePIPEcommunicateunexpected return code: {0} (0x{0:08X})call_realcall_link_can_xattrcan_xattrsetxattrtmp_dirmkstemptmp_fptmp_nameuser.testtrusted.foo42kernel_version2.6.(\d{1,2})39Skip decorator for tests that require functional extended attributesno non-broken extended attribute supportskip_if_pgo_taskSkip decorator for tests not run in (non-extended) PGO taskNot run for (non-extended) PGO task_bind_nix_socket_errorDecorator for tests requiring a functional bind() for unix sockets.No UNIX Socketscan_bind_unix_socketRequires a functional unix bind(): %sdirectoryDetects if the file system for the specified directory is case-insensitive.NamedTemporaryFilebase_pathcase_pathsamefileref_apiother_apiReturns the set of items in ref_api not in other_api, except for a + defined list of items to be ignored in this check. + + By default this skips private attributes beginning with '_' but + includes all magic methods, i.e. those starting and ending in '__'. + missing_items__test_casename_of_moduleblacklistAssert that the __all__ variable of 'module' contains all public names. + + The module's public names (its API) are detected automatically based on + whether they match the public name convention and were defined in + 'module'. + + The 'name_of_module' argument can specify (as a string or tuple thereof) + what module(s) an API could be defined in in order to be detected as a + public API. One case for this is when 'module' imports part of its public + API from other modules, possibly a C backend (like 'csv' and its '_csv'). + + The 'extra' argument can be a set of names that wouldn't otherwise be + automatically detected as "public", like objects without a proper + '__module__' attribute. If provided, it will be added to the + automatically detected ones. 
+ + The 'blacklist' argument can be a set of names that must not be treated + as part of the public API even though their names indicate otherwise. + + Usage: + import bar + import foo + import unittest + from test import support + + class MiscTestCase(unittest.TestCase): + def test__all__(self): + support.check__all__(self, foo) + + class OtherTestCase(unittest.TestCase): + def test__all__(self): + extra = {'BAR_CONST', 'FOO_CONST'} + blacklist = {'baz'} # Undocumented name. + # bar imports part of its API from _bar. + support.check__all__(self, bar, ('bar', '_bar'), + extra=extra, blacklist=blacklist) + + expectedassertCountEqualsuppress_msvcrt_assertsmsvcrtSetErrorModeSEM_FAILCRITICALERRORSSEM_NOALIGNMENTFAULTEXCEPTSEM_NOGPFAULTERRORBOXSEM_NOOPENFILEERRORBOXCrtSetReportModeCRT_WARNCRT_ERRORCRT_ASSERTCRTDBG_MODE_FILECrtSetReportFileCRTDBG_FILE_STDERRTry to prevent a crash report from popping up. + + On Windows, don't display the Windows Error Reporting dialog. On UNIX, + disable the creation of coredump file. + old_valueold_modesOn Windows, disable Windows Error Reporting dialogs using + SetErrorMode() and CrtSetReportMode(). + + On UNIX, try to save the previous core file size limit, then set + soft limit to 0. + report_typeold_modeold_filegetrlimitRLIMIT_COREsetrlimit/usr/bin/defaultscom.apple.CrashReporterDialogTypeprocdeveloperthis test triggers the Crash Reporter, that is intentional"this test triggers the Crash Reporter, ""that is intentional"Restore Windows ErrorMode or core file behavior to initial value.patchtest_instanceobject_to_patchnew_valueOverride 'object_to_patch'.'attr_name' with 'new_value'. + + Also, add a cleanup procedure to 'test_instance' to restore + 'object_to_patch' value for 'attr_name'. + The 'attr_name' should be a valid attribute for 'object_to_patch'. + + attr_is_localcleanupaddCleanuprun_in_subinterp + Run code in a subinterpreter. Raise unittest.SkipTest if the tracemalloc + module is enabled. + tracemallocis_tracingrun_in_subinterp() cannot be used if tracemalloc module is tracing memory allocations"run_in_subinterp() cannot be used ""if tracemalloc module is tracing ""memory allocations"check_free_after_iteratingdoneassertRaisescmd_namesCheck if the compiler components used to build the interpreter exist. + + Check for the existence of the compiler executables whose names are listed + in 'cmd_names' or all the compiler executables when 'cmd_names' is empty + and return the first missing executable or None when none is found + missing. + + distutilsccompilerspawnnew_compilercompilercustomize_compilerexecutablesthe '%s' executable is not configuredfind_executable_is_android_emulatorinterval1e-051e-5minimum_intervalcheck_outputgetpropro.kernel.qemudisable_faulthandlerall_threadsCount the number of open file descriptors. + linuxfreebsd/proc/self/fd256MAXFDsysconfSC_OPEN_MAXdupfd2EBADFSaveSignals + Save and restore signal handlers. + + This class is only able to save/restore signal handlers registered + by the Python signal module: see bpo-13285 for "external" signal + handlers. + signalvalid_signalssignameSIGKILLSIGSTOPsignumsavegetsignalrestorewith_pymallocWITH_PYMALLOCFakePathSimple implementing of the path protocol. 
+ 'u''b'exec'u'exec'b'always'u'always'b''u''b'check'u'check'b'urlfetch'u'urlfetch'b' fetching %s ...'u' fetching %s ...'b'Accept-Encoding'u'Accept-Encoding'b'gzip'u'gzip'b'Content-Encoding'u'Content-Encoding'b'invalid resource %r'u'invalid resource %r'b'Convenience wrapper for the warnings list returned on + entry to the warnings.catch_warnings() context manager. + 'u'Convenience wrapper for the warnings list returned on + entry to the warnings.catch_warnings() context manager. + 'b'%r has no attribute %r'u'%r has no attribute %r'b'Catch the warnings, then check if all the expected + warnings have been raised and re-raise unexpected warnings. + If 'quiet' is True, only re-raise the unexpected warnings. + 'u'Catch the warnings, then check if all the expected + warnings have been raised and re-raise unexpected warnings. + If 'quiet' is True, only re-raise the unexpected warnings. + 'b'__warningregistry__'u'__warningregistry__'b'warnings'u'warnings'b'unhandled warning %s'u'unhandled warning %s'b'filter (%r, %s) did not catch any warning'u'filter (%r, %s) did not catch any warning'b'Context manager to silence warnings. + + Accept 2-tuples as positional arguments: + ("message regexp", WarningCategory) + + Optional argument: + - if 'quiet' is True, it does not fail if a filter catches nothing + (default True without argument, + default False if some filters are defined) + + Without argument, it defaults to: + check_warnings(("", Warning), quiet=True) + 'u'Context manager to silence warnings. + + Accept 2-tuples as positional arguments: + ("message regexp", WarningCategory) + + Optional argument: + - if 'quiet' is True, it does not fail if a filter catches nothing + (default True without argument, + default False if some filters are defined) + + Without argument, it defaults to: + check_warnings(("", Warning), quiet=True) + 'b'quiet'u'quiet'b'Context manager to check that no warnings are emitted. + + This context manager enables a given warning within its scope + and checks that no warnings are emitted even with that warning + enabled. + + If force_gc is True, a garbage collection is attempted before checking + for warnings. This may help to catch warnings emitted when objects + are deleted, such as ResourceWarning. + + Other keyword arguments are passed to warnings.filterwarnings(). + 'u'Context manager to check that no warnings are emitted. + + This context manager enables a given warning within its scope + and checks that no warnings are emitted even with that warning + enabled. + + If force_gc is True, a garbage collection is attempted before checking + for warnings. This may help to catch warnings emitted when objects + are deleted, such as ResourceWarning. + + Other keyword arguments are passed to warnings.filterwarnings(). + 'b'Context manager to check that no ResourceWarning is emitted. + + Usage: + + with check_no_resource_warning(self): + f = open(...) + ... + del f + + You must remove the object which may emit ResourceWarning before + the end of the context manager. + 'u'Context manager to check that no ResourceWarning is emitted. + + Usage: + + with check_no_resource_warning(self): + f = open(...) + ... + del f + + You must remove the object which may emit ResourceWarning before + the end of the context manager. + 'b'Context manager to force import to return a new module reference. + + This is useful for testing module-level behaviours, such as + the emission of a DeprecationWarning on import. 
+ + Use like this: + + with CleanImport("foo"): + importlib.import_module("foo") # new reference + 'u'Context manager to force import to return a new module reference. + + This is useful for testing module-level behaviours, such as + the emission of a DeprecationWarning on import. + + Use like this: + + with CleanImport("foo"): + importlib.import_module("foo") # new reference + 'b'Class to help protect the environment variable properly. Can be used as + a context manager.'u'Class to help protect the environment variable properly. Can be used as + a context manager.'b'Context manager to temporarily add directories to sys.path. + + This makes a copy of sys.path, appends any directories given + as positional arguments, then reverts sys.path to the copied + settings when the context ends. + + Note that *all* sys.path modifications in the body of the + context manager, including replacement of the object, + will be reverted at the end of the block. + 'u'Context manager to temporarily add directories to sys.path. + + This makes a copy of sys.path, appends any directories given + as positional arguments, then reverts sys.path to the copied + settings when the context ends. + + Note that *all* sys.path modifications in the body of the + context manager, including replacement of the object, + will be reverted at the end of the block. + 'b'Raise ResourceDenied if an exception is raised while the context manager + is in effect that matches the specified exception and attributes.'u'Raise ResourceDenied if an exception is raised while the context manager + is in effect that matches the specified exception and attributes.'b'If type_ is a subclass of self.exc and value has attributes matching + self.attrs, raise ResourceDenied. Otherwise let the exception + propagate (if any).'u'If type_ is a subclass of self.exc and value has attributes matching + self.attrs, raise ResourceDenied. Otherwise let the exception + propagate (if any).'b'an optional resource is not available'u'an optional resource is not available'b' + Get the different socket error numbers ('errno') which can be received + when a connection is refused. + 'u' + Get the different socket error numbers ('errno') which can be received + when a connection is refused. 
+ 'b'ENETUNREACH'u'ENETUNREACH'b'EADDRNOTAVAIL'u'EADDRNOTAVAIL'b'EHOSTUNREACH'u'EHOSTUNREACH'b'Return a context manager that raises ResourceDenied when various issues + with the Internet connection manifest themselves as exceptions.'u'Return a context manager that raises ResourceDenied when various issues + with the Internet connection manifest themselves as exceptions.'b'ECONNREFUSED'u'ECONNREFUSED'b'ECONNRESET'u'ECONNRESET'b'ETIMEDOUT'u'ETIMEDOUT'b'EAI_AGAIN'u'EAI_AGAIN'b'EAI_FAIL'u'EAI_FAIL'b'EAI_NONAME'u'EAI_NONAME'b'EAI_NODATA'u'EAI_NODATA'b'WSANO_DATA'u'WSANO_DATA'b'Resource %r is not available'u'Resource %r is not available'b'errno'b'ConnectionRefusedError'u'ConnectionRefusedError'b'EOFError'u'EOFError'b'Return a context manager used by captured_stdout/stdin/stderr + that temporarily replaces the sys stream *stream_name* with a StringIO.'u'Return a context manager used by captured_stdout/stdin/stderr + that temporarily replaces the sys stream *stream_name* with a StringIO.'b'Capture the output of sys.stdout: + + with captured_stdout() as stdout: + print("hello") + self.assertEqual(stdout.getvalue(), "hello\n") + 'u'Capture the output of sys.stdout: + + with captured_stdout() as stdout: + print("hello") + self.assertEqual(stdout.getvalue(), "hello\n") + 'b'stdout'u'stdout'b'Capture the output of sys.stderr: + + with captured_stderr() as stderr: + print("hello", file=sys.stderr) + self.assertEqual(stderr.getvalue(), "hello\n") + 'u'Capture the output of sys.stderr: + + with captured_stderr() as stderr: + print("hello", file=sys.stderr) + self.assertEqual(stderr.getvalue(), "hello\n") + 'b'stderr'u'stderr'b'Capture the input to sys.stdin: + + with captured_stdin() as stdin: + stdin.write('hello\n') + stdin.seek(0) + # call test code that consumes from sys.stdin + captured = input() + self.assertEqual(captured, "hello") + 'u'Capture the input to sys.stdin: + + with captured_stdin() as stdin: + stdin.write('hello\n') + stdin.seek(0) + # call test code that consumes from sys.stdin + captured = input() + self.assertEqual(captured, "hello") + 'b'stdin'u'stdin'b'Force as many objects as possible to be collected. + + In non-CPython implementations of Python, this is needed because timely + deallocation is not guaranteed by the garbage collector. (Even in CPython + this can be the case in case of reference cycles.) This means that __del__ + methods may be called later than expected and weakrefs may remain alive for + longer than expected. This function tries its best to force all garbage + objects to disappear. + 'u'Force as many objects as possible to be collected. + + In non-CPython implementations of Python, this is needed because timely + deallocation is not guaranteed by the garbage collector. (Even in CPython + this can be the case in case of reference cycles.) This means that __del__ + methods may be called later than expected and weakrefs may remain alive for + longer than expected. This function tries its best to force all garbage + objects to disappear. + 'b'Find if Python was built with optimizations.'u'Find if Python was built with optimizations.'b'PY_CFLAGS'u'PY_CFLAGS'b'-O'u'-O'b'-O0'u'-O0'b'-Og'u'-Og'b'nP'u'nP'b'0n'u'0n'b'getobjects'u'getobjects'b'2P'u'2P'b'0P'u'0P'b'wrong size for %s: got %d, expected %d'u'wrong size for %s: got %d, expected %d'b'tzset required'u'tzset required'b'TZ'u'TZ'b'k'u'k'b'm'u'm'b'g'u'g'b't'u't'b'(\d+(\.\d+)?) (K|M|G|T)b?$'u'(\d+(\.\d+)?) 
(K|M|G|T)b?$'b'Invalid memory limit %r'u'Invalid memory limit %r'b'Memory limit %r too low to be useful'u'Memory limit %r too low to be useful'b'An object which periodically watches the process' memory consumption + and prints it out. + 'u'An object which periodically watches the process' memory consumption + and prints it out. + 'b'/proc/{pid}/statm'u'/proc/{pid}/statm'b'r'u'r'b'/proc not available for stats: {}'u'/proc not available for stats: {}'b'memory_watchdog.py'u'memory_watchdog.py'b'Decorator for bigmem tests. + + 'size' is a requested size for the test (in arbitrary, test-interpreted + units.) 'memuse' is the number of bytes per unit for the test, or a good + estimate of it. For example, a test that needs two byte buffers, of 4 GiB + each, could be decorated with @bigmemtest(size=_4G, memuse=2). + + The 'size' argument is normally passed to the decorated test method as an + extra argument. If 'dry_run' is true, the value passed to the test method + may be less than the requested value. If 'dry_run' is false, it means the + test doesn't support dummy runs when -M is not specified. + 'u'Decorator for bigmem tests. + + 'size' is a requested size for the test (in arbitrary, test-interpreted + units.) 'memuse' is the number of bytes per unit for the test, or a good + estimate of it. For example, a test that needs two byte buffers, of 4 GiB + each, could be decorated with @bigmemtest(size=_4G, memuse=2). + + The 'size' argument is normally passed to the decorated test method as an + extra argument. If 'dry_run' is true, the value passed to the test method + may be less than the requested value. If 'dry_run' is false, it means the + test doesn't support dummy runs when -M is not specified. + 'b'not enough memory: %.1fG minimum needed'u'not enough memory: %.1fG minimum needed'b' ... expected peak memory use: {peak:.1f}G'u' ... expected peak memory use: {peak:.1f}G'b'Decorator for tests that fill the address space.'u'Decorator for tests that fill the address space.'b'not enough memory: try a 32-bit build instead'u'not enough memory: try a 32-bit build instead'b'resource {0!r} is not enabled'u'resource {0!r} is not enabled'b' + Decorator for tests only applicable on CPython. + 'u' + Decorator for tests only applicable on CPython. + 'b'implementation detail not available on {0}'u'implementation detail not available on {0}'b'implementation detail specific to {0}'u'implementation detail specific to {0}'b' or 'u' or 'b'cpython'u'cpython'b'This function returns True or False depending on the host platform. + Examples: + if check_impl_detail(): # only on CPython (default) + if check_impl_detail(jython=True): # only on Jython + if check_impl_detail(cpython=False): # everywhere except on CPython + 'u'This function returns True or False depending on the host platform. + Examples: + if check_impl_detail(): # only on CPython (default) + if check_impl_detail(jython=True): # only on Jython + if check_impl_detail(cpython=False): # everywhere except on CPython + 'b'Decorator to temporarily turn off tracing for the duration of a test.'u'Decorator to temporarily turn off tracing for the duration of a test.'b'gettrace'u'gettrace'b'Decorator for tests which involve reference counting. + + To start, the decorator does not run the test if is not run by CPython. + After that, any trace function is unset during the test to prevent + unexpected refcounts caused by the trace function. + + 'u'Decorator for tests which involve reference counting. 
+ + To start, the decorator does not run the test if is not run by CPython. + After that, any trace function is unset during the test to prevent + unexpected refcounts caused by the trace function. + + 'b'Recursively filter test cases in a suite based on a predicate.'u'Recursively filter test cases in a suite based on a predicate.'b'Run tests from a unittest.TestSuite-derived class.'u'Run tests from a unittest.TestSuite-derived class.'b'multiple errors occurred'u'multiple errors occurred'b'; run in verbose mode for details'u'; run in verbose mode for details'b'[?*\[\]]'u'[?*\[\]]'b'Run tests from unittest.TestCase-derived classes.'u'Run tests from unittest.TestCase-derived classes.'b'str arguments must be keys in sys.modules'u'str arguments must be keys in sys.modules'b'Just used to check if docstrings are enabled'u'Just used to check if docstrings are enabled'b'WITH_DOC_STRINGS'u'WITH_DOC_STRINGS'b'test requires docstrings'u'test requires docstrings'b'Run doctest on the given module. Return (#failures, #tests). + + If optional argument verbosity is not specified (or is None), pass + support's belief about verbosity on to doctest. Else doctest's + usual behavior is used (it searches sys.argv for -v). + 'u'Run doctest on the given module. Return (#failures, #tests). + + If optional argument verbosity is not specified (or is None), pass + support's belief about verbosity on to doctest. Else doctest's + usual behavior is used (it searches sys.argv for -v). + 'b'%d of %d doctests failed'u'%d of %d doctests failed'b'doctest (%s) ... %d tests with zero failures'u'doctest (%s) ... %d tests with zero failures'b'Warning -- 'u'Warning -- 'b'threading_cleanup() failed to cleanup 'u'threading_cleanup() failed to cleanup 'b' threads (count: 'u' threads (count: 'b', dangling: 'u', dangling: 'b'Dangling thread: 'u'Dangling thread: 'b'Use this function when threads are being used. This will + ensure that the threads are cleaned up even when the test fails. + 'u'Use this function when threads are being used. This will + ensure that the threads are cleaned up even when the test fails. + 'b' + bpo-31234: Context manager to wait until all threads created in the with + statement exit. + + Use _thread.count() to check if threads exited. Indirectly, wait until + threads exit the internal t_bootstrap() C function of the _thread module. + + threading_setup() and threading_cleanup() are designed to emit a warning + if a test leaves running threads in the background. This context manager + is designed to cleanup threads started by the _thread.start_new_thread() + which doesn't allow to wait for thread exit, whereas thread.Thread has a + join() method. + 'u' + bpo-31234: Context manager to wait until all threads created in the with + statement exit. + + Use _thread.count() to check if threads exited. Indirectly, wait until + threads exit the internal t_bootstrap() C function of the _thread module. + + threading_setup() and threading_cleanup() are designed to emit a warning + if a test leaves running threads in the background. This context manager + is designed to cleanup threads started by the _thread.start_new_thread() + which doesn't allow to wait for thread exit, whereas thread.Thread has a + join() method. + 'b'wait_threads() failed to cleanup 'u'wait_threads() failed to cleanup 'b' threads after 'u' threads after 'b' seconds (count: 'u' seconds (count: 'b', old count: 'u', old count: 'b'Join a thread. Raise an AssertionError if the thread is still alive + after timeout seconds. + 'u'Join a thread. 
[Machine-generated database content not reproduced: the remaining files under example/codeql-db/db-python/default/ (string pools and .rel relation data) are produced by the CodeQL Python extractor and contain serialized string literals and docstrings captured from the Python 3.8.13 standard library (test.support, ctypes, ctypes._aix, _ast, _asyncio, concurrent.futures._base). They are opaque binary data belonging to the committed database and are not meaningful as text.]
If None, then there + is no limit on the wait time. + chunksize: The size of the chunks the iterable will be broken into + before being passed to a child process. This argument is only + used by ProcessPoolExecutor; it is ignored by + ThreadPoolExecutor. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. + 'b'Clean-up the resources associated with the Executor. + + It is safe to call this method several times. Otherwise, no other + methods can be called after this one. + + Args: + wait: If True then shutdown will not return until all running + futures have finished executing and the resources used by the + executor have been reclaimed. + 'u'Clean-up the resources associated with the Executor. + + It is safe to call this method several times. Otherwise, no other + methods can be called after this one. + + Args: + wait: If True then shutdown will not return until all running + futures have finished executing and the resources used by the + executor have been reclaimed. + 'b' + Raised when a executor has become non-functional after a severe failure. + 'u' + Raised when a executor has become non-functional after a severe failure. + 'u'concurrent.futures._base'u'futures._base'u'_base'u'Bisection algorithms. + +This module provides support for maintaining a list in sorted order without +having to sort the list after each insertion. For long lists of items with +expensive comparison operations, this can be an improvement over the more +common approach. +'u'/Users/pwntester/.pyenv/versions/3.8.13/lib/python3.8/lib-dynload/_bisect.cpython-38-darwin.so'u'_bisect'bisect_leftbisect_rightinsort_leftinsort_right_bisectBLAKE2B_MAX_DIGEST_SIZEBLAKE2B_MAX_KEY_SIZEBLAKE2B_PERSON_SIZEBLAKE2B_SALT_SIZEBLAKE2S_MAX_DIGEST_SIZEBLAKE2S_MAX_KEY_SIZEBLAKE2S_PERSON_SIZEBLAKE2S_SALT_SIZEu'_blake2b provides BLAKE2b for hashlib +'u'/Users/pwntester/.pyenv/versions/3.8.13/lib/python3.8/lib-dynload/_blake2.cpython-38-darwin.so'u'_blake2'MAX_DIGEST_SIZEMAX_KEY_SIZEPERSON_SIZESALT_SIZEu'Return a new BLAKE2b hash object.'u'blake2b.block_size'block_sizedigestu'blake2b.digest_size'digest_sizehexdigestu'blake2b.name'_blake2.blake2bblake2bu'Return a new BLAKE2s hash object.'u'blake2s.block_size'u'blake2s.digest_size'u'blake2s.name'_blake2.blake2sblake2s_blake2A minimal subset of the locale module used at interpreter startup +(imported by the _io module), in order to reduce startup time. + +Don't import directly from third-party code; use the `locale` module instead! +_localedo_setlocaleUTF-8_getdefaultlocaleCODESETnl_langinfo# On Android langinfo.h and CODESET are missing, and UTF-8 is# always used in mbstowcs() and wcstombs().# This path for legacy systems needs the more complex# getdefaultlocale() function, import the full locale module.# nl_langinfo can return an empty string# when the setting has an invalid value.# Default to UTF-8 in that case because# UTF-8 is the default charset on OSX and# returning nothing will crash the# interpreter.b'A minimal subset of the locale module used at interpreter startup +(imported by the _io module), in order to reduce startup time. + +Don't import directly from third-party code; use the `locale` module instead! +'u'A minimal subset of the locale module used at interpreter startup +(imported by the _io module), in order to reduce startup time. 
+ +Don't import directly from third-party code; use the `locale` module instead! +'b'UTF-8'u'UTF-8'u'_bootlocale'Core implementation of import. + +This module is NOT meant to be directly imported! It has been designed such +that it can be bootstrapped into Python as the implementation of import. As +such it requires the injection of specific modules and attributes in order to +work. One should use importlib as the public-facing version of this module. + +_wrapSimple substitute for functools.update_wrapper._new_module_module_locks_blocking_on_DeadlockError_ModuleLockA recursive lock implementation which is able to detect deadlocks + (e.g. thread 1 trying to take locks A then B, and thread 2 trying to + take locks B then A). + allocate_lockwakeupownerwaitershas_deadlockmetid + Acquire the module lock. If a potential deadlock is detected, + a _DeadlockError is raised. + Otherwise, the lock is always acquired and True is returned. + deadlock detected by %rcannot release un-acquired lock_ModuleLock({!r}) at {}_DummyModuleLockA simple _ModuleLock equivalent for Python builds without + multi-threading support._DummyModuleLock({!r}) at {}_ModuleLockManager_get_module_lockGet or create the module lock for a given module name. + + Acquire/release internally the global import lock to protect + _module_locks.acquire_lockcbrelease_lock_lock_unlock_moduleAcquires then releases the module lock for a given module name. + + This is used to ensure a module is completely initialized, in the + event it is being imported by another thread. + _call_with_frames_removedremove_importlib_frames in import.c will always remove sequences + of importlib frames that end with a call to this function + + Use it instead of a normal call in places where including the importlib + frames introduces unwanted noise into the traceback (e.g. when executing + module code) + _verbose_messagePrint the message to stderr if -v/PYTHONVERBOSE is turned on.import # _requires_builtinfxnDecorator to verify the named module is built-in._requires_builtin_wrapper{!r} is not a built-in module_requires_frozenDecorator to verify the named module is frozen._requires_frozen_wrapperis_frozen{!r} is not a frozen module_load_module_shimLoad the specified module into sys.modules and return it. + + This method is deprecated. Use loader.exec_module instead. + + spec_from_loader_load_module_repr_module_repr_from_specModuleSpecThe specification for a module, used for loading. + + A module's spec is the source for information about the module. For + data associated with the module, including source, use the spec's + loader. + + `name` is the absolute name of the module. `loader` is the loader + to use when loading the module. `parent` is the name of the + package the module is in. The parent is derived from the name. + + `is_package` determines if the module is considered a package or + not. On modules this is reflected by the `__path__` attribute. + + `origin` is the specific location used by the loader from which to + load the module, if that information is available. When filename is + set, origin will match. + + `has_location` indicates that a spec's "origin" reflects a location. + When this is True, `__file__` attribute of the module is set. + + `cached` is the location of the cached bytecode file, if any. It + corresponds to the `__cached__` attribute. + + `submodule_search_locations` is the sequence of path entries to + search when importing submodules. If set, is_package should be + True--and False otherwise. 
+ + Packages are simply modules that (may) have submodules. If a spec + has a non-None value in `submodule_search_locations`, the import + system will consider modules loaded from the spec as packages. + + Only finders (see importlib.abc.MetaPathFinder and + importlib.abc.PathEntryFinder) should modify ModuleSpec instances. + + originloader_state_set_fileattr_cachedname={!r}loader={!r}origin={!r}submodule_search_locations={}{}({})smsl_get_cachedThe name of the module's parent.Return a module spec based on various loader methods.spec_from_file_location_spec_from_module__cached___init_module_attrsoverride_NamespaceLoader_pathmodule_from_specCreate a module based on the provided spec.loaders that define exec_module() must also define create_module()'loaders that define exec_module() ''must also define create_module()'Return the repr to use for the module.Execute the spec's specified module in an existing module's namespace.module {!r} not in sys.modulesmissing loader_load_backward_compatible_load_unlocked_initializingimport {!r} # {!r}Return a new module object, loaded by the spec's loader. + + The module is not added to its parent. + + If a module is already in sys.modules, that existing module gets + clobbered. + + BuiltinImporterMeta path import for built-in modules. + + All methods are either class or static methods to avoid the need to + instantiate the class. + + Return repr for the module. + + The method is deprecated. The import machinery does the job itself. + + is_builtinbuilt-inFind the built-in module. + + If 'path' is ever specified then the search is considered a failure. + + This method is deprecated. Use find_spec() instead. + + Create a built-in modulecreate_builtinExec a built-in moduleexec_builtinReturn None as built-in modules do not have code objects.Return None as built-in modules do not have source code.Return False as built-in modules are never packages.FrozenImporterMeta path import for frozen modules. + + All methods are either class or static methods to avoid the need to + instantiate the class. + + frozenFind a frozen module. + + This method is deprecated. Use find_spec() instead. + + Use default semantics for module creation.get_frozen_objectLoad a frozen module. + + This method is deprecated. Use exec_module() instead. + + Return the code object for the frozen module.Return None as frozen modules do not have source code.Return True if the frozen module is a package.is_frozen_package_ImportLockContextContext manager for the import lock.Acquire the import lock.Release the import lock regardless of any raised exceptions._resolve_nameResolve a relative module name to an absolute one.attempted relative import beyond top-level package{}.{}_find_spec_legacyFind a module's spec.sys.meta_path is None, Python is likely shutting down"sys.meta_path is None, Python is likely ""shutting down"sys.meta_path is emptyis_reload_sanity_checkVerify arguments are "sane".module name must be str, not {}level must be >= 0__package__ not set to a stringattempted relative import with no known parent package'attempted relative import with no known parent ''package'Empty module nameNo module named _ERR_MSG_PREFIX{!r}_ERR_MSG_find_and_load_unlockedimport_parent_module; {!r} is not a package_NEEDS_LOADING_find_and_loadFind and load the module.import of {} halted; None in sys.modules'import of {} halted; ''None in sys.modules'Import and return the module based on its name, the package the call is + being made from, and the level adjustment. 
+ + This function represents the greatest common denominator of functionality + between import_module and __import__. This includes setting __package__ if + the loader did not. + + _handle_fromlistrecursiveFigure out what __import__ should return. + + The import_ parameter is a callable which takes the name of module to + import. It is required to decouple the function from assuming importlib's + import implementation is desired. + + .__all__where``from list''Item in must be str, not " must be str, ""not "from_name_calc___package__Calculate what __package__ should be. + + __package__ is not guaranteed to be defined or could be set to None + to represent that its proper value is unknown. + + __package__ != __spec__.parent ("__package__ != __spec__.parent ""(" != can't resolve package from __spec__ or __package__, falling back on __name__ and __path__"can't resolve package from __spec__ or __package__, ""falling back on __name__ and __path__"Import a module. + + The 'globals' argument is used to infer where the import is occurring from + to handle relative imports. The 'locals' argument is ignored. The + 'fromlist' argument specifies what should exist as attributes on the module + being imported (e.g. ``from module import ``). The 'level' + argument represents the package location to import from in a relative + import (e.g. ``from ..pkg import mod`` would have a 'level' of 2). + + globals_cut_off_builtin_from_nameno built-in module named sys_module_imp_moduleSetup importlib by importing needed built-in modules and injecting them + into the global namespace. + + As sys is needed for sys.modules access and _imp is needed to load built-in + modules, those two modules must be explicitly passed in. + + module_typeself_modulebuiltin_name_installInstall importers for builtin and frozen modules_install_external_importersInstall importers that require external filesystem access# IMPORTANT: Whenever making changes to this module, be sure to run a top-level# `make regen-importlib` followed by `make` in order to get the frozen version# of the module updated. Not doing so will result in the Makefile to fail for# all others who don't have a ./python around to freeze the module# in the early stages of compilation.# See importlib._setup() for what is injected into the global namespace.# When editing this code be aware that code executed at import time CANNOT# reference any injected objects! 
This includes not only global code but also# anything specified at the class level.# Bootstrap-related code ####################################################### Module-level locking ######################################################### A dict mapping module names to weakrefs of _ModuleLock instances# Dictionary protected by the global import lock# A dict mapping thread ids to _ModuleLock instances# Deadlock avoidance for concurrent circular imports.# Wait for a release() call# The following two functions are for consumption by Python/import.c.# bpo-31070: Check if another thread created a new lock# after the previous lock was destroyed# but before the weakref callback was called.# Concurrent circular import, we'll accept a partially initialized# module object.# Frame stripping magic ################################################ Typically used by loader classes as a method replacement.# Module specifications ######################################################## The implementation of ModuleType.__repr__().# As soon as BuiltinImporter, FrozenImporter, and NamespaceLoader# drop their implementations for module_repr. we can add a# deprecation warning here.# We could use module.__class__.__name__ instead of 'module' in the# various repr permutations.# file-location attributes# aka, undefined# the default# This function is meant for use in _setup().# loader will stay None.# The passed-in module may be not support attribute assignment,# in which case we simply don't set the attributes.# __name__# __loader__# A backward compatibility hack.# While the docs say that module.__file__ is not set for# built-in modules, and the code below will avoid setting it if# spec.has_location is false, this is incorrect for namespace# packages. Namespace packages have no location, but their# __spec__.origin is None, and thus their module.__file__# should also be None for consistency. While a bit of a hack,# this is the best place to ensure this consistency.# See # https://docs.python.org/3/library/importlib.html#importlib.abc.Loader.load_module# and bpo-32305# __package__# __spec__# __path__# __file__/__cached__# Typically loaders will not implement create_module().# If create_module() returns `None` then it means default# module creation should be used.# We mostly replicate _module_repr() using the spec attributes.# Used by importlib.reload() and _load_module_shim().# Namespace package.# (issue19713) Once BuiltinImporter and ExtensionFileLoader# have exec_module() implemented, we can add a deprecation# warning here.# Update the order of insertion into sys.modules for module# clean-up at shutdown.# The module must be in sys.modules at this point!# Move it to the end of sys.modules.# Since module.__path__ may not line up with# spec.submodule_search_paths, we can't necessarily rely# on spec.parent here.# A helper for direct use by the import system.# Not a namespace package.# This must be done before putting the module in sys.modules# (otherwise an optimization shortcut in import.c becomes# wrong).# A namespace package so do nothing.# Move the module to the end of sys.modules.# We don't ensure that the import-related module attributes get# set in the sys.modules replacement case. 
Such modules are on# their own.# A method used during testing of _load_unlocked() and by# _load_module_shim().# Loaders ###################################################################### Import itself ################################################################ This would be a good place for a DeprecationWarning if# we ended up going that route.# PyImport_Cleanup() is running or has been called.# We check sys.modules here for the reload case. While a passed-in# target will usually indicate a reload there is no guarantee, whereas# sys.modules provides one.# The parent import may have already imported this module.# We use the found spec since that is the one that# we would have used if the parent module hadn't# beaten us to the punch.# Crazy side-effects!# Set the module as an attribute on its parent.# The hell that is fromlist ...# If a package was imported, try to import stuff from fromlist.# Backwards-compatibility dictates we ignore failed# imports triggered by fromlist for modules that don't# exist.# Return up to the first dot in 'name'. This is complicated by the fact# that 'name' may be relative.# Figure out where to slice the module's name up to the first dot# in 'name'.# Slice end needs to be positive to alleviate need to special-case# when ``'.' not in name``.# Set up the spec for existing builtin/frozen modules.# Directly load built-in modules needed during bootstrap.b'Core implementation of import. + +This module is NOT meant to be directly imported! It has been designed such +that it can be bootstrapped into Python as the implementation of import. As +such it requires the injection of specific modules and attributes in order to +work. One should use importlib as the public-facing version of this module. + +'u'Core implementation of import. + +This module is NOT meant to be directly imported! It has been designed such +that it can be bootstrapped into Python as the implementation of import. As +such it requires the injection of specific modules and attributes in order to +work. One should use importlib as the public-facing version of this module. + +'b'Simple substitute for functools.update_wrapper.'u'Simple substitute for functools.update_wrapper.'b'__qualname__'u'__qualname__'b'A recursive lock implementation which is able to detect deadlocks + (e.g. thread 1 trying to take locks A then B, and thread 2 trying to + take locks B then A). + 'u'A recursive lock implementation which is able to detect deadlocks + (e.g. thread 1 trying to take locks A then B, and thread 2 trying to + take locks B then A). + 'b' + Acquire the module lock. If a potential deadlock is detected, + a _DeadlockError is raised. + Otherwise, the lock is always acquired and True is returned. + 'u' + Acquire the module lock. If a potential deadlock is detected, + a _DeadlockError is raised. + Otherwise, the lock is always acquired and True is returned. + 'b'deadlock detected by %r'u'deadlock detected by %r'b'cannot release un-acquired lock'u'cannot release un-acquired lock'b'_ModuleLock({!r}) at {}'u'_ModuleLock({!r}) at {}'b'A simple _ModuleLock equivalent for Python builds without + multi-threading support.'u'A simple _ModuleLock equivalent for Python builds without + multi-threading support.'b'_DummyModuleLock({!r}) at {}'u'_DummyModuleLock({!r}) at {}'b'Get or create the module lock for a given module name. + + Acquire/release internally the global import lock to protect + _module_locks.'u'Get or create the module lock for a given module name. 
+ + Acquire/release internally the global import lock to protect + _module_locks.'b'Acquires then releases the module lock for a given module name. + + This is used to ensure a module is completely initialized, in the + event it is being imported by another thread. + 'u'Acquires then releases the module lock for a given module name. + + This is used to ensure a module is completely initialized, in the + event it is being imported by another thread. + 'b'remove_importlib_frames in import.c will always remove sequences + of importlib frames that end with a call to this function + + Use it instead of a normal call in places where including the importlib + frames introduces unwanted noise into the traceback (e.g. when executing + module code) + 'u'remove_importlib_frames in import.c will always remove sequences + of importlib frames that end with a call to this function + + Use it instead of a normal call in places where including the importlib + frames introduces unwanted noise into the traceback (e.g. when executing + module code) + 'b'Print the message to stderr if -v/PYTHONVERBOSE is turned on.'u'Print the message to stderr if -v/PYTHONVERBOSE is turned on.'b'import 'u'import 'b'# 'u'# 'b'Decorator to verify the named module is built-in.'u'Decorator to verify the named module is built-in.'b'{!r} is not a built-in module'u'{!r} is not a built-in module'b'Decorator to verify the named module is frozen.'u'Decorator to verify the named module is frozen.'b'{!r} is not a frozen module'u'{!r} is not a frozen module'b'Load the specified module into sys.modules and return it. + + This method is deprecated. Use loader.exec_module instead. + + 'u'Load the specified module into sys.modules and return it. + + This method is deprecated. Use loader.exec_module instead. + + 'b'__loader__'u'__loader__'b'module_repr'u'module_repr'b''u''b''u''b''u''b'The specification for a module, used for loading. + + A module's spec is the source for information about the module. For + data associated with the module, including source, use the spec's + loader. + + `name` is the absolute name of the module. `loader` is the loader + to use when loading the module. `parent` is the name of the + package the module is in. The parent is derived from the name. + + `is_package` determines if the module is considered a package or + not. On modules this is reflected by the `__path__` attribute. + + `origin` is the specific location used by the loader from which to + load the module, if that information is available. When filename is + set, origin will match. + + `has_location` indicates that a spec's "origin" reflects a location. + When this is True, `__file__` attribute of the module is set. + + `cached` is the location of the cached bytecode file, if any. It + corresponds to the `__cached__` attribute. + + `submodule_search_locations` is the sequence of path entries to + search when importing submodules. If set, is_package should be + True--and False otherwise. + + Packages are simply modules that (may) have submodules. If a spec + has a non-None value in `submodule_search_locations`, the import + system will consider modules loaded from the spec as packages. + + Only finders (see importlib.abc.MetaPathFinder and + importlib.abc.PathEntryFinder) should modify ModuleSpec instances. 
+ + 'b'name={!r}'u'name={!r}'b'loader={!r}'u'loader={!r}'b'origin={!r}'u'origin={!r}'b'submodule_search_locations={}'u'submodule_search_locations={}'b'{}({})'u'{}({})'b'The name of the module's parent.'u'The name of the module's parent.'b'Return a module spec based on various loader methods.'u'Return a module spec based on various loader methods.'b'get_filename'u'get_filename'b'is_package'u'is_package'b'__package__'u'__package__'b'__path__'u'__path__'b'__file__'u'__file__'b'__cached__'u'__cached__'b'Create a module based on the provided spec.'u'Create a module based on the provided spec.'b'create_module'u'create_module'b'exec_module'u'exec_module'b'loaders that define exec_module() must also define create_module()'u'loaders that define exec_module() must also define create_module()'b'Return the repr to use for the module.'u'Return the repr to use for the module.'b''u''b'Execute the spec's specified module in an existing module's namespace.'u'Execute the spec's specified module in an existing module's namespace.'b'module {!r} not in sys.modules'u'module {!r} not in sys.modules'b'missing loader'u'missing loader'b'__spec__'u'__spec__'b'import {!r} # {!r}'u'import {!r} # {!r}'b'Return a new module object, loaded by the spec's loader. + + The module is not added to its parent. + + If a module is already in sys.modules, that existing module gets + clobbered. + + 'u'Return a new module object, loaded by the spec's loader. + + The module is not added to its parent. + + If a module is already in sys.modules, that existing module gets + clobbered. + + 'b'Meta path import for built-in modules. + + All methods are either class or static methods to avoid the need to + instantiate the class. + + 'b'Return repr for the module. + + The method is deprecated. The import machinery does the job itself. + + 'u'Return repr for the module. + + The method is deprecated. The import machinery does the job itself. + + 'b''u''b'built-in'u'built-in'b'Find the built-in module. + + If 'path' is ever specified then the search is considered a failure. + + This method is deprecated. Use find_spec() instead. + + 'u'Find the built-in module. + + If 'path' is ever specified then the search is considered a failure. + + This method is deprecated. Use find_spec() instead. + + 'b'Create a built-in module'u'Create a built-in module'b'Exec a built-in module'u'Exec a built-in module'b'Return None as built-in modules do not have code objects.'u'Return None as built-in modules do not have code objects.'b'Return None as built-in modules do not have source code.'u'Return None as built-in modules do not have source code.'b'Return False as built-in modules are never packages.'u'Return False as built-in modules are never packages.'b'Meta path import for frozen modules. + + All methods are either class or static methods to avoid the need to + instantiate the class. + + 'b'frozen'b'Find a frozen module. + + This method is deprecated. Use find_spec() instead. + + 'u'Find a frozen module. + + This method is deprecated. Use find_spec() instead. + + 'b'Use default semantics for module creation.'u'Use default semantics for module creation.'b'Load a frozen module. + + This method is deprecated. Use exec_module() instead. + + 'u'Load a frozen module. + + This method is deprecated. Use exec_module() instead. 
+ + 'b'Return the code object for the frozen module.'u'Return the code object for the frozen module.'b'Return None as frozen modules do not have source code.'u'Return None as frozen modules do not have source code.'b'Return True if the frozen module is a package.'u'Return True if the frozen module is a package.'b'Context manager for the import lock.'u'Context manager for the import lock.'b'Acquire the import lock.'u'Acquire the import lock.'b'Release the import lock regardless of any raised exceptions.'u'Release the import lock regardless of any raised exceptions.'b'Resolve a relative module name to an absolute one.'u'Resolve a relative module name to an absolute one.'b'attempted relative import beyond top-level package'u'attempted relative import beyond top-level package'b'{}.{}'u'{}.{}'b'Find a module's spec.'u'Find a module's spec.'b'sys.meta_path is None, Python is likely shutting down'u'sys.meta_path is None, Python is likely shutting down'b'sys.meta_path is empty'u'sys.meta_path is empty'b'Verify arguments are "sane".'u'Verify arguments are "sane".'b'module name must be str, not {}'u'module name must be str, not {}'b'level must be >= 0'u'level must be >= 0'b'__package__ not set to a string'u'__package__ not set to a string'b'attempted relative import with no known parent package'u'attempted relative import with no known parent package'b'Empty module name'u'Empty module name'b'No module named 'u'No module named 'b'{!r}'u'{!r}'b'; {!r} is not a package'u'; {!r} is not a package'b'Find and load the module.'u'Find and load the module.'b'import of {} halted; None in sys.modules'u'import of {} halted; None in sys.modules'b'Import and return the module based on its name, the package the call is + being made from, and the level adjustment. + + This function represents the greatest common denominator of functionality + between import_module and __import__. This includes setting __package__ if + the loader did not. + + 'u'Import and return the module based on its name, the package the call is + being made from, and the level adjustment. + + This function represents the greatest common denominator of functionality + between import_module and __import__. This includes setting __package__ if + the loader did not. + + 'b'Figure out what __import__ should return. + + The import_ parameter is a callable which takes the name of module to + import. It is required to decouple the function from assuming importlib's + import implementation is desired. + + 'u'Figure out what __import__ should return. + + The import_ parameter is a callable which takes the name of module to + import. It is required to decouple the function from assuming importlib's + import implementation is desired. + + 'b'.__all__'u'.__all__'b'``from list'''u'``from list'''b'Item in 'u'Item in 'b' must be str, not 'u' must be str, not 'b'__all__'u'__all__'b'Calculate what __package__ should be. + + __package__ is not guaranteed to be defined or could be set to None + to represent that its proper value is unknown. + + 'u'Calculate what __package__ should be. + + __package__ is not guaranteed to be defined or could be set to None + to represent that its proper value is unknown. + + 'b'__package__ != __spec__.parent ('u'__package__ != __spec__.parent ('b' != 'u' != 'b'can't resolve package from __spec__ or __package__, falling back on __name__ and __path__'u'can't resolve package from __spec__ or __package__, falling back on __name__ and __path__'b'Import a module. 
+ + The 'globals' argument is used to infer where the import is occurring from + to handle relative imports. The 'locals' argument is ignored. The + 'fromlist' argument specifies what should exist as attributes on the module + being imported (e.g. ``from module import ``). The 'level' + argument represents the package location to import from in a relative + import (e.g. ``from ..pkg import mod`` would have a 'level' of 2). + + 'u'Import a module. + + The 'globals' argument is used to infer where the import is occurring from + to handle relative imports. The 'locals' argument is ignored. The + 'fromlist' argument specifies what should exist as attributes on the module + being imported (e.g. ``from module import ``). The 'level' + argument represents the package location to import from in a relative + import (e.g. ``from ..pkg import mod`` would have a 'level' of 2). + + 'b'no built-in module named 'u'no built-in module named 'b'Setup importlib by importing needed built-in modules and injecting them + into the global namespace. + + As sys is needed for sys.modules access and _imp is needed to load built-in + modules, those two modules must be explicitly passed in. + + 'u'Setup importlib by importing needed built-in modules and injecting them + into the global namespace. + + As sys is needed for sys.modules access and _imp is needed to load built-in + modules, those two modules must be explicitly passed in. + + 'b'_thread'b'_warnings'b'_weakref'b'Install importers for builtin and frozen modules'u'Install importers for builtin and frozen modules'b'Install importers that require external filesystem access'u'Install importers that require external filesystem access'u'_bootstrap'Core implementation of path-based import. + +This module is NOT meant to be directly imported! It has been designed such +that it can be bootstrapped into Python as the implementation of import. As +such it requires the injection of specific modules and attributes in order to +work. One should use importlib as the public-facing version of this module. + +_iomarshal_MS_WINDOWSwinregpath_separatorspath_seppath_sep_tuplesetcomp_pathseps_with_colon_CASE_INSENSITIVE_PLATFORMS_STR_KEY_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY_CASE_INSENSITIVE_PLATFORMS_make_relax_casePYTHONCASEOK_relax_caseTrue if filenames must be checked case-insensitively.Convert a 32-bit integer to little-endian.42949672950xFFFFFFFFlittleConvert 4 bytes in little-endian to an integer._unpack_uint16Convert 2 bytes in little-endian to an integer._path_joinpath_partsReplacement for os.path.join().new_root_path_splitroot_path_splitReplacement for os.path.split()._path_statStat the path. + + Made a separate function to make it easier to override in experiments + (e.g. cache stat results). + + _path_is_mode_typeTest whether the path is the specified mode type.stat_info614400o170000_path_isfileReplacement for os.path.isfile.327680o100000_path_isdirReplacement for os.path.isdir.163840o040000_path_isabsReplacement for os.path.isabs.\\_write_atomic4380o666Best-effort function to write data to a path atomically. + Be prepared to handle a FileExistsError if concurrent writing of the + temporary file is attempted.path_tmpO_EXCLFileIO_code_type3413MAGIC_NUMBER_RAW_MAGIC_NUMBER__pycache___PYCACHEopt-_OPTSOURCE_SUFFIXESBYTECODE_SUFFIXESDEBUG_BYTECODE_SUFFIXESOPTIMIZED_BYTECODE_SUFFIXESdebug_overrideGiven the path to a .py file, return the path to its .pyc file. 
+ + The .py file does not need to exist; this simply returns the path to the + .pyc file calculated as if the .py file were imported. + + The 'optimization' parameter controls the presumed optimization level of + the bytecode file. If 'optimization' is not None, the string representation + of the argument is taken and verified to be alphanumeric (else ValueError + is raised). + + The debug_override parameter is deprecated. If debug_override is not None, + a True value is the same as setting 'optimization' to the empty string + while a False value is equivalent to setting 'optimization' to '1'. + + If sys.implementation.cache_tag is None then NotImplementedError is raised. + + the debug_override parameter is deprecated; use 'optimization' instead'the debug_override parameter is deprecated; use '"'optimization' instead"debug_override or optimization must be set to Noneheadrestcache_tagsys.implementation.cache_tag is Nonealmost_filename{!r} is not alphanumeric{}.{}{}source_from_cacheGiven the path to a .pyc. file, return the path to its .py file. + + The .pyc file does not need to exist; this simply returns the path to + the .py file calculated to correspond to the .pyc file. If path does + not conform to PEP 3147/488 format, ValueError will be raised. If + sys.implementation.cache_tag is None then NotImplementedError is raised. + + pycache_filenamefound_in_pycache_prefixstripped_pathpycache not bottom-level directory in ' not bottom-level directory in 'dot_countexpected only 2 or 3 dots in optimization portion of filename does not start with "optimization portion of filename does not start ""with "opt_leveloptimization level is not an alphanumeric value" is not an ""alphanumeric value"base_filename_get_sourcefilebytecode_pathConvert a bytecode file path to a source path (if possible). + + This function exists purely for backwards-compatibility for + PyImport_ExecCodeModuleWithFilenames() in the C API. + + extensionpysource_path_calc_modeCalculate the mode permissions for a bytecode file.0o200_check_nameDecorator to verify that the module being requested matches the one the + loader can handle. + + The first argument (self) must define _name which the second argument is + compared against. If the comparison fails then ImportError is raised. + + _check_name_wrapperloader for %s cannot handle %s_find_module_shimTry to find a loader for the specified module by delegating to + self.find_loader(). + + This method is deprecated in favor of finder.find_spec(). + + portionsNot importing directory {}: missing __init___classify_pycexc_detailsPerform basic validity checking of a pyc header and return the flags field, + which determines how the pyc should be further validated against the source. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required, though.) + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + ImportError is raised when the magic number is incorrect or when the flags + field is invalid. EOFError is raised when the data is found to be truncated. + + magicbad magic number in reached EOF while reading pyc header of 0b11invalid flags in _validate_timestamp_pycsource_mtimesource_sizeValidate a pyc against the source last-modified time. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required.) + + *source_mtime* is the last modified timestamp of the source file. 
+ + *source_size* is None or the size of the source file in bytes. + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + An ImportError is raised if the bytecode is stale. + + bytecode is stale for _validate_hash_pycsource_hashValidate a hash-based pyc by checking the real source hash against the one in + the pyc header. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required.) + + *source_hash* is the importlib.util.source_hash() of the source file. + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + An ImportError is raised if the bytecode is stale. + + hash in bytecode doesn't match hash of source _compile_bytecodeCompile bytecode as found in a pyc.loadscode object from {!r}_fix_co_filenameNon-code object in {!r}_code_to_timestamp_pycmtimeProduce the data for a timestamp-based pyc.dumps_code_to_hash_pyccheckedProduce the data for a hash-based pyc.0b1decode_sourcesource_bytesDecode bytes representing source code and return the string. + + Universal newline support is used in the decoding. + tokenizesource_bytes_readlinedetect_encodingIncrementalNewlineDecodernewline_decoder_POPULATEReturn a module spec based on a file location. + + To indicate that the module is a package, set + submodule_search_locations to a list of directory paths. An + empty list is sufficient, though its not otherwise useful to the + import system. + + The loader must take a spec as its only __init__() arg. + + loader_classsuffixes_get_supported_file_loadersWindowsRegistryFinderMeta path finder for modules declared in the Windows registry.Software\Python\PythonCore\{sys_version}\Modules\{fullname}'Software\\Python\\PythonCore\\{sys_version}''\\Modules\\{fullname}'REGISTRY_KEYSoftware\Python\PythonCore\{sys_version}\Modules\{fullname}\Debug'\\Modules\\{fullname}\\Debug'REGISTRY_KEY_DEBUGDEBUG_BUILD_open_registry_winregOpenKeyHKEY_CURRENT_USERHKEY_LOCAL_MACHINE_search_registryregistry_key%d.%dsys_versionhkeyQueryValuefilepathFind module named in the registry. + + This method is deprecated. Use exec_module() instead. + + _LoaderBasicsBase class of common code needed by both SourceLoader and + SourcelessFileLoader.Concrete implementation of InspectLoader.is_package by checking if + the path returned by get_filename has a filename of '__init__.py'.filename_basetail_nameExecute the module.cannot load module {!r} when get_code() returns None'cannot load module {!r} when get_code() ''returns None'This module is deprecated.SourceLoaderpath_mtimeOptional method that returns the modification time (an int) for the + specified path (a str). + + Raises OSError when the path cannot be handled. + path_statsOptional method returning a metadata dict for the specified + path (a str). + + Possible keys: + - 'mtime' (mandatory) is the numeric timestamp of last source + code modification; + - 'size' (optional) is the size in bytes of the source code. + + Implementing this method allows the loader to read bytecode files. + Raises OSError when the path cannot be handled. + _cache_bytecodecache_pathOptional method which writes data (bytes) to a file path (a str). + + Implementing this method allows for the writing of bytecode files. 
+ + The source path is needed in order to correctly transfer permissions + set_dataOptional method which writes data (bytes) to a file path (a str). + + Implementing this method allows for the writing of bytecode files. + Concrete implementation of InspectLoader.get_source.source not available through get_data()source_to_code_optimizeReturn the code object compiled from source. + + The 'data' argument can be any object type that compile() supports. + dont_inheritConcrete implementation of InspectLoader.get_code. + + Reading of bytecode requires path_stats to be implemented. To write + bytecode, set_data must also be implemented. + + hash_basedcheck_sourcestbytes_data0b10check_hash_based_pycsnever{} matches {}code_objectcode object from {}FileLoaderBase file loader class which implements the loader protocol methods that + require file system usage.Cache the module name and the path to the file found by the + finder.Load a module from a file. + + This method is deprecated. Use exec_module() instead. + + Return the path to the source file as found by the finder.Return the data from path as raw bytes.ExtensionFileLoaderopen_codeSourceFileLoaderConcrete implementation of SourceLoader using the file system.Return the metadata for the path.st_mtimest_size_modeWrite bytes data to a file.could not create {!r}: {!r}created {!r}SourcelessFileLoaderLoader which handles sourceless file imports.Return None as there is no source code.EXTENSION_SUFFIXESLoader for extension modules. + + The constructor is designed to work with FileFinder. + + Create an unitialized extension modulecreate_dynamicextension module {!r} loaded from {!r}Initialize an extension moduleexec_dynamicextension module {!r} executed from {!r}Return True if the extension module is a package.file_nameReturn None as an extension module cannot create a code object.Return None as extension modules have no source code._NamespacePathRepresents a namespace package's path. It uses the module name + to find its parent module, and from there it looks up the parent's + __path__. When this changes, the module's own path is recomputed, + using path_finder. For top-level modules, the parent module's path + is sys.path.path_finder_get_parent_path_last_parent_path_path_finder_find_parent_path_namesReturns a tuple of (parent-module-name, parent-path-attr-name)dotparent_module_namepath_attr_name_recalculateparent_path_NamespacePath({!r})Load a namespace module. + + This method is deprecated. Use exec_module() instead. + + namespace module loaded with path {!r}PathFinderMeta path finder for sys.path and package __path__ attributes.Call the invalidate_caches() method on all path entry finders + stored in sys.path_importer_caches (where implemented).Search sys.path_hooks for a finder for 'path'.sys.path_hooks is emptyhookGet the finder for the path entry from sys.path_importer_cache. + + If the path entry is not in the cache, find the appropriate finder + and cache it. If no finder is available, store None. + + Find the loader or namespace_path for this module/package name.namespace_pathspec missing loaderTry to find a spec for 'fullname' on sys.path or 'path'. + + The search is based on sys.path_hooks and sys.path_importer_cache. + find the module on sys.path or 'path' based on sys.path_hooks and + sys.path_importer_cache. + + This method is deprecated. Use find_spec() instead. + + + Find distributions. 
+ + Return an iterable of all Distribution instances capable of + loading the metadata for packages matching ``context.name`` + (or all names if ``None`` indicated) along the paths in the list + of directories ``context.path``. + importlib.metadataMetadataPathFinderFileFinderFile-based finder. + + Interactions with the file system are cached for performance, being + refreshed when the directory the finder is handling has been modified. + + loader_detailsInitialize with the path to search on and a variable number of + 2-tuples containing the loader and the file suffixes the loader + recognizes.loaders_loaders_path_mtime_path_cache_relaxed_path_cacheInvalidate the directory mtime.Try to find a loader for the specified module, or the namespace + package portions. Returns (loader, list-of-portions). + + This method is deprecated. Use find_spec() instead. + + Try to find a spec for the specified module. + + Returns the matching spec, or None if not found. + is_namespacetail_module_fill_cachecachecache_moduleinit_filenamefull_pathtrying {}possible namespace for {}Fill the cache of potential modules and packages for this directory.lower_suffix_contentsnew_namepath_hookA class method which returns a closure to use on sys.path_hook + which will return an instance using the specified loaders and the path + called on the closure. + + If the path called on the closure is not a directory, ImportError is + raised. + + Path hook for importlib.machinery.FileFinder.only directories are supportedFileFinder({!r})_fix_up_modulecpathnameReturns a list of file-based module loaders. + + Each item is a tuple (loader, suffixes). + extension_suffixesextensionsbytecode_bootstrap_moduleSetup the path-based importers for importlib by importing needed + built-in modules and injecting them into the global namespace. + + Other components are extracted from the core bootstrap module. + + os_detailsbuiltin_osos_moduleimportlib requires posix or ntthread_moduleweakref_modulewinreg_module.pyw_d.pydInstall the path-based import components.supported_loaders# all others who don't have a ./python around to freeze the module in the early# stages of compilation.# Import builtin modules# Assumption made in _path_join()# Drive relative paths have to be resolved by the OS, so we reset the# tail but do not add a path_sep prefix.# Avoid losing the root's trailing separator when joining with nothing# id() is used to generate a pseudo-random filename.# We first write data to a temporary file, and then use os.replace() to# perform an atomic rename.# Finder/loader utility code ################################################ Magic word to reject .pyc files generated by other Python versions.# It should change for each incompatible change to the bytecode.# The value of CR and LF is incorporated so if you ever read or write# a .pyc file in text mode the magic number will be wrong; also, the# Apple MPW compiler swaps their values, botching string constants.# There were a variety of old schemes for setting the magic number.# The current working scheme is to increment the previous value by# 10.# Starting with the adoption of PEP 3147 in Python 3.2, every bump in magic# number also includes a new "magic tag", i.e. a human readable string used# to represent the magic number in __pycache__ directories. When you change# the magic number, you must also set a new unique magic tag. 
Generally this# can be named after the Python major version of the magic number bump, but# it can really be anything, as long as it's different than anything else# that's come before. The tags are included in the following table, starting# with Python 3.2a0.# Known values:# Python 1.5: 20121# Python 1.5.1: 20121# Python 1.5.2: 20121# Python 1.6: 50428# Python 2.0: 50823# Python 2.0.1: 50823# Python 2.1: 60202# Python 2.1.1: 60202# Python 2.1.2: 60202# Python 2.2: 60717# Python 2.3a0: 62011# Python 2.3a0: 62021# Python 2.3a0: 62011 (!)# Python 2.4a0: 62041# Python 2.4a3: 62051# Python 2.4b1: 62061# Python 2.5a0: 62071# Python 2.5a0: 62081 (ast-branch)# Python 2.5a0: 62091 (with)# Python 2.5a0: 62092 (changed WITH_CLEANUP opcode)# Python 2.5b3: 62101 (fix wrong code: for x, in ...)# Python 2.5b3: 62111 (fix wrong code: x += yield)# Python 2.5c1: 62121 (fix wrong lnotab with for loops and# storing constants that should have been removed)# Python 2.5c2: 62131 (fix wrong code: for x, in ... in listcomp/genexp)# Python 2.6a0: 62151 (peephole optimizations and STORE_MAP opcode)# Python 2.6a1: 62161 (WITH_CLEANUP optimization)# Python 2.7a0: 62171 (optimize list comprehensions/change LIST_APPEND)# Python 2.7a0: 62181 (optimize conditional branches:# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE)# Python 2.7a0 62191 (introduce SETUP_WITH)# Python 2.7a0 62201 (introduce BUILD_SET)# Python 2.7a0 62211 (introduce MAP_ADD and SET_ADD)# Python 3000: 3000# 3010 (removed UNARY_CONVERT)# 3020 (added BUILD_SET)# 3030 (added keyword-only parameters)# 3040 (added signature annotations)# 3050 (print becomes a function)# 3060 (PEP 3115 metaclass syntax)# 3061 (string literals become unicode)# 3071 (PEP 3109 raise changes)# 3081 (PEP 3137 make __file__ and __name__ unicode)# 3091 (kill str8 interning)# 3101 (merge from 2.6a0, see 62151)# 3103 (__file__ points to source file)# Python 3.0a4: 3111 (WITH_CLEANUP optimization).# Python 3.0b1: 3131 (lexical exception stacking, including POP_EXCEPT#3021)# Python 3.1a1: 3141 (optimize list, set and dict comprehensions:# change LIST_APPEND and SET_ADD, add MAP_ADD #2183)# Python 3.1a1: 3151 (optimize conditional branches:# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE#4715)# Python 3.2a1: 3160 (add SETUP_WITH #6101)# tag: cpython-32# Python 3.2a2: 3170 (add DUP_TOP_TWO, remove DUP_TOPX and ROT_FOUR #9225)# Python 3.2a3 3180 (add DELETE_DEREF #4617)# Python 3.3a1 3190 (__class__ super closure changed)# Python 3.3a1 3200 (PEP 3155 __qualname__ added #13448)# Python 3.3a1 3210 (added size modulo 2**32 to the pyc header #13645)# Python 3.3a2 3220 (changed PEP 380 implementation #14230)# Python 3.3a4 3230 (revert changes to implicit __class__ closure #14857)# Python 3.4a1 3250 (evaluate positional default arguments before# keyword-only defaults #16967)# Python 3.4a1 3260 (add LOAD_CLASSDEREF; allow locals of class to override# free vars #17853)# Python 3.4a1 3270 (various tweaks to the __class__ closure #12370)# Python 3.4a1 3280 (remove implicit class argument)# Python 3.4a4 3290 (changes to __qualname__ computation #19301)# Python 3.4a4 3300 (more changes to __qualname__ computation #19301)# Python 3.4rc2 3310 (alter __qualname__ computation #20625)# Python 3.5a1 3320 (PEP 465: Matrix multiplication operator #21176)# Python 3.5b1 3330 (PEP 448: Additional Unpacking Generalizations #2292)# Python 3.5b2 3340 (fix dictionary display evaluation order #11205)# Python 3.5b3 3350 (add GET_YIELD_FROM_ITER opcode #24400)# Python 3.5.2 3351 (fix BUILD_MAP_UNPACK_WITH_CALL 
opcode #27286)# Python 3.6a0 3360 (add FORMAT_VALUE opcode #25483)# Python 3.6a1 3361 (lineno delta of code.co_lnotab becomes signed #26107)# Python 3.6a2 3370 (16 bit wordcode #26647)# Python 3.6a2 3371 (add BUILD_CONST_KEY_MAP opcode #27140)# Python 3.6a2 3372 (MAKE_FUNCTION simplification, remove MAKE_CLOSURE# #27095)# Python 3.6b1 3373 (add BUILD_STRING opcode #27078)# Python 3.6b1 3375 (add SETUP_ANNOTATIONS and STORE_ANNOTATION opcodes# #27985)# Python 3.6b1 3376 (simplify CALL_FUNCTIONs & BUILD_MAP_UNPACK_WITH_CALL#27213)# Python 3.6b1 3377 (set __class__ cell from type.__new__ #23722)# Python 3.6b2 3378 (add BUILD_TUPLE_UNPACK_WITH_CALL #28257)# Python 3.6rc1 3379 (more thorough __class__ validation #23722)# Python 3.7a1 3390 (add LOAD_METHOD and CALL_METHOD opcodes #26110)# Python 3.7a2 3391 (update GET_AITER #31709)# Python 3.7a4 3392 (PEP 552: Deterministic pycs #31650)# Python 3.7b1 3393 (remove STORE_ANNOTATION opcode #32550)# Python 3.7b5 3394 (restored docstring as the first stmt in the body;# this might affected the first line number #32911)# Python 3.8a1 3400 (move frame block handling to compiler #17611)# Python 3.8a1 3401 (add END_ASYNC_FOR #33041)# Python 3.8a1 3410 (PEP570 Python Positional-Only Parameters #36540)# Python 3.8b2 3411 (Reverse evaluation order of key: value in dict# comprehensions #35224)# Python 3.8b2 3412 (Swap the position of positional args and positional# only args in ast.arguments #37593)# Python 3.8b4 3413 (Fix "break" and "continue" in "finally" #37830)# MAGIC must change whenever the bytecode emitted by the compiler may no# longer be understood by older implementations of the eval loop (usually# due to the addition of new opcodes).# Whenever MAGIC_NUMBER is changed, the ranges in the magic_values array# in PC/launcher.c must also be updated.# For import.c# _setup() adds .pyw as needed.# Deprecated.# We need an absolute path to the py file to avoid the possibility of# collisions within sys.pycache_prefix, if someone has two different# `foo/bar.py` on their system and they import both of them using the# same sys.pycache_prefix. Let's say sys.pycache_prefix is# `C:\Bytecode`; the idea here is that if we get `Foo\Bar`, we first# make it absolute (`C:\Somewhere\Foo\Bar`), then make it root-relative# (`Somewhere\Foo\Bar`), so we end up placing the bytecode file in an# unambiguous `C:\Bytecode\Somewhere\Foo\Bar\`.# Strip initial drive from a Windows path. We know we have an absolute# path here, so the second part of the check rules out a POSIX path that# happens to contain a colon at the second character.# Strip initial path separator from `head` to complete the conversion# back to a root-relative path before joining.# We always ensure write access so we can update cached files# later even when the source files are read-only on Windows (#6074)# XXX yuck# Call find_loader(). If it returns a string (indicating this# is a namespace package portion), generate a warning and# return None.# Only the first two flags are defined.# To avoid bootstrap issues.# The caller may simply want a partially populated location-# oriented spec. So we set the location to a bogus value and# fill in as much as we can.# ExecutionLoader# If the location is on the filesystem, but doesn't actually exist,# we could return None here, indicating that the location is not# valid. However, we don't have a good way of testing since an# indirect location (e.g. 
a zip file or URL) will look like a# non-existent file relative to the filesystem.# Pick a loader if one wasn't provided.# Set submodule_search_paths appropriately.# Check the loader.# Changed in _setup()# For backwards compatibility, we delegate to set_data()# The only reason for this method is for the name check.# Issue #14857: Avoid the zero-argument form of super so the implementation# of that form can be updated without breaking the frozen module# ResourceReader ABC API.# Adapt between the two APIs# Figure out what directories are missing.# Create needed directories.# Probably another Python process already created the dir.# Could be a permission error, read-only filesystem: just forget# about writing the data.# Same as above: just don't write the bytecode.# Call _classify_pyc to do basic validation of the pyc but ignore the# result. There's no source to check against.# Filled in by _setup().# This is a top-level module. sys.path contains the parent path.# Not a top-level module. parent-module.__path__ contains the# parent path.# If the parent's path has changed, recalculate _path# Make a copy# Note that no changes are made if a loader is returned, but we# do remember the new parent path# Save the copy# We use this exclusively in module_from_spec() for backward-compatibility.# The import system never calls this method.# Finders ###################################################################### Don't cache the failure as the cwd can easily change to# a valid directory later on.# If this ends up being a namespace package, namespace_path is# the list of paths that will become its __path__# This is possibly part of a namespace package.# Remember these path entries (if any) for when we# create a namespace package, and continue iterating# on path.# We found at least one namespace path. Return a spec which# can create the namespace package.# Base (directory) path# tail_module keeps the original casing, for __file__ and friends# Check if the module is the name of a directory (and thus a package).# If a namespace package, return the path if we don't# find a module in the next section.# Check for a file w/ a proper suffix exists.# Directory has either been removed, turned into a file, or made# unreadable.# We store two cached versions, to handle runtime changes of the# PYTHONCASEOK environment variable.# Windows users can import modules with case-insensitive file# suffixes (for legacy reasons). Make the suffix lowercase here# so it's done once instead of for every import. This is safe as# the specified suffixes to check against are always specified in a# case-sensitive manner.# Import setup ################################################################ This function is used by PyImport_ExecCodeModuleObject().# Not important enough to report.# Directly load the os module (needed during bootstrap).# Directly load the _thread module (needed during bootstrap).# Directly load the _weakref module (needed during bootstrap).# Directly load the winreg module (needed during bootstrap).# Constantsb'Core implementation of path-based import. + +This module is NOT meant to be directly imported! It has been designed such +that it can be bootstrapped into Python as the implementation of import. As +such it requires the injection of specific modules and attributes in order to +work. One should use importlib as the public-facing version of this module. + +'u'Core implementation of path-based import. + +This module is NOT meant to be directly imported! 
It has been designed such +that it can be bootstrapped into Python as the implementation of import. As +such it requires the injection of specific modules and attributes in order to +work. One should use importlib as the public-facing version of this module. + +'b'PYTHONCASEOK'u'PYTHONCASEOK'b'True if filenames must be checked case-insensitively.'u'True if filenames must be checked case-insensitively.'b'Convert a 32-bit integer to little-endian.'u'Convert a 32-bit integer to little-endian.'b'little'b'Convert 4 bytes in little-endian to an integer.'u'Convert 4 bytes in little-endian to an integer.'b'Convert 2 bytes in little-endian to an integer.'u'Convert 2 bytes in little-endian to an integer.'b'Replacement for os.path.join().'u'Replacement for os.path.join().'b'Replacement for os.path.split().'u'Replacement for os.path.split().'b'Stat the path. + + Made a separate function to make it easier to override in experiments + (e.g. cache stat results). + + 'u'Stat the path. + + Made a separate function to make it easier to override in experiments + (e.g. cache stat results). + + 'b'Test whether the path is the specified mode type.'u'Test whether the path is the specified mode type.'b'Replacement for os.path.isfile.'u'Replacement for os.path.isfile.'b'Replacement for os.path.isdir.'u'Replacement for os.path.isdir.'b'Replacement for os.path.isabs.'u'Replacement for os.path.isabs.'b'\\'u'\\'b'Best-effort function to write data to a path atomically. + Be prepared to handle a FileExistsError if concurrent writing of the + temporary file is attempted.'u'Best-effort function to write data to a path atomically. + Be prepared to handle a FileExistsError if concurrent writing of the + temporary file is attempted.'b'__pycache__'u'__pycache__'b'opt-'u'opt-'b'Given the path to a .py file, return the path to its .pyc file. + + The .py file does not need to exist; this simply returns the path to the + .pyc file calculated as if the .py file were imported. + + The 'optimization' parameter controls the presumed optimization level of + the bytecode file. If 'optimization' is not None, the string representation + of the argument is taken and verified to be alphanumeric (else ValueError + is raised). + + The debug_override parameter is deprecated. If debug_override is not None, + a True value is the same as setting 'optimization' to the empty string + while a False value is equivalent to setting 'optimization' to '1'. + + If sys.implementation.cache_tag is None then NotImplementedError is raised. + + 'u'Given the path to a .py file, return the path to its .pyc file. + + The .py file does not need to exist; this simply returns the path to the + .pyc file calculated as if the .py file were imported. + + The 'optimization' parameter controls the presumed optimization level of + the bytecode file. If 'optimization' is not None, the string representation + of the argument is taken and verified to be alphanumeric (else ValueError + is raised). + + The debug_override parameter is deprecated. If debug_override is not None, + a True value is the same as setting 'optimization' to the empty string + while a False value is equivalent to setting 'optimization' to '1'. + + If sys.implementation.cache_tag is None then NotImplementedError is raised. 
+ + 'b'the debug_override parameter is deprecated; use 'optimization' instead'u'the debug_override parameter is deprecated; use 'optimization' instead'b'debug_override or optimization must be set to None'u'debug_override or optimization must be set to None'b'sys.implementation.cache_tag is None'u'sys.implementation.cache_tag is None'b'{!r} is not alphanumeric'u'{!r} is not alphanumeric'b'{}.{}{}'u'{}.{}{}'b'Given the path to a .pyc. file, return the path to its .py file. + + The .pyc file does not need to exist; this simply returns the path to + the .py file calculated to correspond to the .pyc file. If path does + not conform to PEP 3147/488 format, ValueError will be raised. If + sys.implementation.cache_tag is None then NotImplementedError is raised. + + 'u'Given the path to a .pyc. file, return the path to its .py file. + + The .pyc file does not need to exist; this simply returns the path to + the .py file calculated to correspond to the .pyc file. If path does + not conform to PEP 3147/488 format, ValueError will be raised. If + sys.implementation.cache_tag is None then NotImplementedError is raised. + + 'b' not bottom-level directory in 'u' not bottom-level directory in 'b'expected only 2 or 3 dots in 'u'expected only 2 or 3 dots in 'b'optimization portion of filename does not start with 'u'optimization portion of filename does not start with 'b'optimization level 'u'optimization level 'b' is not an alphanumeric value'u' is not an alphanumeric value'b'Convert a bytecode file path to a source path (if possible). + + This function exists purely for backwards-compatibility for + PyImport_ExecCodeModuleWithFilenames() in the C API. + + 'u'Convert a bytecode file path to a source path (if possible). + + This function exists purely for backwards-compatibility for + PyImport_ExecCodeModuleWithFilenames() in the C API. + + 'b'py'u'py'b'Calculate the mode permissions for a bytecode file.'u'Calculate the mode permissions for a bytecode file.'b'Decorator to verify that the module being requested matches the one the + loader can handle. + + The first argument (self) must define _name which the second argument is + compared against. If the comparison fails then ImportError is raised. + + 'u'Decorator to verify that the module being requested matches the one the + loader can handle. + + The first argument (self) must define _name which the second argument is + compared against. If the comparison fails then ImportError is raised. + + 'b'loader for %s cannot handle %s'u'loader for %s cannot handle %s'b'Try to find a loader for the specified module by delegating to + self.find_loader(). + + This method is deprecated in favor of finder.find_spec(). + + 'u'Try to find a loader for the specified module by delegating to + self.find_loader(). + + This method is deprecated in favor of finder.find_spec(). + + 'b'Not importing directory {}: missing __init__'u'Not importing directory {}: missing __init__'b'Perform basic validity checking of a pyc header and return the flags field, + which determines how the pyc should be further validated against the source. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required, though.) + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + ImportError is raised when the magic number is incorrect or when the flags + field is invalid. EOFError is raised when the data is found to be truncated. 
+ + 'u'Perform basic validity checking of a pyc header and return the flags field, + which determines how the pyc should be further validated against the source. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required, though.) + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + ImportError is raised when the magic number is incorrect or when the flags + field is invalid. EOFError is raised when the data is found to be truncated. + + 'b'bad magic number in 'u'bad magic number in 'b'reached EOF while reading pyc header of 'u'reached EOF while reading pyc header of 'b'invalid flags 'u'invalid flags 'b' in 'u' in 'b'Validate a pyc against the source last-modified time. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required.) + + *source_mtime* is the last modified timestamp of the source file. + + *source_size* is None or the size of the source file in bytes. + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + An ImportError is raised if the bytecode is stale. + + 'u'Validate a pyc against the source last-modified time. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required.) + + *source_mtime* is the last modified timestamp of the source file. + + *source_size* is None or the size of the source file in bytes. + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + An ImportError is raised if the bytecode is stale. + + 'b'bytecode is stale for 'u'bytecode is stale for 'b'Validate a hash-based pyc by checking the real source hash against the one in + the pyc header. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required.) + + *source_hash* is the importlib.util.source_hash() of the source file. + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + An ImportError is raised if the bytecode is stale. + + 'u'Validate a hash-based pyc by checking the real source hash against the one in + the pyc header. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required.) + + *source_hash* is the importlib.util.source_hash() of the source file. + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + An ImportError is raised if the bytecode is stale. + + 'b'hash in bytecode doesn't match hash of source 'u'hash in bytecode doesn't match hash of source 'b'Compile bytecode as found in a pyc.'u'Compile bytecode as found in a pyc.'b'code object from {!r}'u'code object from {!r}'b'Non-code object in {!r}'u'Non-code object in {!r}'b'Produce the data for a timestamp-based pyc.'u'Produce the data for a timestamp-based pyc.'b'Produce the data for a hash-based pyc.'u'Produce the data for a hash-based pyc.'b'Decode bytes representing source code and return the string. + + Universal newline support is used in the decoding. + 'u'Decode bytes representing source code and return the string. + + Universal newline support is used in the decoding. 
+ 'b'Return a module spec based on a file location. + + To indicate that the module is a package, set + submodule_search_locations to a list of directory paths. An + empty list is sufficient, though its not otherwise useful to the + import system. + + The loader must take a spec as its only __init__() arg. + + 'u'Return a module spec based on a file location. + + To indicate that the module is a package, set + submodule_search_locations to a list of directory paths. An + empty list is sufficient, though its not otherwise useful to the + import system. + + The loader must take a spec as its only __init__() arg. + + 'b''u''b'Meta path finder for modules declared in the Windows registry.'u'Meta path finder for modules declared in the Windows registry.'b'Software\Python\PythonCore\{sys_version}\Modules\{fullname}'u'Software\Python\PythonCore\{sys_version}\Modules\{fullname}'b'Software\Python\PythonCore\{sys_version}\Modules\{fullname}\Debug'u'Software\Python\PythonCore\{sys_version}\Modules\{fullname}\Debug'b'%d.%d'u'%d.%d'b'Find module named in the registry. + + This method is deprecated. Use exec_module() instead. + + 'u'Find module named in the registry. + + This method is deprecated. Use exec_module() instead. + + 'b'Base class of common code needed by both SourceLoader and + SourcelessFileLoader.'u'Base class of common code needed by both SourceLoader and + SourcelessFileLoader.'b'Concrete implementation of InspectLoader.is_package by checking if + the path returned by get_filename has a filename of '__init__.py'.'u'Concrete implementation of InspectLoader.is_package by checking if + the path returned by get_filename has a filename of '__init__.py'.'b'__init__'b'Execute the module.'u'Execute the module.'b'cannot load module {!r} when get_code() returns None'u'cannot load module {!r} when get_code() returns None'b'This module is deprecated.'u'This module is deprecated.'b'Optional method that returns the modification time (an int) for the + specified path (a str). + + Raises OSError when the path cannot be handled. + 'u'Optional method that returns the modification time (an int) for the + specified path (a str). + + Raises OSError when the path cannot be handled. + 'b'Optional method returning a metadata dict for the specified + path (a str). + + Possible keys: + - 'mtime' (mandatory) is the numeric timestamp of last source + code modification; + - 'size' (optional) is the size in bytes of the source code. + + Implementing this method allows the loader to read bytecode files. + Raises OSError when the path cannot be handled. + 'u'Optional method returning a metadata dict for the specified + path (a str). + + Possible keys: + - 'mtime' (mandatory) is the numeric timestamp of last source + code modification; + - 'size' (optional) is the size in bytes of the source code. + + Implementing this method allows the loader to read bytecode files. + Raises OSError when the path cannot be handled. + 'b'mtime'u'mtime'b'Optional method which writes data (bytes) to a file path (a str). + + Implementing this method allows for the writing of bytecode files. + + The source path is needed in order to correctly transfer permissions + 'u'Optional method which writes data (bytes) to a file path (a str). + + Implementing this method allows for the writing of bytecode files. + + The source path is needed in order to correctly transfer permissions + 'b'Optional method which writes data (bytes) to a file path (a str). + + Implementing this method allows for the writing of bytecode files. 
+ 'u'Optional method which writes data (bytes) to a file path (a str). + + Implementing this method allows for the writing of bytecode files. + 'b'Concrete implementation of InspectLoader.get_source.'u'Concrete implementation of InspectLoader.get_source.'b'source not available through get_data()'u'source not available through get_data()'b'Return the code object compiled from source. + + The 'data' argument can be any object type that compile() supports. + 'u'Return the code object compiled from source. + + The 'data' argument can be any object type that compile() supports. + 'b'Concrete implementation of InspectLoader.get_code. + + Reading of bytecode requires path_stats to be implemented. To write + bytecode, set_data must also be implemented. + + 'u'Concrete implementation of InspectLoader.get_code. + + Reading of bytecode requires path_stats to be implemented. To write + bytecode, set_data must also be implemented. + + 'b'path'u'path'b'never'u'never'b'{} matches {}'u'{} matches {}'b'code object from {}'u'code object from {}'b'Base file loader class which implements the loader protocol methods that + require file system usage.'b'Cache the module name and the path to the file found by the + finder.'u'Cache the module name and the path to the file found by the + finder.'b'Load a module from a file. + + This method is deprecated. Use exec_module() instead. + + 'u'Load a module from a file. + + This method is deprecated. Use exec_module() instead. + + 'b'Return the path to the source file as found by the finder.'u'Return the path to the source file as found by the finder.'b'Return the data from path as raw bytes.'u'Return the data from path as raw bytes.'b'Concrete implementation of SourceLoader using the file system.'u'Concrete implementation of SourceLoader using the file system.'b'Return the metadata for the path.'u'Return the metadata for the path.'b'Write bytes data to a file.'u'Write bytes data to a file.'b'could not create {!r}: {!r}'u'could not create {!r}: {!r}'b'created {!r}'u'created {!r}'b'Loader which handles sourceless file imports.'u'Loader which handles sourceless file imports.'b'Return None as there is no source code.'u'Return None as there is no source code.'b'Loader for extension modules. + + The constructor is designed to work with FileFinder. + + 'b'Create an unitialized extension module'u'Create an unitialized extension module'b'extension module {!r} loaded from {!r}'u'extension module {!r} loaded from {!r}'b'Initialize an extension module'u'Initialize an extension module'b'extension module {!r} executed from {!r}'u'extension module {!r} executed from {!r}'b'Return True if the extension module is a package.'u'Return True if the extension module is a package.'b'Return None as an extension module cannot create a code object.'u'Return None as an extension module cannot create a code object.'b'Return None as extension modules have no source code.'u'Return None as extension modules have no source code.'b'Represents a namespace package's path. It uses the module name + to find its parent module, and from there it looks up the parent's + __path__. When this changes, the module's own path is recomputed, + using path_finder. For top-level modules, the parent module's path + is sys.path.'u'Represents a namespace package's path. It uses the module name + to find its parent module, and from there it looks up the parent's + __path__. When this changes, the module's own path is recomputed, + using path_finder. 
For top-level modules, the parent module's path + is sys.path.'b'Returns a tuple of (parent-module-name, parent-path-attr-name)'u'Returns a tuple of (parent-module-name, parent-path-attr-name)'b'sys'b'_NamespacePath({!r})'u'_NamespacePath({!r})'b''u''b''u''b'Load a namespace module. + + This method is deprecated. Use exec_module() instead. + + 'u'Load a namespace module. + + This method is deprecated. Use exec_module() instead. + + 'b'namespace module loaded with path {!r}'u'namespace module loaded with path {!r}'b'Meta path finder for sys.path and package __path__ attributes.'b'Call the invalidate_caches() method on all path entry finders + stored in sys.path_importer_caches (where implemented).'u'Call the invalidate_caches() method on all path entry finders + stored in sys.path_importer_caches (where implemented).'b'Search sys.path_hooks for a finder for 'path'.'u'Search sys.path_hooks for a finder for 'path'.'b'sys.path_hooks is empty'u'sys.path_hooks is empty'b'Get the finder for the path entry from sys.path_importer_cache. + + If the path entry is not in the cache, find the appropriate finder + and cache it. If no finder is available, store None. + + 'u'Get the finder for the path entry from sys.path_importer_cache. + + If the path entry is not in the cache, find the appropriate finder + and cache it. If no finder is available, store None. + + 'b'find_loader'u'find_loader'b'Find the loader or namespace_path for this module/package name.'u'Find the loader or namespace_path for this module/package name.'b'find_spec'u'find_spec'b'spec missing loader'u'spec missing loader'b'Try to find a spec for 'fullname' on sys.path or 'path'. + + The search is based on sys.path_hooks and sys.path_importer_cache. + 'u'Try to find a spec for 'fullname' on sys.path or 'path'. + + The search is based on sys.path_hooks and sys.path_importer_cache. + 'b'find the module on sys.path or 'path' based on sys.path_hooks and + sys.path_importer_cache. + + This method is deprecated. Use find_spec() instead. + + 'u'find the module on sys.path or 'path' based on sys.path_hooks and + sys.path_importer_cache. + + This method is deprecated. Use find_spec() instead. + + 'b' + Find distributions. + + Return an iterable of all Distribution instances capable of + loading the metadata for packages matching ``context.name`` + (or all names if ``None`` indicated) along the paths in the list + of directories ``context.path``. + 'u' + Find distributions. + + Return an iterable of all Distribution instances capable of + loading the metadata for packages matching ``context.name`` + (or all names if ``None`` indicated) along the paths in the list + of directories ``context.path``. + 'b'File-based finder. + + Interactions with the file system are cached for performance, being + refreshed when the directory the finder is handling has been modified. + + 'u'File-based finder. + + Interactions with the file system are cached for performance, being + refreshed when the directory the finder is handling has been modified. + + 'b'Initialize with the path to search on and a variable number of + 2-tuples containing the loader and the file suffixes the loader + recognizes.'u'Initialize with the path to search on and a variable number of + 2-tuples containing the loader and the file suffixes the loader + recognizes.'b'Invalidate the directory mtime.'u'Invalidate the directory mtime.'b'Try to find a loader for the specified module, or the namespace + package portions. Returns (loader, list-of-portions). + + This method is deprecated. 
Use find_spec() instead. + + 'u'Try to find a loader for the specified module, or the namespace + package portions. Returns (loader, list-of-portions). + + This method is deprecated. Use find_spec() instead. + + 'b'Try to find a spec for the specified module. + + Returns the matching spec, or None if not found. + 'u'Try to find a spec for the specified module. + + Returns the matching spec, or None if not found. + 'b'trying {}'u'trying {}'b'possible namespace for {}'u'possible namespace for {}'b'Fill the cache of potential modules and packages for this directory.'u'Fill the cache of potential modules and packages for this directory.'b'A class method which returns a closure to use on sys.path_hook + which will return an instance using the specified loaders and the path + called on the closure. + + If the path called on the closure is not a directory, ImportError is + raised. + + 'u'A class method which returns a closure to use on sys.path_hook + which will return an instance using the specified loaders and the path + called on the closure. + + If the path called on the closure is not a directory, ImportError is + raised. + + 'b'Path hook for importlib.machinery.FileFinder.'u'Path hook for importlib.machinery.FileFinder.'b'only directories are supported'u'only directories are supported'b'FileFinder({!r})'u'FileFinder({!r})'b'Returns a list of file-based module loaders. + + Each item is a tuple (loader, suffixes). + 'u'Returns a list of file-based module loaders. + + Each item is a tuple (loader, suffixes). + 'b'Setup the path-based importers for importlib by importing needed + built-in modules and injecting them into the global namespace. + + Other components are extracted from the core bootstrap module. + + 'u'Setup the path-based importers for importlib by importing needed + built-in modules and injecting them into the global namespace. + + Other components are extracted from the core bootstrap module. + + 'b'_io'b'builtins'b'marshal'b'importlib requires posix or nt'u'importlib requires posix or nt'b'_os'u'_os'b'path_sep'u'path_sep'b'path_separators'u'path_separators'b'_pathseps_with_colon'u'_pathseps_with_colon'b'winreg'u'winreg'b'_winreg'u'_winreg'b'_relax_case'u'_relax_case'b'.pyw'u'.pyw'b'_d.pyd'u'_d.pyd'b'Install the path-based import components.'u'Install the path-based import components.'u'_bootstrap_external'u'Create a compressor object for compressing data incrementally. + + compresslevel + Compression level, as a number between 1 and 9. + +For one-shot compression, use the compress() function instead.'compress_bz2.BZ2CompressorBZ2Compressoru'Create a decompressor object for decompressing data incrementally. + +For one-shot decompression, use the decompress() function instead.'decompresseofneeds_inputunused_data_bz2.BZ2DecompressorBZ2Decompressoru'/Users/pwntester/.pyenv/versions/3.8.13/lib/python3.8/lib-dynload/_bz2.cpython-38-darwin.so'u'_bz2'_bz2_forget_codecascii_decodeascii_encodecharmap_buildcharmap_decodecharmap_encodeescape_decodeescape_encodelatin_1_decodelatin_1_encodelookuplookup_errorraw_unicode_escape_decoderaw_unicode_escape_encodereadbuffer_encoderegister_errorunicode_escape_decodeunicode_escape_encodeutf_16_be_decodeutf_16_be_encodeutf_16_decodeutf_16_encodeutf_16_ex_decodeutf_16_le_decodeutf_16_le_encodeutf_32_be_decodeutf_32_be_encodeutf_32_decodeutf_32_encodeutf_32_ex_decodeutf_32_le_decodeutf_32_le_encodeutf_7_decodeutf_7_encodeutf_8_decodeutf_8_encode_codecsu'OrderedDict.__dict__'collections.OrderedDictu'High performance data structures. 
+- deque: ordered collection accessible from endpoints only +- defaultdict: dict subclass with a default value factory +'_collections._deque_iterator_deque_iterator_collections._deque_reverse_iterator_deque_reverse_iterator_collections._tuplegetteru'defaultdict(default_factory[, ...]) --> dict with default factory + +The default factory is called without arguments to produce +a new value when a key is not present, in __getitem__ only. +A defaultdict compares equal to a dict with the same items. +All remaining arguments are treated the same as if they were +passed to the dict constructor, including keyword arguments. +'default_factorycollections.defaultdictu'deque([iterable[, maxlen]]) --> deque object + +A list-like sequence optimized for data accesses near its endpoints.'appendleftextendleftu'maximum size of a deque or None if unbounded'u'deque.maxlen'maxlenrotatecollections.dequeAbstract Base Classes (ABCs) for collections, according to PEP 3119. + +Unit tests are in test_collections. +ABCMetaabstractmethodAwaitableCoroutineAsyncIterableAsyncIteratorAsyncGeneratorHashableIterableGeneratorReversibleSizedContainerCallableCollectionMutableSetMappingViewByteStringbytes_iteratorbytearray_iteratordict_keyiteratordict_valueiteratordict_itemiteratorlist_iteratorlist_reverseiteratorrange_iteratorlongrange_iteratorset_iteratorstr_iteratortuple_iteratorzip_iteratordict_keysdict_valuesdict_itemsmappingproxycoroutine_agasync_generator_check_methodsCmethodsmetaclassSend a value into the coroutine. + Return next yielded value or raise StopIteration. + Raise an exception in the coroutine. + Return next yielded value or raise StopIteration. + Raise GeneratorExit inside coroutine. + coroutine ignored GeneratorExit__aiter____anext__Return the next item or raise StopAsyncIteration when exhausted.Return the next item from the asynchronous generator. + When exhausted, raise StopAsyncIteration. + asendSend a value into the asynchronous generator. + Return next yielded value or raise StopAsyncIteration. + athrowRaise an exception in the asynchronous generator. + Return next yielded value or raise StopAsyncIteration. + acloseasynchronous generator ignored GeneratorExitReturn the next item from the iterator. When exhausted, raise StopIterationReturn the next item from the generator. + When exhausted, raise StopIteration. + Send a value into the generator. + Return next yielded value or raise StopIteration. + Raise an exception in the generator. + Return next yielded value or raise StopIteration. + Raise GeneratorExit inside generator. + generator ignored GeneratorExitA set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__ and __len__. + + To override the comparisons (presumably for speed, as the + semantics are fixed), redefine __le__ and __ge__, + then the other operations will automatically follow suit. + _from_iterableConstruct an instance of the class from any iterable input. + + Must override this method if the class constructor signature + does not accept an iterable for an input. + Return True if two sets have a null intersection._hashCompute the hash value of a set. + + Note that we don't define __hash__: not all sets are hashable. + But if you define a hashable set type, its __hash__ should + call this function. + + This must be compatible __eq__. 
+ + All sets ought to compare equal if they contain the same + elements, regardless of how they are implemented, and + regardless of the order of the elements; so there's not much + freedom for __eq__ or __hash__. We match the algorithm used + by the built-in frozenset type. + MAXMASK1927868237hx89869747364479816769069907133923590923713A mutable set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__, __len__, + add(), and discard(). + + To override the comparisons (presumably for speed, as the + semantics are fixed), all you have to do is redefine __le__ and + then the other operations will automatically follow suit. + Add an element.Remove an element. Do not raise an exception if absent.Remove an element. If not a member, raise a KeyError.Return the popped value. Raise KeyError if empty.This is slow (creates N new iterators!) but effective.A Mapping is a generic container for associating key/value + pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __iter__, and __len__. + + D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.{0.__class__.__name__}({0._mapping!r})A MutableMapping is a generic container for associating + key/value pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __setitem__, __delitem__, + __iter__, and __len__. + + D.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. + D.popitem() -> (k, v), remove and return some (key, value) pair + as a 2-tuple; but raise KeyError if D is empty. + D.clear() -> None. Remove all items from D. D.update([E, ]**F) -> None. Update D from mapping/iterable E and F. + If E present and has a .keys() method, does: for k in E: D[k] = E[k] + If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v + In either case, this is followed by: for k, v in F.items(): D[k] = v + D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in DAll the operations on a read-only sequence. + + Concrete subclasses must override __new__ or __init__, + __getitem__, and __len__. + S.index(value, [start, [stop]]) -> integer -- return first index of value. + Raises ValueError if the value is not present. + + Supporting start and stop arguments is optional, but + recommended. + S.count(value) -> integer -- return number of occurrences of valueThis unifies bytes and bytearray. + + XXX Should add all their methods. + All the operations on a read-write sequence. + + Concrete subclasses must provide __new__ or __init__, + __getitem__, __setitem__, __delitem__, __len__, and insert(). + + S.insert(index, value) -- insert value before indexS.append(value) -- append value to the end of the sequenceS.clear() -> None -- remove all items from SS.reverse() -- reverse *IN PLACE*S.extend(iterable) -- extend sequence by appending elements from the iterableS.pop([index]) -> item -- remove and return item at index (default last). + Raise IndexError if list is empty or index is out of range. + S.remove(value) -- remove first occurrence of value. + Raise ValueError if the value is not present. + # Copyright 2007 Google, Inc. All Rights Reserved.# This module has been renamed from collections.abc to _collections_abc to# speed up interpreter startup. 
Some of the types such as MutableMapping are# required early but collections module imports a lot of other modules.# See issue #19218# Private list of types that we want to register with the various ABCs# so that they will pass tests like:# it = iter(somebytearray)# assert isinstance(it, Iterable)# Note: in other implementations, these types might not be distinct# and they may have their own implementation specific types that# are not included on this list.#callable_iterator = ???## views #### misc #### coroutine ### Prevent ResourceWarning## asynchronous generator ##### ONE-TRICK PONIES ####Iterator.register(callable_iterator)### SETS ###### MAPPINGS ###### SEQUENCES #### Multiply inheriting, see ByteStringb'Abstract Base Classes (ABCs) for collections, according to PEP 3119. + +Unit tests are in test_collections. +'u'Abstract Base Classes (ABCs) for collections, according to PEP 3119. + +Unit tests are in test_collections. +'b'Awaitable'u'Awaitable'b'Coroutine'u'Coroutine'b'AsyncIterable'u'AsyncIterable'b'AsyncIterator'u'AsyncIterator'b'AsyncGenerator'u'AsyncGenerator'b'Hashable'u'Hashable'b'Iterable'u'Iterable'b'Iterator'u'Iterator'b'Generator'u'Generator'b'Reversible'u'Reversible'b'Sized'u'Sized'b'Container'u'Container'b'Callable'u'Callable'b'Collection'u'Collection'b'Set'u'Set'b'MutableSet'u'MutableSet'b'Mapping'u'Mapping'b'MutableMapping'u'MutableMapping'b'MappingView'u'MappingView'b'KeysView'u'KeysView'b'ItemsView'u'ItemsView'b'ValuesView'u'ValuesView'b'Sequence'u'Sequence'b'MutableSequence'u'MutableSequence'b'ByteString'u'ByteString'b'collections.abc'u'collections.abc'b'__hash__'u'__hash__'b'__await__'u'__await__'b'Send a value into the coroutine. + Return next yielded value or raise StopIteration. + 'u'Send a value into the coroutine. + Return next yielded value or raise StopIteration. + 'b'Raise an exception in the coroutine. + Return next yielded value or raise StopIteration. + 'u'Raise an exception in the coroutine. + Return next yielded value or raise StopIteration. + 'b'Raise GeneratorExit inside coroutine. + 'u'Raise GeneratorExit inside coroutine. + 'b'coroutine ignored GeneratorExit'u'coroutine ignored GeneratorExit'b'throw'u'throw'b'__aiter__'u'__aiter__'b'Return the next item or raise StopAsyncIteration when exhausted.'u'Return the next item or raise StopAsyncIteration when exhausted.'b'__anext__'u'__anext__'b'Return the next item from the asynchronous generator. + When exhausted, raise StopAsyncIteration. + 'u'Return the next item from the asynchronous generator. + When exhausted, raise StopAsyncIteration. + 'b'Send a value into the asynchronous generator. + Return next yielded value or raise StopAsyncIteration. + 'u'Send a value into the asynchronous generator. + Return next yielded value or raise StopAsyncIteration. + 'b'Raise an exception in the asynchronous generator. + Return next yielded value or raise StopAsyncIteration. + 'u'Raise an exception in the asynchronous generator. + Return next yielded value or raise StopAsyncIteration. + 'b'asynchronous generator ignored GeneratorExit'u'asynchronous generator ignored GeneratorExit'b'asend'u'asend'b'athrow'u'athrow'b'aclose'u'aclose'b'__iter__'u'__iter__'b'Return the next item from the iterator. When exhausted, raise StopIteration'u'Return the next item from the iterator. When exhausted, raise StopIteration'b'__next__'u'__next__'b'__reversed__'u'__reversed__'b'Return the next item from the generator. + When exhausted, raise StopIteration. + 'u'Return the next item from the generator. 
+ When exhausted, raise StopIteration. + 'b'Send a value into the generator. + Return next yielded value or raise StopIteration. + 'u'Send a value into the generator. + Return next yielded value or raise StopIteration. + 'b'Raise an exception in the generator. + Return next yielded value or raise StopIteration. + 'u'Raise an exception in the generator. + Return next yielded value or raise StopIteration. + 'b'Raise GeneratorExit inside generator. + 'u'Raise GeneratorExit inside generator. + 'b'generator ignored GeneratorExit'u'generator ignored GeneratorExit'b'__len__'u'__len__'b'__contains__'u'__contains__'b'__call__'u'__call__'b'A set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__ and __len__. + + To override the comparisons (presumably for speed, as the + semantics are fixed), redefine __le__ and __ge__, + then the other operations will automatically follow suit. + 'u'A set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__ and __len__. + + To override the comparisons (presumably for speed, as the + semantics are fixed), redefine __le__ and __ge__, + then the other operations will automatically follow suit. + 'b'Construct an instance of the class from any iterable input. + + Must override this method if the class constructor signature + does not accept an iterable for an input. + 'u'Construct an instance of the class from any iterable input. + + Must override this method if the class constructor signature + does not accept an iterable for an input. + 'b'Return True if two sets have a null intersection.'u'Return True if two sets have a null intersection.'b'Compute the hash value of a set. + + Note that we don't define __hash__: not all sets are hashable. + But if you define a hashable set type, its __hash__ should + call this function. + + This must be compatible __eq__. + + All sets ought to compare equal if they contain the same + elements, regardless of how they are implemented, and + regardless of the order of the elements; so there's not much + freedom for __eq__ or __hash__. We match the algorithm used + by the built-in frozenset type. + 'u'Compute the hash value of a set. + + Note that we don't define __hash__: not all sets are hashable. + But if you define a hashable set type, its __hash__ should + call this function. + + This must be compatible __eq__. + + All sets ought to compare equal if they contain the same + elements, regardless of how they are implemented, and + regardless of the order of the elements; so there's not much + freedom for __eq__ or __hash__. We match the algorithm used + by the built-in frozenset type. + 'b'A mutable set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__, __len__, + add(), and discard(). + + To override the comparisons (presumably for speed, as the + semantics are fixed), all you have to do is redefine __le__ and + then the other operations will automatically follow suit. + 'u'A mutable set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__, __len__, + add(), and discard(). + + To override the comparisons (presumably for speed, as the + semantics are fixed), all you have to do is redefine __le__ and + then the other operations will automatically follow suit. 
+ 'b'Add an element.'u'Add an element.'b'Remove an element. Do not raise an exception if absent.'u'Remove an element. Do not raise an exception if absent.'b'Remove an element. If not a member, raise a KeyError.'u'Remove an element. If not a member, raise a KeyError.'b'Return the popped value. Raise KeyError if empty.'u'Return the popped value. Raise KeyError if empty.'b'This is slow (creates N new iterators!) but effective.'u'This is slow (creates N new iterators!) but effective.'b'A Mapping is a generic container for associating key/value + pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __iter__, and __len__. + + 'u'A Mapping is a generic container for associating key/value + pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __iter__, and __len__. + + 'b'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.'u'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.'b'_mapping'u'_mapping'b'{0.__class__.__name__}({0._mapping!r})'u'{0.__class__.__name__}({0._mapping!r})'b'A MutableMapping is a generic container for associating + key/value pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __setitem__, __delitem__, + __iter__, and __len__. + + 'u'A MutableMapping is a generic container for associating + key/value pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __setitem__, __delitem__, + __iter__, and __len__. + + 'b'D.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. + 'u'D.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. + 'b'D.popitem() -> (k, v), remove and return some (key, value) pair + as a 2-tuple; but raise KeyError if D is empty. + 'u'D.popitem() -> (k, v), remove and return some (key, value) pair + as a 2-tuple; but raise KeyError if D is empty. + 'b'D.clear() -> None. Remove all items from D.'u'D.clear() -> None. Remove all items from D.'b' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F. + If E present and has a .keys() method, does: for k in E: D[k] = E[k] + If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v + In either case, this is followed by: for k, v in F.items(): D[k] = v + 'u' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F. + If E present and has a .keys() method, does: for k in E: D[k] = E[k] + If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v + In either case, this is followed by: for k, v in F.items(): D[k] = v + 'b'keys'b'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D'u'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D'b'All the operations on a read-only sequence. + + Concrete subclasses must override __new__ or __init__, + __getitem__, and __len__. + 'u'All the operations on a read-only sequence. + + Concrete subclasses must override __new__ or __init__, + __getitem__, and __len__. + 'b'S.index(value, [start, [stop]]) -> integer -- return first index of value. + Raises ValueError if the value is not present. + + Supporting start and stop arguments is optional, but + recommended. + 'u'S.index(value, [start, [stop]]) -> integer -- return first index of value. + Raises ValueError if the value is not present. 
+ + Supporting start and stop arguments is optional, but + recommended. + 'b'S.count(value) -> integer -- return number of occurrences of value'u'S.count(value) -> integer -- return number of occurrences of value'b'This unifies bytes and bytearray. + + XXX Should add all their methods. + 'u'This unifies bytes and bytearray. + + XXX Should add all their methods. + 'b'All the operations on a read-write sequence. + + Concrete subclasses must provide __new__ or __init__, + __getitem__, __setitem__, __delitem__, __len__, and insert(). + + 'u'All the operations on a read-write sequence. + + Concrete subclasses must provide __new__ or __init__, + __getitem__, __setitem__, __delitem__, __len__, and insert(). + + 'b'S.insert(index, value) -- insert value before index'u'S.insert(index, value) -- insert value before index'b'S.append(value) -- append value to the end of the sequence'u'S.append(value) -- append value to the end of the sequence'b'S.clear() -> None -- remove all items from S'u'S.clear() -> None -- remove all items from S'b'S.reverse() -- reverse *IN PLACE*'u'S.reverse() -- reverse *IN PLACE*'b'S.extend(iterable) -- extend sequence by appending elements from the iterable'u'S.extend(iterable) -- extend sequence by appending elements from the iterable'b'S.pop([index]) -> item -- remove and return item at index (default last). + Raise IndexError if list is empty or index is out of range. + 'u'S.pop([index]) -> item -- remove and return item at index (default last). + Raise IndexError if list is empty or index is out of range. + 'b'S.remove(value) -- remove first occurrence of value. + Raise ValueError if the value is not present. + 'u'S.remove(value) -- remove first occurrence of value. + Raise ValueError if the value is not present. + 'u'_collections_abc'__builtin__copyregcopy_regqueueQueuesocketserverSocketServerconfigparserConfigParsertkinter.filedialogtkFileDialogtkinter.simpledialogtkSimpleDialogtkinter.colorchoosertkColorChoosertkinter.commondialogtkCommonDialogtkinter.dialogDialogtkinter.dndTkdndtkinter.fonttkFonttkinter.messageboxtkMessageBoxtkinter.scrolledtextScrolledTextTkconstantstkinter.tixTixtkinter.ttkttkTkinter_markupbasemarkupbase_dummy_threaddummy_threaddbm.bsddbhashdbm.dumbdumbdbmdbm.ndbmdbmdbm.gnugdbmxmlrpc.clientxmlrpclibxmlrpc.serverSimpleXMLRPCServerhttp.clienthttplibhtmlentitydefshtml.parserHTMLParserhttp.cookiesCookiehttp.cookiejarcookielibhttp.serverBaseHTTPServertest.test_supportcommandsurllib.robotparserrobotparserurllib2anydbm_abcollIMPORT_MAPPINGxrangereduceunichrlongizipimapifilterfilterfalseifilterfalsezip_longestizip_longestIterableUserDictwhichdbfromfd_socketmultiprocessing.connectionConnectionmultiprocessing.contextProcessmultiprocessing.popen_forkmultiprocessing.forkingContentTooShortErrorgetproxiespathname2urlquote_plusunquote_plusunquoteurl2pathnameurlcleanupurlencodeurlopenurlretrieveNAME_MAPPINGPYTHON2_EXCEPTIONSWindowsErrorexcnameAuthenticationErrorBufferTooShortProcessErrorMULTIPROCESSING_EXCEPTIONSREVERSE_IMPORT_MAPPINGREVERSE_NAME_MAPPINGcPicklexml.etree.ElementTreeFileDialogSimpleDialogDocXMLRPCServerSimpleHTTPServerCGIHTTPServercStringIO_dbm_functools_gdbm_picklebasestringStandardErrorSocketType_socketobjectLoadFileDialogSaveFileDialogServerHTMLDocXMLRPCDocGeneratorDocXMLRPCRequestHandlerDocCGIXMLRPCRequestHandlerSimpleHTTPRequestHandlerCGIHTTPRequestHandlerPYTHON3_OSERROR_EXCEPTIONSPYTHON3_IMPORTERROR_EXCEPTIONS# This module is used to map the old Python 2 names to the new names used in# Python 3 for the pickle module. 
[Omitted: raw string-pool contents of the generated CodeQL Python database. These lines are not human-readable diff text; they are the database's interned strings (string literals, docstrings, and comments) extracted from Python 3.8 standard-library modules such as _compat_pickle, _compression, _contextvars, _ctypes, _datetime, _decimal, _elementtree, email._encoded_words, ctypes._endian, functools, _hashlib, _heapq, _io, locale, _lzma, _md5, _multiprocessing, _opcode, operator, _osx_support, and email._parseaddr. The binary pool data is omitted here, consistent with the other generated database files in this diff.]
This indicates a UTC timestamp that explicitly declaims knowledge of + the source timezone, as opposed to a +0000 timestamp that indicates the + source timezone really was UTC. + + 'b'0'u'0'b'Convert a time string to a time tuple.'u'Convert a time string to a time tuple.'b'Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp.'u'Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp.'b'Prepare string to be used in a quoted string. + + Turns backslash and double quote characters into quoted pairs. These + are the only characters that need to be quoted inside a quoted string. + Does not add the surrounding double quotes. + 'u'Prepare string to be used in a quoted string. + + Turns backslash and double quote characters into quoted pairs. These + are the only characters that need to be quoted inside a quoted string. + Does not add the surrounding double quotes. + 'b'\"'u'\"'b'Address parser class by Ben Escoto. + + To understand what this class does, it helps to have a copy of RFC 2822 in + front of you. + + Note: this class interface is deprecated and may be removed in the future. + Use email.utils.AddressList instead. + 'u'Address parser class by Ben Escoto. + + To understand what this class does, it helps to have a copy of RFC 2822 in + front of you. + + Note: this class interface is deprecated and may be removed in the future. + Use email.utils.AddressList instead. + 'b'Initialize a new instance. + + `field' is an unparsed address header field, containing + one or more addresses. + 'u'Initialize a new instance. + + `field' is an unparsed address header field, containing + one or more addresses. + 'b'()<>@,:;."[]'u'()<>@,:;."[]'b' 'u' 'b'Skip white space and extract comments.'u'Skip white space and extract comments.'b' + 'u' + 'b'Parse all addresses. + + Returns a list containing all of the addresses. + 'u'Parse all addresses. + + Returns a list containing all of the addresses. + 'b'Parse the next address.'u'Parse the next address.'b'.@'u'.@'b' ('u' ('b'Parse a route address (Return-path value). + + This method just skips all the route stuff and returns the addrspec. + 'u'Parse a route address (Return-path value). + + This method just skips all the route stuff and returns the addrspec. + 'b'Parse an RFC 2822 addr-spec.'u'Parse an RFC 2822 addr-spec.'b'"%s"'u'"%s"'b'Get the complete domain name from an address.'u'Get the complete domain name from an address.'b'Parse a header fragment delimited by special characters. + + `beginchar' is the start character for the fragment. + If self is not looking at an instance of `beginchar' then + getdelimited returns the empty string. + + `endchars' is a sequence of allowable end-delimiting characters. + Parsing stops when one of these is encountered. + + If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed + within the parsed fragment. + 'u'Parse a header fragment delimited by special characters. + + `beginchar' is the start character for the fragment. + If self is not looking at an instance of `beginchar' then + getdelimited returns the empty string. + + `endchars' is a sequence of allowable end-delimiting characters. + Parsing stops when one of these is encountered. + + If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed + within the parsed fragment. 
+ 'b'Get a quote-delimited fragment from self's field.'u'Get a quote-delimited fragment from self's field.'b'" 'u'" 'b'Get a parenthesis-delimited fragment from self's field.'u'Get a parenthesis-delimited fragment from self's field.'b') 'u') 'b'Parse an RFC 2822 domain-literal.'u'Parse an RFC 2822 domain-literal.'b'] 'u'] 'b'Parse an RFC 2822 atom. + + Optional atomends specifies a different set of end token delimiters + (the default is to use self.atomends). This is used e.g. in + getphraselist() since phrase endings must not include the `.' (which + is legal in phrases).'u'Parse an RFC 2822 atom. + + Optional atomends specifies a different set of end token delimiters + (the default is to use self.atomends). This is used e.g. in + getphraselist() since phrase endings must not include the `.' (which + is legal in phrases).'b'Parse a sequence of RFC 2822 phrases. + + A phrase is a sequence of words, which are in turn either RFC 2822 + atoms or quoted-strings. Phrases are canonicalized by squeezing all + runs of continuous whitespace into one space. + 'u'Parse a sequence of RFC 2822 phrases. + + A phrase is a sequence of words, which are in turn either RFC 2822 + atoms or quoted-strings. Phrases are canonicalized by squeezing all + runs of continuous whitespace into one space. + 'b'An AddressList encapsulates a list of parsed RFC 2822 addresses.'u'An AddressList encapsulates a list of parsed RFC 2822 addresses.'u'email._parseaddr'u'_parseaddr'u'Wrapper for potentially out-of-band buffers'pickle.PickleBufferPickleBufferu'PickleError.__weakref__'_pickle.PickleErrorPickleErroru'This takes a binary file for writing a pickle data stream. + +The optional *protocol* argument tells the pickler to use the given +protocol; supported protocols are 0, 1, 2, 3, 4 and 5. The default +protocol is 4. It was introduced in Python 3.4, and is incompatible +with previous versions. + +Specifying a negative protocol version selects the highest protocol +version supported. The higher the protocol used, the more recent the +version of Python needed to read the pickle produced. + +The *file* argument must have a write() method that accepts a single +bytes argument. It can thus be a file object opened for binary +writing, an io.BytesIO instance, or any other custom object that meets +this interface. + +If *fix_imports* is True and protocol is less than 3, pickle will try +to map the new Python 3 names to the old module names used in Python +2, so that the pickle data stream is readable with Python 2. + +If *buffer_callback* is None (the default), buffer views are +serialized into *file* as part of the pickle stream. + +If *buffer_callback* is not None, then it can be called any number +of times with a buffer view. If the callback returns a false value +(such as None), the given buffer is out-of-band; otherwise the +buffer is serialized in-band, i.e. inside the pickle stream. + +It is an error if *buffer_callback* is not None and *protocol* +is None or smaller than 5.'clear_memodispatch_tablefastu'Pickler.memo'memou'Pickler.persistent_id'persistent_id_pickle.PicklerPickler_pickle.PicklingErroru'This takes a binary file for reading a pickle data stream. + +The protocol version of the pickle is detected automatically, so no +protocol argument is needed. Bytes past the pickled object's +representation are ignored. + +The argument *file* must have two methods, a read() method that takes +an integer argument, and a readline() method that requires no +arguments. Both methods should return bytes. 
Thus *file* can be a +binary file object opened for reading, an io.BytesIO object, or any +other custom object that meets this interface. + +Optional keyword arguments are *fix_imports*, *encoding* and *errors*, +which are used to control compatibility support for pickle stream +generated by Python 2. If *fix_imports* is True, pickle will try to +map the old Python 2 names to the new names used in Python 3. The +*encoding* and *errors* tell pickle how to decode 8-bit string +instances pickled by Python 2; these default to 'ASCII' and 'strict', +respectively. The *encoding* can be 'bytes' to read these 8-bit +string instances as bytes objects.'find_classloadu'Unpickler.memo'u'Unpickler.persistent_load'persistent_load_pickle.UnpicklerUnpickler_pickle.UnpicklingErrorUnpicklingErroru'Optimized C implementation for the Python pickle module.'u'/Users/pwntester/.pyenv/versions/3.8.13/lib/python3.8/lib-dynload/_pickle.cpython-38-darwin.so'Policy framework for the email package. + +Allows fine grained feature control of how the package parses and emits data. +_charsetemail.utils_has_surrogatesPolicyCompat32compat32_PolicyBasePolicy Object basic framework. + + This class is useless unless subclassed. A subclass should define + class attributes with defaults for any values that are to be + managed by the Policy object. The constructor will then allow + non-default values to be set for these attributes at instance + creation time. The instance will be callable, taking these same + attributes keyword arguments, and returning a new instance + identical to the called instance except for those values changed + by the keyword arguments. Instances may be added, yielding new + instances with any non-default values from the right hand + operand overriding those in the left hand operand. That is, + + A + B == A() + + The repr of an instance can be used to reconstruct the object + if and only if the repr of the values can be used to reconstruct + those values. + + Create new Policy, possibly overriding some defaults. + + See class docstring for a list of overridable attributes. + + {!r} is an invalid keyword argument for {}{}={!r}cloneReturn a new instance with specified attributes changed. + + The new instance has the same attribute values as the current object, + except for the changes passed in as keyword arguments. + + newpolicy{!r} object attribute {!r} is read-only{!r} object has no attribute {!r}Non-default values from right operand override those from left. + + The object returned is a new instance of the subclass. + + _append_docadded_doc_extend_docstringsControls for how messages are interpreted and formatted. + + Most of the classes and many of the methods in the email package accept + Policy objects as parameters. A Policy object contains a set of values and + functions that control how input is interpreted and how output is rendered. + For example, the parameter 'raise_on_defect' controls whether or not an RFC + violation results in an error being raised or not, while 'max_line_length' + controls the maximum length of output lines when a Message is serialized. + + Any valid attribute may be overridden when a Policy is created by passing + it as a keyword argument to the constructor. Policy objects are immutable, + but a new Policy object can be created with only certain values changed by + calling the Policy instance with keyword arguments. 
Policy objects can + also be added, producing a new Policy object in which the non-default + attributes set in the right hand operand overwrite those specified in the + left operand. + + Settable attributes: + + raise_on_defect -- If true, then defects should be raised as errors. + Default: False. + + linesep -- string containing the value to use as separation + between output lines. Default '\n'. + + cte_type -- Type of allowed content transfer encodings + + 7bit -- ASCII only + 8bit -- Content-Transfer-Encoding: 8bit is allowed + + Default: 8bit. Also controls the disposition of + (RFC invalid) binary data in headers; see the + documentation of the binary_fold method. + + max_line_length -- maximum length of lines, excluding 'linesep', + during serialization. None or 0 means no line + wrapping is done. Default is 78. + + mangle_from_ -- a flag that, when True escapes From_ lines in the + body of the message by putting a `>' in front of + them. This is used when the message is being + serialized by a generator. Default: True. + + message_factory -- the class to use to create new message objects. + If the value is None, the default is Message. + + r"""raise_on_defectlinesep8bitcte_type78max_line_lengthmangle_from_message_factoryhandle_defectdefectBased on policy, either raise defect or call register_defect. + + handle_defect(obj, defect) + + defect should be a Defect subclass, but in any case must be an + Exception subclass. obj is the object on which the defect should be + registered if it is not raised. If the raise_on_defect is True, the + defect is raised as an error, otherwise the object and the defect are + passed to register_defect. + + This method is intended to be called by parsers that discover defects. + The email package parsers always call it with Defect instances. + + register_defectRecord 'defect' on 'obj'. + + Called by handle_defect if raise_on_defect is False. This method is + part of the Policy API so that Policy subclasses can implement custom + defect handling. The default implementation calls the append method of + the defects attribute of obj. The objects used by the email package by + default that get passed to this method will always have a defects + attribute with an append method. + + header_max_countReturn the maximum allowed number of headers named 'name'. + + Called when a header is added to a Message object. If the returned + value is not 0 or None, and there are already a number of headers with + the name 'name' equal to the value returned, a ValueError is raised. + + Because the default behavior of Message's __setitem__ is to append the + value to the list of headers, it is easy to create duplicate headers + without realizing it. This method allows certain headers to be limited + in the number of instances of that header that may be added to a + Message programmatically. (The limit is not observed by the parser, + which will faithfully produce as many headers as exist in the message + being parsed.) + + The default implementation returns None for all header names. + header_source_parsesourcelinesGiven a list of linesep terminated strings constituting the lines of + a single header, return the (name, value) tuple that should be stored + in the model. The input lines should retain their terminating linesep + characters. The lines passed in by the email package may contain + surrogateescaped binary data. + header_store_parseGiven the header name and the value provided by the application + program, return the (name, value) that should be stored in the model. 
+ header_fetch_parseGiven the header name and the value from the model, return the value + to be returned to the application program that is requesting that + header. The value passed in by the email package may contain + surrogateescaped binary data if the lines were parsed by a BytesParser. + The returned value should not contain any surrogateescaped data. + + Given the header name and the value from the model, return a string + containing linesep characters that implement the folding of the header + according to the policy controls. The value passed in by the email + package may contain surrogateescaped binary data if the lines were + parsed by a BytesParser. The returned value should not contain any + surrogateescaped data. + + fold_binaryGiven the header name and the value from the model, return binary + data containing linesep characters that implement the folding of the + header according to the policy controls. The value passed in by the + email package may contain surrogateescaped binary data. + + + + This particular policy is the backward compatibility Policy. It + replicates the behavior of the email package version 5.1. + _sanitize_headerHeaderUNKNOWN8BITheader_name+ + The name is parsed as everything up to the ':' and returned unmodified. + The value is determined by stripping leading whitespace off the + remainder of the first line, joining all subsequent lines together, and + stripping any trailing carriage return or linefeed characters. + + + + The name and value are returned unmodified. + + + If the value contains binary data, it is converted into a Header object + using the unknown-8bit charset. Otherwise it is returned unmodified. + + + Headers are folded using the Header folding algorithm, which preserves + existing line breaks in the value, and wraps each resulting line to the + max_line_length. Non-ASCII binary data are CTE encoded using the + unknown-8bit charset. + + _foldsanitize+ + Headers are folded using the Header folding algorithm, which preserves + existing line breaks in the value, and wraps each resulting line to the + max_line_length. If cte_type is 7bit, non-ascii binary data is CTE + encoded using the unknown-8bit charset. Otherwise the original source + header is used, with its existing line breaks and/or binary data. + + 7bitfolded%s: maxlinelen# If the header value contains surrogates, return a Header using# the unknown-8bit charset to encode the bytes as encoded words.# Assume it is already a header object# If we have raw 8bit data in a byte string, we have no idea# what the encoding is. There is no safe way to split this# string. If it's ascii-subset, then we could do a normal# ascii split, but if it's multibyte then we could break the# string. There's no way to know so the least harm seems to# be to not split the string and risk it being too long.# Assume it is a Header-like object.# The Header class interprets a value of None for maxlinelen as the# default value of 78, as recommended by RFC 2822.b'Policy framework for the email package. + +Allows fine grained feature control of how the package parses and emits data. +'u'Policy framework for the email package. + +Allows fine grained feature control of how the package parses and emits data. +'b'Policy'u'Policy'b'Compat32'u'Compat32'b'compat32'u'compat32'b'Policy Object basic framework. + + This class is useless unless subclassed. A subclass should define + class attributes with defaults for any values that are to be + managed by the Policy object. 
The constructor will then allow + non-default values to be set for these attributes at instance + creation time. The instance will be callable, taking these same + attributes keyword arguments, and returning a new instance + identical to the called instance except for those values changed + by the keyword arguments. Instances may be added, yielding new + instances with any non-default values from the right hand + operand overriding those in the left hand operand. That is, + + A + B == A() + + The repr of an instance can be used to reconstruct the object + if and only if the repr of the values can be used to reconstruct + those values. + + 'u'Policy Object basic framework. + + This class is useless unless subclassed. A subclass should define + class attributes with defaults for any values that are to be + managed by the Policy object. The constructor will then allow + non-default values to be set for these attributes at instance + creation time. The instance will be callable, taking these same + attributes keyword arguments, and returning a new instance + identical to the called instance except for those values changed + by the keyword arguments. Instances may be added, yielding new + instances with any non-default values from the right hand + operand overriding those in the left hand operand. That is, + + A + B == A() + + The repr of an instance can be used to reconstruct the object + if and only if the repr of the values can be used to reconstruct + those values. + + 'b'Create new Policy, possibly overriding some defaults. + + See class docstring for a list of overridable attributes. + + 'u'Create new Policy, possibly overriding some defaults. + + See class docstring for a list of overridable attributes. + + 'b'{!r} is an invalid keyword argument for {}'u'{!r} is an invalid keyword argument for {}'b'{}={!r}'u'{}={!r}'b'Return a new instance with specified attributes changed. + + The new instance has the same attribute values as the current object, + except for the changes passed in as keyword arguments. + + 'u'Return a new instance with specified attributes changed. + + The new instance has the same attribute values as the current object, + except for the changes passed in as keyword arguments. + + 'b'{!r} object attribute {!r} is read-only'u'{!r} object attribute {!r} is read-only'b'{!r} object has no attribute {!r}'u'{!r} object has no attribute {!r}'b'Non-default values from right operand override those from left. + + The object returned is a new instance of the subclass. + + 'u'Non-default values from right operand override those from left. + + The object returned is a new instance of the subclass. + + 'b'Controls for how messages are interpreted and formatted. + + Most of the classes and many of the methods in the email package accept + Policy objects as parameters. A Policy object contains a set of values and + functions that control how input is interpreted and how output is rendered. + For example, the parameter 'raise_on_defect' controls whether or not an RFC + violation results in an error being raised or not, while 'max_line_length' + controls the maximum length of output lines when a Message is serialized. + + Any valid attribute may be overridden when a Policy is created by passing + it as a keyword argument to the constructor. Policy objects are immutable, + but a new Policy object can be created with only certain values changed by + calling the Policy instance with keyword arguments. 
Policy objects can + also be added, producing a new Policy object in which the non-default + attributes set in the right hand operand overwrite those specified in the + left operand. + + Settable attributes: + + raise_on_defect -- If true, then defects should be raised as errors. + Default: False. + + linesep -- string containing the value to use as separation + between output lines. Default '\n'. + + cte_type -- Type of allowed content transfer encodings + + 7bit -- ASCII only + 8bit -- Content-Transfer-Encoding: 8bit is allowed + + Default: 8bit. Also controls the disposition of + (RFC invalid) binary data in headers; see the + documentation of the binary_fold method. + + max_line_length -- maximum length of lines, excluding 'linesep', + during serialization. None or 0 means no line + wrapping is done. Default is 78. + + mangle_from_ -- a flag that, when True escapes From_ lines in the + body of the message by putting a `>' in front of + them. This is used when the message is being + serialized by a generator. Default: True. + + message_factory -- the class to use to create new message objects. + If the value is None, the default is Message. + + 'u'Controls for how messages are interpreted and formatted. + + Most of the classes and many of the methods in the email package accept + Policy objects as parameters. A Policy object contains a set of values and + functions that control how input is interpreted and how output is rendered. + For example, the parameter 'raise_on_defect' controls whether or not an RFC + violation results in an error being raised or not, while 'max_line_length' + controls the maximum length of output lines when a Message is serialized. + + Any valid attribute may be overridden when a Policy is created by passing + it as a keyword argument to the constructor. Policy objects are immutable, + but a new Policy object can be created with only certain values changed by + calling the Policy instance with keyword arguments. Policy objects can + also be added, producing a new Policy object in which the non-default + attributes set in the right hand operand overwrite those specified in the + left operand. + + Settable attributes: + + raise_on_defect -- If true, then defects should be raised as errors. + Default: False. + + linesep -- string containing the value to use as separation + between output lines. Default '\n'. + + cte_type -- Type of allowed content transfer encodings + + 7bit -- ASCII only + 8bit -- Content-Transfer-Encoding: 8bit is allowed + + Default: 8bit. Also controls the disposition of + (RFC invalid) binary data in headers; see the + documentation of the binary_fold method. + + max_line_length -- maximum length of lines, excluding 'linesep', + during serialization. None or 0 means no line + wrapping is done. Default is 78. + + mangle_from_ -- a flag that, when True escapes From_ lines in the + body of the message by putting a `>' in front of + them. This is used when the message is being + serialized by a generator. Default: True. + + message_factory -- the class to use to create new message objects. + If the value is None, the default is Message. + + 'b'8bit'u'8bit'b'Based on policy, either raise defect or call register_defect. + + handle_defect(obj, defect) + + defect should be a Defect subclass, but in any case must be an + Exception subclass. obj is the object on which the defect should be + registered if it is not raised. 
If the raise_on_defect is True, the + defect is raised as an error, otherwise the object and the defect are + passed to register_defect. + + This method is intended to be called by parsers that discover defects. + The email package parsers always call it with Defect instances. + + 'u'Based on policy, either raise defect or call register_defect. + + handle_defect(obj, defect) + + defect should be a Defect subclass, but in any case must be an + Exception subclass. obj is the object on which the defect should be + registered if it is not raised. If the raise_on_defect is True, the + defect is raised as an error, otherwise the object and the defect are + passed to register_defect. + + This method is intended to be called by parsers that discover defects. + The email package parsers always call it with Defect instances. + + 'b'Record 'defect' on 'obj'. + + Called by handle_defect if raise_on_defect is False. This method is + part of the Policy API so that Policy subclasses can implement custom + defect handling. The default implementation calls the append method of + the defects attribute of obj. The objects used by the email package by + default that get passed to this method will always have a defects + attribute with an append method. + + 'u'Record 'defect' on 'obj'. + + Called by handle_defect if raise_on_defect is False. This method is + part of the Policy API so that Policy subclasses can implement custom + defect handling. The default implementation calls the append method of + the defects attribute of obj. The objects used by the email package by + default that get passed to this method will always have a defects + attribute with an append method. + + 'b'Return the maximum allowed number of headers named 'name'. + + Called when a header is added to a Message object. If the returned + value is not 0 or None, and there are already a number of headers with + the name 'name' equal to the value returned, a ValueError is raised. + + Because the default behavior of Message's __setitem__ is to append the + value to the list of headers, it is easy to create duplicate headers + without realizing it. This method allows certain headers to be limited + in the number of instances of that header that may be added to a + Message programmatically. (The limit is not observed by the parser, + which will faithfully produce as many headers as exist in the message + being parsed.) + + The default implementation returns None for all header names. + 'u'Return the maximum allowed number of headers named 'name'. + + Called when a header is added to a Message object. If the returned + value is not 0 or None, and there are already a number of headers with + the name 'name' equal to the value returned, a ValueError is raised. + + Because the default behavior of Message's __setitem__ is to append the + value to the list of headers, it is easy to create duplicate headers + without realizing it. This method allows certain headers to be limited + in the number of instances of that header that may be added to a + Message programmatically. (The limit is not observed by the parser, + which will faithfully produce as many headers as exist in the message + being parsed.) + + The default implementation returns None for all header names. + 'b'Given a list of linesep terminated strings constituting the lines of + a single header, return the (name, value) tuple that should be stored + in the model. The input lines should retain their terminating linesep + characters. 
The lines passed in by the email package may contain + surrogateescaped binary data. + 'u'Given a list of linesep terminated strings constituting the lines of + a single header, return the (name, value) tuple that should be stored + in the model. The input lines should retain their terminating linesep + characters. The lines passed in by the email package may contain + surrogateescaped binary data. + 'b'Given the header name and the value provided by the application + program, return the (name, value) that should be stored in the model. + 'u'Given the header name and the value provided by the application + program, return the (name, value) that should be stored in the model. + 'b'Given the header name and the value from the model, return the value + to be returned to the application program that is requesting that + header. The value passed in by the email package may contain + surrogateescaped binary data if the lines were parsed by a BytesParser. + The returned value should not contain any surrogateescaped data. + + 'u'Given the header name and the value from the model, return the value + to be returned to the application program that is requesting that + header. The value passed in by the email package may contain + surrogateescaped binary data if the lines were parsed by a BytesParser. + The returned value should not contain any surrogateescaped data. + + 'b'Given the header name and the value from the model, return a string + containing linesep characters that implement the folding of the header + according to the policy controls. The value passed in by the email + package may contain surrogateescaped binary data if the lines were + parsed by a BytesParser. The returned value should not contain any + surrogateescaped data. + + 'u'Given the header name and the value from the model, return a string + containing linesep characters that implement the folding of the header + according to the policy controls. The value passed in by the email + package may contain surrogateescaped binary data if the lines were + parsed by a BytesParser. The returned value should not contain any + surrogateescaped data. + + 'b'Given the header name and the value from the model, return binary + data containing linesep characters that implement the folding of the + header according to the policy controls. The value passed in by the + email package may contain surrogateescaped binary data. + + 'u'Given the header name and the value from the model, return binary + data containing linesep characters that implement the folding of the + header according to the policy controls. The value passed in by the + email package may contain surrogateescaped binary data. + + 'b'+ + This particular policy is the backward compatibility Policy. It + replicates the behavior of the email package version 5.1. + 'u'+ + This particular policy is the backward compatibility Policy. It + replicates the behavior of the email package version 5.1. + 'b'+ + The name is parsed as everything up to the ':' and returned unmodified. + The value is determined by stripping leading whitespace off the + remainder of the first line, joining all subsequent lines together, and + stripping any trailing carriage return or linefeed characters. + + 'u'+ + The name is parsed as everything up to the ':' and returned unmodified. + The value is determined by stripping leading whitespace off the + remainder of the first line, joining all subsequent lines together, and + stripping any trailing carriage return or linefeed characters. 
+ + 'b'+ + The name and value are returned unmodified. + 'u'+ + The name and value are returned unmodified. + 'b'+ + If the value contains binary data, it is converted into a Header object + using the unknown-8bit charset. Otherwise it is returned unmodified. + 'u'+ + If the value contains binary data, it is converted into a Header object + using the unknown-8bit charset. Otherwise it is returned unmodified. + 'b'+ + Headers are folded using the Header folding algorithm, which preserves + existing line breaks in the value, and wraps each resulting line to the + max_line_length. Non-ASCII binary data are CTE encoded using the + unknown-8bit charset. + + 'u'+ + Headers are folded using the Header folding algorithm, which preserves + existing line breaks in the value, and wraps each resulting line to the + max_line_length. Non-ASCII binary data are CTE encoded using the + unknown-8bit charset. + + 'b'+ + Headers are folded using the Header folding algorithm, which preserves + existing line breaks in the value, and wraps each resulting line to the + max_line_length. If cte_type is 7bit, non-ascii binary data is CTE + encoded using the unknown-8bit charset. Otherwise the original source + header is used, with its existing line breaks and/or binary data. + + 'u'+ + Headers are folded using the Header folding algorithm, which preserves + existing line breaks in the value, and wraps each resulting line to the + max_line_length. If cte_type is 7bit, non-ascii binary data is CTE + encoded using the unknown-8bit charset. Otherwise the original source + header is used, with its existing line breaks and/or binary data. + + 'b'7bit'u'7bit'b'%s: 'u'%s: 'u'email._policybase'u'_policybase'u'POSIX shared memory module'u'/Users/pwntester/.pyenv/versions/3.8.13/lib/python3.8/lib-dynload/_posixshmem.cpython-38-darwin.so'u'_posixshmem'shm_openshm_unlink_posixshmemu'A POSIX helper for the subprocess module.'u'/Users/pwntester/.pyenv/versions/3.8.13/lib/python3.8/lib-dynload/_posixsubprocess.cpython-38-darwin.so'u'_posixsubprocess'fork_exec_posixsubprocess_weakrefsetReturns the current ABC cache token. + + The token is an opaque object (supporting equality testing) identifying the + current version of the ABC cache for virtual subclasses. The token changes + with every call to ``register()`` on any ABC. + _abc_invalidation_counterMetaclass for defining Abstract Base Classes (ABCs). + + Use this metaclass to create an ABC. An ABC can be subclassed + directly, and then acts as a mix-in class. You can also register + unrelated concrete classes (even built-in classes) and unrelated + ABCs as 'virtual subclasses' -- these and their descendants will + be considered subclasses of the registering ABC by the built-in + issubclass() function, but the registering ABC won't show up in + their MRO (Method Resolution Order) nor will method + implementations defined by the registering ABC be callable (not + even via super()). + mclsbasesabstracts_abc_registry_abc_cache_abc_negative_cache_abc_negative_cache_versionsubclassRegister a virtual subclass of an ABC. + + Returns the subclass, to allow usage as a class decorator. + Can only register classesRefusing to create an inheritance cycle_dump_registryDebug helper to print the ABC registry.Class: Inv. 
counter: _abc__abc_registry_clearClear the registry (for debugging or testing)._abc_caches_clearClear the caches (for debugging or testing).Override for isinstance(instance, cls).subtypeOverride for issubclass(subclass, cls).issubclass() arg 1 must be a classrclsscls# A global counter that is incremented each time a class is# registered as a virtual subclass of anything. It forces the# negative cache to be cleared before its next use.# Note: this counter is private. Use `abc.get_cache_token()` for# external code.# Compute set of abstract method names# Set up inheritance registry# Already a subclass# Subtle: test for cycles *after* testing for "already a subclass";# this means we allow X.register(X) and interpret it as a no-op.# This would create a cycle, which is bad for the algorithm below# Invalidate negative cache# Inline the cache checking# Fall back to the subclass check.# Check cache# Check negative cache; may have to invalidate# Invalidate the negative cache# Check the subclass hook# Check if it's a direct subclass# Check if it's a subclass of a registered class (recursive)# Check if it's a subclass of a subclass (recursive)# No dice; update negative cacheb'Returns the current ABC cache token. + + The token is an opaque object (supporting equality testing) identifying the + current version of the ABC cache for virtual subclasses. The token changes + with every call to ``register()`` on any ABC. + 'u'Returns the current ABC cache token. + + The token is an opaque object (supporting equality testing) identifying the + current version of the ABC cache for virtual subclasses. The token changes + with every call to ``register()`` on any ABC. + 'b'Metaclass for defining Abstract Base Classes (ABCs). + + Use this metaclass to create an ABC. An ABC can be subclassed + directly, and then acts as a mix-in class. You can also register + unrelated concrete classes (even built-in classes) and unrelated + ABCs as 'virtual subclasses' -- these and their descendants will + be considered subclasses of the registering ABC by the built-in + issubclass() function, but the registering ABC won't show up in + their MRO (Method Resolution Order) nor will method + implementations defined by the registering ABC be callable (not + even via super()). + 'u'Metaclass for defining Abstract Base Classes (ABCs). + + Use this metaclass to create an ABC. An ABC can be subclassed + directly, and then acts as a mix-in class. You can also register + unrelated concrete classes (even built-in classes) and unrelated + ABCs as 'virtual subclasses' -- these and their descendants will + be considered subclasses of the registering ABC by the built-in + issubclass() function, but the registering ABC won't show up in + their MRO (Method Resolution Order) nor will method + implementations defined by the registering ABC be callable (not + even via super()). + 'b'__isabstractmethod__'u'__isabstractmethod__'b'__abstractmethods__'u'__abstractmethods__'b'Register a virtual subclass of an ABC. + + Returns the subclass, to allow usage as a class decorator. + 'u'Register a virtual subclass of an ABC. + + Returns the subclass, to allow usage as a class decorator. + 'b'Can only register classes'u'Can only register classes'b'Refusing to create an inheritance cycle'u'Refusing to create an inheritance cycle'b'Debug helper to print the ABC registry.'u'Debug helper to print the ABC registry.'b'Class: 'u'Class: 'b'Inv. counter: 'u'Inv. 
counter: 'b'_abc_'u'_abc_'b'Clear the registry (for debugging or testing).'u'Clear the registry (for debugging or testing).'b'Clear the caches (for debugging or testing).'u'Clear the caches (for debugging or testing).'b'Override for isinstance(instance, cls).'u'Override for isinstance(instance, cls).'b'Override for issubclass(subclass, cls).'u'Override for issubclass(subclass, cls).'b'issubclass() arg 1 must be a class'u'issubclass() arg 1 must be a class'b'__mro__'u'__mro__'u'_py_abc' +This is an implementation of decimal floating point arithmetic based on +the General Decimal Arithmetic Specification: + + http://speleotrove.com/decimal/decarith.html + +and IEEE standard 854-1987: + + http://en.wikipedia.org/wiki/IEEE_854-1987 + +Decimal floating point has finite precision with arbitrarily large bounds. + +The purpose of this module is to support arithmetic using familiar +"schoolhouse" rules and to avoid some of the tricky representation +issues associated with binary floating point. The package is especially +useful for financial applications or for contexts where users have +expectations that are at odds with binary floating point (for instance, +in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead +of 0.0; Decimal('1.00') % Decimal('0.1') returns the expected +Decimal('0.00')). + +Here are some examples of using the decimal module: + +>>> from decimal import * +>>> setcontext(ExtendedContext) +>>> Decimal(0) +Decimal('0') +>>> Decimal('1') +Decimal('1') +>>> Decimal('-.0123') +Decimal('-0.0123') +>>> Decimal(123456) +Decimal('123456') +>>> Decimal('123.45e12345678') +Decimal('1.2345E+12345680') +>>> Decimal('1.33') + Decimal('1.27') +Decimal('2.60') +>>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41') +Decimal('-2.20') +>>> dig = Decimal(1) +>>> print(dig / Decimal(3)) +0.333333333 +>>> getcontext().prec = 18 +>>> print(dig / Decimal(3)) +0.333333333333333333 +>>> print(dig.sqrt()) +1 +>>> print(Decimal(3).sqrt()) +1.73205080756887729 +>>> print(Decimal(3) ** 123) +4.85192780976896427E+58 +>>> inf = Decimal(1) / Decimal(0) +>>> print(inf) +Infinity +>>> neginf = Decimal(-1) / Decimal(0) +>>> print(neginf) +-Infinity +>>> print(neginf + inf) +NaN +>>> print(neginf * inf) +-Infinity +>>> print(dig / 0) +Infinity +>>> getcontext().traps[DivisionByZero] = 1 +>>> print(dig / 0) +Traceback (most recent call last): + ... + ... + ... +decimal.DivisionByZero: x / 0 +>>> c = Context() +>>> c.traps[InvalidOperation] = 0 +>>> print(c.flags[InvalidOperation]) +0 +>>> c.divide(Decimal(0), Decimal(0)) +Decimal('NaN') +>>> c.traps[InvalidOperation] = 1 +>>> print(c.flags[InvalidOperation]) +1 +>>> c.flags[InvalidOperation] = 0 +>>> print(c.flags[InvalidOperation]) +0 +>>> print(c.divide(Decimal(0), Decimal(0))) +Traceback (most recent call last): + ... + ... + ... +decimal.InvalidOperation: 0 / 0 +>>> print(c.flags[InvalidOperation]) +1 +>>> c.flags[InvalidOperation] = 0 +>>> c.traps[InvalidOperation] = 0 +>>> print(c.divide(Decimal(0), Decimal(0))) +NaN +>>> print(c.flags[InvalidOperation]) +1 +>>> +__xname__1.702.4.2math_mathnumbers_numbers_namedtuplesign digits exponent425000000Base exception class. + + Used exceptions derive from this. + If an exception derives from another exception besides this (such as + Underflow (Inexact, Rounded, Subnormal) that indicates that it is only + called if the others are present. This isn't actually used for + anything, though. + + handle -- Called when context._raise_error is called and the + trap_enabler is not set. 
First argument is self, second is the + context. More arguments can be given, those being after + the explanation in _raise_error (For example, + context._raise_error(NewError, '(-x)!', self._sign) would + call NewError().handle(context, self._sign).) + + To define a new exception, it should be sufficient to have it derive + from DecimalException. + Exponent of a 0 changed to fit bounds. + + This occurs and signals clamped if the exponent of a result has been + altered in order to fit the constraints of a specific concrete + representation. This may occur when the exponent of a zero result would + be outside the bounds of a representation, or when a large normal + number would have an encoded exponent that cannot be represented. In + this latter case, the exponent is reduced to fit and the corresponding + number of zero digits are appended to the coefficient ("fold-down"). + An invalid operation was performed. + + Various bad things cause this: + + Something creates a signaling NaN + -INF + INF + 0 * (+-)INF + (+-)INF / (+-)INF + x % 0 + (+-)INF % x + x._rescale( non-integer ) + sqrt(-x) , x > 0 + 0 ** 0 + x ** (non-integer) + x ** (+-)INF + An operand is invalid + + The result of the operation after these is a quiet positive NaN, + except when the cause is a signaling NaN, in which case the result is + also a quiet NaN, but with the original sign, and an optional + diagnostic information. + _dec_from_triple_sign_intans_fix_nan_NaNTrying to convert badly formed string. + + This occurs and signals invalid-operation if a string is being + converted to a number and it does not conform to the numeric string + syntax. The result is [0,qNaN]. + Division by 0. + + This occurs and signals division-by-zero if division of a finite number + by zero was attempted (during a divide-integer or divide operation, or a + power operation with negative right-hand operand), and the dividend was + not zero. + + The result of the operation is [sign,inf], where sign is the exclusive + or of the signs of the operands for divide, or is 1 for an odd power of + -0, for power. + _SignedInfinityCannot perform the division adequately. + + This occurs and signals invalid-operation if the integer result of a + divide-integer or remainder operation had too many digits (would be + longer than precision). The result is [0,qNaN]. + Undefined result of division. + + This occurs and signals invalid-operation if division by zero was + attempted (during a divide-integer, divide, or remainder operation), and + the dividend is also zero. The result is [0,qNaN]. + Had to round, losing information. + + This occurs and signals inexact whenever the result of an operation is + not exact (that is, it needed to be rounded and any discarded digits + were non-zero), or if an overflow or underflow condition occurs. The + result in all cases is unchanged. + + The inexact signal may be tested (or trapped) to determine if a given + operation (or sequence of operations) was inexact. + Invalid context. Unknown rounding, for example. + + This occurs and signals invalid-operation if an invalid context was + detected during an operation. This can occur if contexts are not checked + on creation and either the precision exceeds the capability of the + underlying concrete representation or an unknown or unsupported rounding + was specified. These aspects of the context need only be checked when + the values are required to be used. The result is [0,qNaN]. + Number got rounded (not necessarily changed during rounding). 
+ + This occurs and signals rounded whenever the result of an operation is + rounded (that is, some zero or non-zero digits were discarded from the + coefficient), or if an overflow or underflow condition occurs. The + result in all cases is unchanged. + + The rounded signal may be tested (or trapped) to determine if a given + operation (or sequence of operations) caused a loss of precision. + Exponent < Emin before rounding. + + This occurs and signals subnormal whenever the result of a conversion or + operation is subnormal (that is, its adjusted exponent is less than + Emin, before any rounding). The result in all cases is unchanged. + + The subnormal signal may be tested (or trapped) to determine if a given + or operation (or sequence of operations) yielded a subnormal result. + Numerical overflow. + + This occurs and signals overflow if the adjusted exponent of a result + (from a conversion or from an operation that is not an attempt to divide + by zero), after rounding, would be greater than the largest value that + can be handled by the implementation (the value Emax). + + The result depends on the rounding mode: + + For round-half-up and round-half-even (and for round-half-down and + round-up, if implemented), the result of the operation is [sign,inf], + where sign is the sign of the intermediate result. For round-down, the + result is the largest finite number that can be represented in the + current precision, with the sign of the intermediate result. For + round-ceiling, the result is the same as for round-down if the sign of + the intermediate result is 1, or is [0,inf] otherwise. For round-floor, + the result is the same as for round-down if the sign of the intermediate + result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded + will also be raised. + Numerical underflow with result rounded to 0. + + This occurs and signals underflow if a result is inexact and the + adjusted exponent of the result would be smaller (more negative) than + the smallest value that can be handled by the implementation (the value + Emin). That is, the result is both inexact and subnormal. + + The result after an underflow will be a subnormal number rounded, if + necessary, so that its exponent is not less than Etiny. This may result + in 0 with the sign of the intermediate result and an exponent of Etiny. + + In all cases, Inexact, Rounded, and Subnormal will also be raised. + Enable stricter semantics for mixing floats and Decimals. + + If the signal is not trapped (default), mixing floats and Decimals is + permitted in the Decimal() constructor, context.create_decimal() and + all comparison operators. Both conversion and comparisons are exact. + Any occurrence of a mixed operation is silently recorded by setting + FloatOperation in the context flags. Explicit conversions with + Decimal.from_float() or context.create_decimal_from_float() do not + set the flag. + + Otherwise (the signal is trapped), only equality comparisons and explicit + conversions are silent. All other mixed operations raise FloatOperation. + _signals_condition_map_rounding_modescontextvarsdecimal_context_current_context_varReturns this thread's context. + + If this thread does not yet have a context, returns + a new context and sets this thread's context. + New contexts are copies of DefaultContext. 
+ Set this thread's context to context.Return a context manager for a copy of the supplied context + + Uses a copy of the current context if no context is specified + The returned context manager creates a local decimal context + in a with statement: + def sin(x): + with localcontext() as ctx: + ctx.prec += 2 + # Rest of sin calculation algorithm + # uses a precision 2 greater than normal + return +s # Convert result to normal precision + + def sin(x): + with localcontext(ExtendedContext): + # Rest of sin calculation algorithm + # uses the Extended Context from the + # General Decimal Arithmetic Specification + return +s # Convert result to normal context + + >>> setcontext(DefaultContext) + >>> print(getcontext().prec) + 28 + >>> with localcontext(): + ... ctx = getcontext() + ... ctx.prec += 2 + ... print(ctx.prec) + ... + 30 + >>> with localcontext(ExtendedContext): + ... print(getcontext().prec) + ... + 9 + >>> print(getcontext().prec) + 28 + _ContextManagerFloating point class for decimal arithmetic._exp_is_specialCreate a decimal point instance. + + >>> Decimal('3.14') # string input + Decimal('3.14') + >>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent) + Decimal('3.14') + >>> Decimal(314) # int + Decimal('314') + >>> Decimal(Decimal(314)) # another decimal instance + Decimal('314') + >>> Decimal(' 3.14 \n') # leading and trailing whitespace okay + Decimal('3.14') + _raise_errorInvalid literal for Decimal: %rintpartfracfracpartdiagF_WorkRepInvalid tuple size in creation of Decimal from list or tuple. The list or tuple should have exactly three elements.'Invalid tuple size in creation of Decimal ''from list or tuple. The list or tuple ''should have exactly three elements.'Invalid sign. The first value in the tuple should be an integer; either 0 for a positive number or 1 for a negative number."Invalid sign. The first value in the tuple ""should be an integer; either 0 for a ""positive number or 1 for a negative number."digitThe second value in the tuple must be composed of integers in the range 0 through 9."The second value in the tuple must ""be composed of integers in the range ""0 through 9."The third value in the tuple must be an integer, or one of the strings 'F', 'n', 'N'."The third value in the tuple must ""be an integer, or one of the ""strings 'F', 'n', 'N'."strict semantics for mixing floats and Decimals are enabled"strict semantics for mixing floats and Decimals are ""enabled"Cannot convert %r to DecimalConverts a float to a decimal number, exactly. + + Note that Decimal.from_float(0.1) is not the same as Decimal('0.1'). + Since 0.1 is not exactly representable in binary floating point, the + value is stored as the nearest representable value which is + 0x1.999999999999ap-4. The exact equivalent of the value in decimal + is 0.1000000000000000055511151231257827021181583404541015625. + + >>> Decimal.from_float(0.1) + Decimal('0.1000000000000000055511151231257827021181583404541015625') + >>> Decimal.from_float(float('nan')) + Decimal('NaN') + >>> Decimal.from_float(float('inf')) + Decimal('Infinity') + >>> Decimal.from_float(-float('inf')) + Decimal('-Infinity') + >>> Decimal.from_float(-0.0) + Decimal('-0') + + coeffisinfisnancopysignargument must be int or float._isnanReturns whether the number is not actually one. + + 0 if a number + 1 if NaN + 2 if sNaN + _isinfinityReturns whether the number is infinite + + 0 if finite or not a number + 1 if +INF + -1 if -INF + _check_nansReturns whether the number is not actually one. 
+ + if self, other are sNaN, signal + if self, other are NaN return nan + return 0 + + Done before operations. + self_is_nanother_is_nansNaN_compare_check_nansVersion of _check_nans used for the signaling comparisons + compare_signal, __le__, __lt__, __ge__, __gt__. + + Signal InvalidOperation if either self or other is a (quiet + or signaling) NaN. Signaling NaNs take precedence over quiet + NaNs. + + Return 0 if neither operand is a NaN. + + comparison involving sNaNcomparison involving NaNReturn True if self is nonzero; otherwise return False. + + NaNs and infinities are considered nonzero. + _cmpCompare the two non-NaN decimal instances self and other. + + Returns -1 if self < other, 0 if self == other and 1 + if self > other. This routine is for internal use only.self_infother_infself_adjustedother_adjustedself_paddedother_padded_convert_for_comparisonequality_opCompare self to other. Return a decimal value: + + a or b is a NaN ==> Decimal('NaN') + a < b ==> Decimal('-1') + a == b ==> Decimal('0') + a > b ==> Decimal('1') + _convert_otherraiseitx.__hash__() <==> hash(x)Cannot hash a signaling NaN value._PyHASH_NAN_PyHASH_INF_PyHASH_MODULUSexp_hash_PyHASH_10INVhash_Represents the number as a triple tuple. + + To show the internals exactly as they are. + Express a finite Decimal instance in the form n / d. + + Returns a pair (n, d) of integers. When called on an infinity + or NaN, raises OverflowError or ValueError respectively. + + >>> Decimal('3.14').as_integer_ratio() + (157, 50) + >>> Decimal('-123e5').as_integer_ratio() + (-12300000, 1) + >>> Decimal('0.00').as_integer_ratio() + (0, 1) + + cannot convert NaN to integer ratiocannot convert Infinity to integer ratiod5d2shift2Represents the number as an instance of Decimal.Decimal('%s')engReturn string representation of the number in scientific notation. + + Captures all of the information in the underlying representation. + InfinityNaNleftdigitsdotplace%+dConvert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. + Returns a copy with the sign switched. + + Rounds, if it has reason. + _fixReturns a copy, unless it is a sNaN. + + Rounds the number (if more than precision digits) + Returns the absolute value of self. + + If the keyword argument 'round' is false, do not round. The + expression self.__abs__(round=False) is equivalent to + self.copy_abs(). + Returns self + other. + + -INF + INF (or the reverse) cause InvalidOperation errors. + -INF + INFnegativezero_rescaleop1op2_normalizeReturn self - otherReturn other - selfReturn self * other. + + (+-) INF * 0 (or its reverse) raise InvalidOperation. + resultsign(+-)INF * 00 * (+-)INFresultexpReturn self / other.(+-)INF/(+-)INFDivision by infinity0 / 0x / 0ideal_exp_divideReturn (self // other, self % other), to context.prec precision. + + Assumes that neither self nor other is a NaN, that self is not + infinite and that other is nonzero. + expdiffquotient too large in //, % or divmodSwaps self/other and returns __truediv__. + Return (self // other, self % other) + divmod(INF, INF)INF % xdivmod(0, 0)x // 0x % 0quotientSwaps self/other and returns __divmod__. + self % other + 0 % 0Swaps self/other and returns __mod__. 
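Among the strings above are the docstrings for `as_integer_ratio()` and for the quiet-NaN comparison rules. A small sketch of both behaviours, using only equality operators so no InvalidOperation trap fires:

```python
# Sketch: exact integer ratios and quiet-NaN equality semantics.
from decimal import Decimal

print(Decimal("3.14").as_integer_ratio())     # (157, 50)
print(Decimal("-123e5").as_integer_ratio())   # (-12300000, 1)

nan = Decimal("NaN")
print(nan == Decimal("1"))   # False: a quiet NaN never compares equal
print(nan != Decimal("1"))   # True
```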
+ Remainder nearest to 0- abs(remainder-near) <= other/2 + remainder_near(infinity, x)remainder_near(x, 0)remainder_near(0, 0)ideal_exponentself // otherINF // INF0 // 0Swaps self/other and returns __floordiv__.Float representation.Cannot convert signaling NaN to float-nanConverts self to an int, truncating if necessary.Cannot convert NaN to integerCannot convert infinity to integerDecapitate the payload of a NaN to fit the contextpayloadmax_payload_lenRound if it is necessary to keep self within prec precision. + + Rounds and fixes the exponent. Does not raise on a sNaN. + + Arguments: + self - Decimal instance + context - context used. + exp_maxnew_expexp_minabove Emaxself_is_subnormal_pick_rounding_functionrounding_methodchanged_round_downAlso known as round-towards-0, truncate._all_zeros_round_upRounds away from 0._round_half_upRounds 5 up (away from 0)56789_round_half_downRound 5 down_exact_half_round_half_evenRound 5 to even, rest to nearest.02468_round_ceilingRounds up (not away from 0 if negative.)_round_floorRounds down (not towards 0 if negative)_round_05upRound down unless digit prec-1 is 0 or 5.05Round self to the nearest integer, or to a given precision. + + If only one argument is supplied, round a finite Decimal + instance self to the nearest integer. If self is infinite or + a NaN then a Python exception is raised. If self is finite + and lies exactly halfway between two integers then it is + rounded to the integer with even last digit. + + >>> round(Decimal('123.456')) + 123 + >>> round(Decimal('-456.789')) + -457 + >>> round(Decimal('-3.0')) + -3 + >>> round(Decimal('2.5')) + 2 + >>> round(Decimal('3.5')) + 4 + >>> round(Decimal('Inf')) + Traceback (most recent call last): + ... + OverflowError: cannot round an infinity + >>> round(Decimal('NaN')) + Traceback (most recent call last): + ... + ValueError: cannot round a NaN + + If a second argument n is supplied, self is rounded to n + decimal places using the rounding mode for the current + context. + + For an integer n, round(self, -n) is exactly equivalent to + self.quantize(Decimal('1En')). + + >>> round(Decimal('123.456'), 0) + Decimal('123') + >>> round(Decimal('123.456'), 2) + Decimal('123.46') + >>> round(Decimal('123.456'), -2) + Decimal('1E+2') + >>> round(Decimal('-Infinity'), 37) + Decimal('NaN') + >>> round(Decimal('sNaN123'), 0) + Decimal('NaN123') + + Second argument to round should be integralcannot round a NaNcannot round an infinityReturn the floor of self, as an integer. + + For a finite Decimal instance self, return the greatest + integer n such that n <= self. If self is infinite or a NaN + then a Python exception is raised. + + Return the ceiling of self, as an integer. + + For a finite Decimal instance self, return the least integer n + such that n >= self. If self is infinite or a NaN then a + Python exception is raised. + + thirdFused multiply-add. + + Returns self*other+third with no rounding of the intermediate + product self*other. + + self and other are multiplied together, with no rounding of + the result. The third operand is then added to the result, + and a single final rounding is performed. 
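The `__round__` docstring above distinguishes one-argument `round()` (always ties-to-even) from two-argument `round()` (which follows the context rounding mode). A short sketch of that difference:

```python
# Sketch: round() on Decimal with and without a digit count.
from decimal import Decimal, ROUND_HALF_UP, localcontext

print(round(Decimal("2.5")))          # 2, ties go to the even neighbour
print(round(Decimal("3.5")))          # 4
print(round(Decimal("123.456"), 2))   # Decimal('123.46')

with localcontext() as ctx:
    ctx.rounding = ROUND_HALF_UP
    print(round(Decimal("2.5"), 0))   # Decimal('3') under ROUND_HALF_UP
```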
+ productINF * 0 in fma0 * INF in fma_power_modulomoduloThree argument version of __pow__modulo_is_nan_isintegerpow() 3rd argument not allowed unless all arguments are integers'pow() 3rd argument not allowed ''unless all arguments are integers'pow() 2nd argument cannot be negative when 3rd argument specified'pow() 2nd argument cannot be ''negative when 3rd argument specified'pow() 3rd argument cannot be 0insufficient precision: pow() 3rd argument must not have more than precision digits'insufficient precision: pow() 3rd ''argument must not have more than ''precision digits'at least one of pow() 1st argument and 2nd argument must be nonzero; 0**0 is not defined'at least one of pow() 1st argument ''and 2nd argument must be nonzero; ''0**0 is not defined'_iseven_power_exactAttempt to compute self**other exactly. + + Given Decimals self and other and an integer p, attempt to + compute an exact result for the power self**other, with p + digits of precision. Return None if self**other is not + exactly representable in p digits. + + Assumes that elimination of special cases has already been + performed: self and other must both be nonspecial; self must + be positive and not numerically equal to 1; other must be + nonzero. For efficiency, other._exp should not be too large, + so that 10**abs(other._exp) is a feasible calculation.xcxeycyezeroslast_digit_nbits9365emax_decimal_lshift_exactxc_bitsrem_log10_lbstr_xcReturn self ** other [ % modulo]. + + With two arguments, compute self**other. + + With three arguments, compute (self**other) % modulo. For the + three argument form, the following restrictions on the + arguments hold: + + - all three arguments must be integral + - other must be nonnegative + - either self or other (or both) must be nonzero + - modulo must be nonzero and must have at most p digits, + where p is the context precision. + + If any of these restrictions is violated the InvalidOperation + flag is raised. + + The result of pow(self, other, modulo) is identical to the + result that would be obtained by computing (self**other) % + modulo with unbounded precision, but is computed more + efficiently. It is always exact. + 0 ** 0_Oneresult_signx ** y with x negative and y not an integermultiplierself_adj_log10_exp_boundbound_dpowernewcontexttrapsSwaps self/other and returns __pow__.Normalize- strip trailing 0s, change anything equal to 0 to 0e0Quantize self so its exponent is the same as that of exp. + + Similar to self._rescale(exp._exp) but with error checking. + quantize with one INFtarget exponent out of bounds in quantizeexponent of quantize result too large for current contextquantize result has too many digits for current contextReturn True if self and other have the same exponent; otherwise + return False. + + If either operand is a special value, the following rules are used: + * return True if both operands are infinities + * return True if both operands are NaNs + * otherwise, return False. + Rescale self so that the exponent is exp, either by padding with zeros + or by truncating digits, using the given rounding mode. + + Specials are returned without change. This operation is + quiet: it raises no flags, and uses no information from the + context. + + exp = exp to scale to (an integer) + rounding = rounding mode + this_function_roundplacesRound a nonzero, nonspecial Decimal to a fixed number of + significant figures, using the given rounding mode. + + Infinities, NaNs and zeros are returned unaltered. 
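The strings above cover fused multiply-add, the three-argument form of `__pow__`, and `quantize`/`same_quantum`. A compact sketch of those operations under the default 28-digit context:

```python
# Sketch: fma(), modular power, quantize() and same_quantum().
from decimal import Decimal

print(Decimal(3).fma(5, 7))                       # Decimal('22'), single final rounding
print(pow(Decimal(3), Decimal(7), Decimal(16)))   # Decimal('11'), exact modular power

print(Decimal("1.41421356").quantize(Decimal("1.000")))  # Decimal('1.414')
print(Decimal("1.5").same_quantum(Decimal("2.0")))       # True, both have exponent -1
```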
+ + This operation is quiet: it raises no flags, and uses no + information from the context. + + argument should be at least 1 in _roundRounds to a nearby integer. + + If no rounding mode is specified, take the rounding mode from + the context. This method raises the Rounded and Inexact flags + when appropriate. + + See also: to_integral_value, which does exactly the same as + this method except that it doesn't raise Inexact or Rounded. + Rounds to the nearest integer, without raising inexact, rounded.Return the square root of self.sqrt(-x), x > 0_shallow_copy_set_roundingReturns the larger value. + + Like max(self, other) except if one is not a number, returns + NaN (and signals if one is sNaN). Also rounds. + snonReturns the smaller value. + + Like min(self, other) except if one is not a number, returns + NaN (and signals if one is sNaN). Also rounds. + Returns whether self is an integerReturns True if self is even. Assumes self is an integer.Return the adjusted exponent of selfReturns the same Decimal object. + + As we do not have different encodings for the same number, the + received object already is in its canonical form. + Compares self to the other operand numerically. + + It's pretty much like compare(), but all NaNs signal, with signaling + NaNs taking precedence over quiet NaNs. + Compares self to other using the abstract representations. + + This is not like the standard compare, which use their numerical + value. Note that a total ordering is defined for all possible abstract + representations. + _NegativeOneself_nanother_nanself_keyother_key_ZeroCompares self to other using abstract repr., ignoring sign. + + Like compare_total, but with operand's sign ignored and assumed to be 0. + Returns a copy with the sign set to 0. Returns a copy with the sign inverted.Returns self with the sign of other.Returns e ** self.adj_dexpReturn True if self is canonical; otherwise return False. + + Currently, the encoding of a Decimal instance is always + canonical, so this method returns True for any Decimal. + Return True if self is finite; otherwise return False. + + A Decimal instance is considered finite if it is neither + infinite nor a NaN. + Return True if self is infinite; otherwise return False.Return True if self is a qNaN or sNaN; otherwise return False.Return True if self is a normal number; otherwise return False.Return True if self is a quiet NaN; otherwise return False.Return True if self is negative; otherwise return False.Return True if self is a signaling NaN; otherwise return False.Return True if self is subnormal; otherwise return False.Return True if self is a zero; otherwise return False._ln_exp_boundCompute a lower bound for the adjusted exponent of self.ln(). + In other words, compute r such that self.ln() >= 10**r. Assumes + that self is finite and positive and that self != 1. + denReturns the natural (base e) logarithm of self._NegativeInfinity_Infinityln of a negative value_dlogCompute a lower bound for the adjusted exponent of self.log10(). + In other words, find r such that self.log10() >= 10**r. + Assumes that self is finite and positive and that self != 1. + 231Returns the base 10 logarithm of self.log10 of a negative value_dlog10 Returns the exponent of the magnitude of self's MSD. + + The result is the integer which is the exponent of the magnitude + of the most significant digit of self (as though it were truncated + to a single digit while maintaining the value of that digit and + without limiting the resulting exponent). 
+ logb(0)_islogicalReturn True if self is a logical operand. + + For being logical, it must be a finite number with a sign of 0, + an exponent of 0, and a coefficient whose digits must all be + either 0 or 1. + 01_fill_logicalopaopbdifApplies an 'and' operation between self and other's digits.Invert all its digits.Applies an 'or' operation between self and other's digits.Applies an 'xor' operation between self and other's digits.Compares the values numerically with their sign ignored.Returns the largest representable number smaller than itself._ignore_all_flagsnew_selfReturns the smallest representable number larger than itself.Returns the number closest to self, in the direction towards other. + + The result is the closest representable number to self + (excluding self) that is in the direction towards other, + unless both have the same value. If the two operands are + numerically equal, then the result is a copy of self with the + sign set to be the same as the sign of other. + comparisonInfinite result from next_towardReturns an indication of the class of self. + + The class is one of the following strings: + sNaN + NaN + -Infinity + -Normal + -Subnormal + -Zero + +Zero + +Subnormal + +Normal + +Infinity + +Infinity-Infinity-Zero+Zero-Subnormal+Subnormal-Normal+NormalJust returns 10, as this is Decimal, :)Returns a rotated copy of self, value-of-other times.torotrotdigtopadrotatedReturns self operand after adding the second value to its exp.liminflimsupReturns a shifted copy of self, value-of-other times.shiftedspecifier_localeconvFormat a Decimal instance according to the given specifier. + + The specifier should be a standard format specifier, with the + form described in PEP 3101. Formatting types 'e', 'E', 'f', + 'F', 'g', 'G', 'n' and '%' are supported. If the formatting + type is omitted it defaults to 'g' or 'G', depending on the + value of context.capitals. + _parse_format_specifier_format_signbody_format_alignGprecisioneEfF%gG_format_numbercoefficientspecialCreate a decimal instance directly, without any validation, + normalization (e.g. removal of leading zeros) or argument + conversion. + + This function is for *internal use only*. + NumberContext manager class to support localcontext(). + + Sets a copy of the supplied context in __enter__() and restores + the previous decimal context in __exit__() + new_contextsaved_contextContains the context for a Decimal instance. + + Contains: + prec - precision (for use in rounding, division, square roots..) + rounding - rounding type (how you round) + traps - If traps[exception] = 1, then the exception is + raised when it is caused. Otherwise, a value is + substituted in. + flags - When an exception is caused, flags[exception] is set. + (Whether or not the trap_enabler is set) + Should be reset by user of Decimal instance. + Emin - Minimum exponent + Emax - Maximum exponent + capitals - If 1, 1*10^1 is printed as 1E+1. + If 0, printed as 1e1 + clamp - If 1, change exponents if too high (Default 0) + _ignored_flags_set_integer_checkvminvmax%s must be an integer-inf%s must be in [%s, %d]. got: %s%s must be in [%d, %s]. got: %s%s must be in [%d, %d]. 
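The operations named above include the digit-wise logical operations, `logb` and `number_class`. A sketch using a copy of `ExtendedContext` (precision 9) with the same Emin/Emax values as the captured doctests:

```python
# Sketch: logical digit operations, logb() and number_class().
from decimal import Decimal, ExtendedContext

ctx = ExtendedContext.copy()
ctx.Emin, ctx.Emax = -999, 999

print(ctx.logical_and(Decimal("1100"), Decimal("1010")))  # Decimal('1000')
print(ctx.logical_xor(Decimal("1100"), Decimal("1010")))  # Decimal('110')
print(ctx.logb(Decimal("250")))                           # Decimal('2')
print(ctx.number_class(Decimal("0.1E-999")))              # '+Subnormal'
```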
got %s_set_signal_dict%s must be a signal dict%s is not a valid signal dict%s: invalid rounding mode'decimal.Context' object has no attribute '%s'%s cannot be deletedsigShow the current context.Context(prec=%(prec)d, rounding=%(rounding)s, Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d, clamp=%(clamp)d'Context(prec=%(prec)d, rounding=%(rounding)s, ''Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d, ''clamp=%(clamp)d'flags=[traps=[Reset all flags to zeroReset all traps to zeroReturns a shallow copy from self.ncReturns a deep copy from self.explanationHandles an error + + If the flag is in _ignored_flags, returns the default response. + Otherwise, it sets the flag, then, if the corresponding + trap_enabler is set, it reraises the exception. Otherwise, it returns + the default value after setting the flag. + Ignore all flags, if they are raised_ignore_flagsIgnore the flags, if they are raised_regard_flagsStop ignoring the flags, if they are raisedReturns Etiny (= Emin - prec + 1)Returns maximum exponent (= Emax - prec + 1)Sets the rounding type. + + Sets the rounding type, and returns the current (previous) + rounding type. Often used like: + + context = context.copy() + # so you don't change the calling context + # if an error occurs in the middle. + rounding = context._set_rounding(ROUND_UP) + val = self.__sub__(other, context=context) + context._set_rounding(rounding) + + This will make it round up for that operation. + Creates a new Decimal instance but using self as context. + + This method implements the to-number operation of the + IBM Decimal specification.trailing or leading whitespace and underscores are not permitted."trailing or leading whitespace and ""underscores are not permitted."diagnostic info too long in NaNCreates a new Decimal instance from a float but rounding using self + as the context. + + >>> context = Context(prec=5, rounding=ROUND_DOWN) + >>> context.create_decimal_from_float(3.1415926535897932) + Decimal('3.1415') + >>> context = Context(prec=5, traps=[Inexact]) + >>> context.create_decimal_from_float(3.1415926535897932) + Traceback (most recent call last): + ... + decimal.Inexact: None + + Returns the absolute value of the operand. + + If the operand is negative, the result is the same as using the minus + operation on the operand. Otherwise, the result is the same as using + the plus operation on the operand. + + >>> ExtendedContext.abs(Decimal('2.1')) + Decimal('2.1') + >>> ExtendedContext.abs(Decimal('-100')) + Decimal('100') + >>> ExtendedContext.abs(Decimal('101.5')) + Decimal('101.5') + >>> ExtendedContext.abs(Decimal('-101.5')) + Decimal('101.5') + >>> ExtendedContext.abs(-1) + Decimal('1') + Return the sum of the two operands. + + >>> ExtendedContext.add(Decimal('12'), Decimal('7.00')) + Decimal('19.00') + >>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4')) + Decimal('1.02E+4') + >>> ExtendedContext.add(1, Decimal(2)) + Decimal('3') + >>> ExtendedContext.add(Decimal(8), 5) + Decimal('13') + >>> ExtendedContext.add(5, 5) + Decimal('10') + Unable to convert %s to DecimalReturns the same Decimal object. + + As we do not have different encodings for the same number, the + received object already is in its canonical form. + + >>> ExtendedContext.canonical(Decimal('2.50')) + Decimal('2.50') + canonical requires a Decimal as an argument.Compares values numerically. 
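The Context attributes and the `create_decimal*` docstrings above can be exercised directly; this sketch builds an explicit 5-digit context and reproduces the trapped-Inexact conversion shown in the captured doctest:

```python
# Sketch: constructing Contexts and converting values with them.
from decimal import Context, Inexact, ROUND_DOWN

ctx = Context(prec=5, rounding=ROUND_DOWN, Emin=-999, Emax=999)
print(ctx.create_decimal("1.23456789"))                    # Decimal('1.2345')
print(ctx.create_decimal_from_float(3.1415926535897932))   # Decimal('3.1415')
print(ctx.Etiny(), ctx.Etop())                             # -1003 995

strict = Context(prec=5, traps=[Inexact])
try:
    strict.create_decimal_from_float(3.14)
except Inexact:
    print("inexact float conversion trapped")
```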
+ + If the signs of the operands differ, a value representing each operand + ('-1' if the operand is less than zero, '0' if the operand is zero or + negative zero, or '1' if the operand is greater than zero) is used in + place of that operand for the comparison instead of the actual + operand. + + The comparison is then effected by subtracting the second operand from + the first and then returning a value according to the result of the + subtraction: '-1' if the result is less than zero, '0' if the result is + zero or negative zero, or '1' if the result is greater than zero. + + >>> ExtendedContext.compare(Decimal('2.1'), Decimal('3')) + Decimal('-1') + >>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1')) + Decimal('0') + >>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10')) + Decimal('0') + >>> ExtendedContext.compare(Decimal('3'), Decimal('2.1')) + Decimal('1') + >>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3')) + Decimal('1') + >>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1')) + Decimal('-1') + >>> ExtendedContext.compare(1, 2) + Decimal('-1') + >>> ExtendedContext.compare(Decimal(1), 2) + Decimal('-1') + >>> ExtendedContext.compare(1, Decimal(2)) + Decimal('-1') + Compares the values of the two operands numerically. + + It's pretty much like compare(), but all NaNs signal, with signaling + NaNs taking precedence over quiet NaNs. + + >>> c = ExtendedContext + >>> c.compare_signal(Decimal('2.1'), Decimal('3')) + Decimal('-1') + >>> c.compare_signal(Decimal('2.1'), Decimal('2.1')) + Decimal('0') + >>> c.flags[InvalidOperation] = 0 + >>> print(c.flags[InvalidOperation]) + 0 + >>> c.compare_signal(Decimal('NaN'), Decimal('2.1')) + Decimal('NaN') + >>> print(c.flags[InvalidOperation]) + 1 + >>> c.flags[InvalidOperation] = 0 + >>> print(c.flags[InvalidOperation]) + 0 + >>> c.compare_signal(Decimal('sNaN'), Decimal('2.1')) + Decimal('NaN') + >>> print(c.flags[InvalidOperation]) + 1 + >>> c.compare_signal(-1, 2) + Decimal('-1') + >>> c.compare_signal(Decimal(-1), 2) + Decimal('-1') + >>> c.compare_signal(-1, Decimal(2)) + Decimal('-1') + Compares two operands using their abstract representation. + + This is not like the standard compare, which use their numerical + value. Note that a total ordering is defined for all possible abstract + representations. + + >>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9')) + Decimal('-1') + >>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12')) + Decimal('-1') + >>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3')) + Decimal('-1') + >>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30')) + Decimal('0') + >>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300')) + Decimal('1') + >>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN')) + Decimal('-1') + >>> ExtendedContext.compare_total(1, 2) + Decimal('-1') + >>> ExtendedContext.compare_total(Decimal(1), 2) + Decimal('-1') + >>> ExtendedContext.compare_total(1, Decimal(2)) + Decimal('-1') + Compares two operands using their abstract representation ignoring sign. + + Like compare_total, but with operand's sign ignored and assumed to be 0. + Returns a copy of the operand with the sign set to 0. + + >>> ExtendedContext.copy_abs(Decimal('2.1')) + Decimal('2.1') + >>> ExtendedContext.copy_abs(Decimal('-100')) + Decimal('100') + >>> ExtendedContext.copy_abs(-1) + Decimal('1') + Returns a copy of the decimal object. 
+ + >>> ExtendedContext.copy_decimal(Decimal('2.1')) + Decimal('2.1') + >>> ExtendedContext.copy_decimal(Decimal('-1.00')) + Decimal('-1.00') + >>> ExtendedContext.copy_decimal(1) + Decimal('1') + Returns a copy of the operand with the sign inverted. + + >>> ExtendedContext.copy_negate(Decimal('101.5')) + Decimal('-101.5') + >>> ExtendedContext.copy_negate(Decimal('-101.5')) + Decimal('101.5') + >>> ExtendedContext.copy_negate(1) + Decimal('-1') + Copies the second operand's sign to the first one. + + In detail, it returns a copy of the first operand with the sign + equal to the sign of the second operand. + + >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33')) + Decimal('1.50') + >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33')) + Decimal('1.50') + >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33')) + Decimal('-1.50') + >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33')) + Decimal('-1.50') + >>> ExtendedContext.copy_sign(1, -2) + Decimal('-1') + >>> ExtendedContext.copy_sign(Decimal(1), -2) + Decimal('-1') + >>> ExtendedContext.copy_sign(1, Decimal(-2)) + Decimal('-1') + Decimal division in a specified context. + + >>> ExtendedContext.divide(Decimal('1'), Decimal('3')) + Decimal('0.333333333') + >>> ExtendedContext.divide(Decimal('2'), Decimal('3')) + Decimal('0.666666667') + >>> ExtendedContext.divide(Decimal('5'), Decimal('2')) + Decimal('2.5') + >>> ExtendedContext.divide(Decimal('1'), Decimal('10')) + Decimal('0.1') + >>> ExtendedContext.divide(Decimal('12'), Decimal('12')) + Decimal('1') + >>> ExtendedContext.divide(Decimal('8.00'), Decimal('2')) + Decimal('4.00') + >>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0')) + Decimal('1.20') + >>> ExtendedContext.divide(Decimal('1000'), Decimal('100')) + Decimal('10') + >>> ExtendedContext.divide(Decimal('1000'), Decimal('1')) + Decimal('1000') + >>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2')) + Decimal('1.20E+6') + >>> ExtendedContext.divide(5, 5) + Decimal('1') + >>> ExtendedContext.divide(Decimal(5), 5) + Decimal('1') + >>> ExtendedContext.divide(5, Decimal(5)) + Decimal('1') + Divides two numbers and returns the integer part of the result. + + >>> ExtendedContext.divide_int(Decimal('2'), Decimal('3')) + Decimal('0') + >>> ExtendedContext.divide_int(Decimal('10'), Decimal('3')) + Decimal('3') + >>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3')) + Decimal('3') + >>> ExtendedContext.divide_int(10, 3) + Decimal('3') + >>> ExtendedContext.divide_int(Decimal(10), 3) + Decimal('3') + >>> ExtendedContext.divide_int(10, Decimal(3)) + Decimal('3') + Return (a // b, a % b). + + >>> ExtendedContext.divmod(Decimal(8), Decimal(3)) + (Decimal('2'), Decimal('2')) + >>> ExtendedContext.divmod(Decimal(8), Decimal(4)) + (Decimal('2'), Decimal('0')) + >>> ExtendedContext.divmod(8, 4) + (Decimal('2'), Decimal('0')) + >>> ExtendedContext.divmod(Decimal(8), 4) + (Decimal('2'), Decimal('0')) + >>> ExtendedContext.divmod(8, Decimal(4)) + (Decimal('2'), Decimal('0')) + Returns e ** a. + + >>> c = ExtendedContext.copy() + >>> c.Emin = -999 + >>> c.Emax = 999 + >>> c.exp(Decimal('-Infinity')) + Decimal('0') + >>> c.exp(Decimal('-1')) + Decimal('0.367879441') + >>> c.exp(Decimal('0')) + Decimal('1') + >>> c.exp(Decimal('1')) + Decimal('2.71828183') + >>> c.exp(Decimal('0.693147181')) + Decimal('2.00000000') + >>> c.exp(Decimal('+Infinity')) + Decimal('Infinity') + >>> c.exp(10) + Decimal('22026.4658') + Returns a multiplied by b, plus c. 
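The division-related docstrings above translate directly into context method calls; a sketch on the trap-free `ExtendedContext` (precision 9):

```python
# Sketch: divide(), divide_int() and divmod() on ExtendedContext.
from decimal import Decimal, ExtendedContext

c = ExtendedContext
print(c.divide(Decimal("1"), Decimal("3")))       # Decimal('0.333333333')
print(c.divide_int(Decimal("10"), Decimal("3")))  # Decimal('3')
print(c.divmod(Decimal(8), Decimal(3)))           # (Decimal('2'), Decimal('2'))
```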
+ + The first two operands are multiplied together, using multiply, + the third operand is then added to the result of that + multiplication, using add, all with only one final rounding. + + >>> ExtendedContext.fma(Decimal('3'), Decimal('5'), Decimal('7')) + Decimal('22') + >>> ExtendedContext.fma(Decimal('3'), Decimal('-5'), Decimal('7')) + Decimal('-8') + >>> ExtendedContext.fma(Decimal('888565290'), Decimal('1557.96930'), Decimal('-86087.7578')) + Decimal('1.38435736E+12') + >>> ExtendedContext.fma(1, 3, 4) + Decimal('7') + >>> ExtendedContext.fma(1, Decimal(3), 4) + Decimal('7') + >>> ExtendedContext.fma(1, 3, Decimal(4)) + Decimal('7') + Return True if the operand is canonical; otherwise return False. + + Currently, the encoding of a Decimal instance is always + canonical, so this method returns True for any Decimal. + + >>> ExtendedContext.is_canonical(Decimal('2.50')) + True + is_canonical requires a Decimal as an argument.Return True if the operand is finite; otherwise return False. + + A Decimal instance is considered finite if it is neither + infinite nor a NaN. + + >>> ExtendedContext.is_finite(Decimal('2.50')) + True + >>> ExtendedContext.is_finite(Decimal('-0.3')) + True + >>> ExtendedContext.is_finite(Decimal('0')) + True + >>> ExtendedContext.is_finite(Decimal('Inf')) + False + >>> ExtendedContext.is_finite(Decimal('NaN')) + False + >>> ExtendedContext.is_finite(1) + True + Return True if the operand is infinite; otherwise return False. + + >>> ExtendedContext.is_infinite(Decimal('2.50')) + False + >>> ExtendedContext.is_infinite(Decimal('-Inf')) + True + >>> ExtendedContext.is_infinite(Decimal('NaN')) + False + >>> ExtendedContext.is_infinite(1) + False + Return True if the operand is a qNaN or sNaN; + otherwise return False. + + >>> ExtendedContext.is_nan(Decimal('2.50')) + False + >>> ExtendedContext.is_nan(Decimal('NaN')) + True + >>> ExtendedContext.is_nan(Decimal('-sNaN')) + True + >>> ExtendedContext.is_nan(1) + False + Return True if the operand is a normal number; + otherwise return False. + + >>> c = ExtendedContext.copy() + >>> c.Emin = -999 + >>> c.Emax = 999 + >>> c.is_normal(Decimal('2.50')) + True + >>> c.is_normal(Decimal('0.1E-999')) + False + >>> c.is_normal(Decimal('0.00')) + False + >>> c.is_normal(Decimal('-Inf')) + False + >>> c.is_normal(Decimal('NaN')) + False + >>> c.is_normal(1) + True + Return True if the operand is a quiet NaN; otherwise return False. + + >>> ExtendedContext.is_qnan(Decimal('2.50')) + False + >>> ExtendedContext.is_qnan(Decimal('NaN')) + True + >>> ExtendedContext.is_qnan(Decimal('sNaN')) + False + >>> ExtendedContext.is_qnan(1) + False + Return True if the operand is negative; otherwise return False. + + >>> ExtendedContext.is_signed(Decimal('2.50')) + False + >>> ExtendedContext.is_signed(Decimal('-12')) + True + >>> ExtendedContext.is_signed(Decimal('-0')) + True + >>> ExtendedContext.is_signed(8) + False + >>> ExtendedContext.is_signed(-8) + True + Return True if the operand is a signaling NaN; + otherwise return False. + + >>> ExtendedContext.is_snan(Decimal('2.50')) + False + >>> ExtendedContext.is_snan(Decimal('NaN')) + False + >>> ExtendedContext.is_snan(Decimal('sNaN')) + True + >>> ExtendedContext.is_snan(1) + False + Return True if the operand is subnormal; otherwise return False. 
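The `is_*` predicate docstrings above never signal, so they can be applied to any value, including NaNs and infinities. A small sketch:

```python
# Sketch: classification predicates on assorted values.
from decimal import Decimal

for text in ("2.50", "0", "-0E+2", "Inf", "NaN", "sNaN"):
    d = Decimal(text)
    print(text, d.is_finite(), d.is_nan(), d.is_zero(), d.is_signed())
```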
+ + >>> c = ExtendedContext.copy() + >>> c.Emin = -999 + >>> c.Emax = 999 + >>> c.is_subnormal(Decimal('2.50')) + False + >>> c.is_subnormal(Decimal('0.1E-999')) + True + >>> c.is_subnormal(Decimal('0.00')) + False + >>> c.is_subnormal(Decimal('-Inf')) + False + >>> c.is_subnormal(Decimal('NaN')) + False + >>> c.is_subnormal(1) + False + Return True if the operand is a zero; otherwise return False. + + >>> ExtendedContext.is_zero(Decimal('0')) + True + >>> ExtendedContext.is_zero(Decimal('2.50')) + False + >>> ExtendedContext.is_zero(Decimal('-0E+2')) + True + >>> ExtendedContext.is_zero(1) + False + >>> ExtendedContext.is_zero(0) + True + Returns the natural (base e) logarithm of the operand. + + >>> c = ExtendedContext.copy() + >>> c.Emin = -999 + >>> c.Emax = 999 + >>> c.ln(Decimal('0')) + Decimal('-Infinity') + >>> c.ln(Decimal('1.000')) + Decimal('0') + >>> c.ln(Decimal('2.71828183')) + Decimal('1.00000000') + >>> c.ln(Decimal('10')) + Decimal('2.30258509') + >>> c.ln(Decimal('+Infinity')) + Decimal('Infinity') + >>> c.ln(1) + Decimal('0') + Returns the base 10 logarithm of the operand. + + >>> c = ExtendedContext.copy() + >>> c.Emin = -999 + >>> c.Emax = 999 + >>> c.log10(Decimal('0')) + Decimal('-Infinity') + >>> c.log10(Decimal('0.001')) + Decimal('-3') + >>> c.log10(Decimal('1.000')) + Decimal('0') + >>> c.log10(Decimal('2')) + Decimal('0.301029996') + >>> c.log10(Decimal('10')) + Decimal('1') + >>> c.log10(Decimal('70')) + Decimal('1.84509804') + >>> c.log10(Decimal('+Infinity')) + Decimal('Infinity') + >>> c.log10(0) + Decimal('-Infinity') + >>> c.log10(1) + Decimal('0') + Returns the exponent of the magnitude of the operand's MSD. + + The result is the integer which is the exponent of the magnitude + of the most significant digit of the operand (as though the + operand were truncated to a single digit while maintaining the + value of that digit and without limiting the resulting exponent). + + >>> ExtendedContext.logb(Decimal('250')) + Decimal('2') + >>> ExtendedContext.logb(Decimal('2.50')) + Decimal('0') + >>> ExtendedContext.logb(Decimal('0.03')) + Decimal('-2') + >>> ExtendedContext.logb(Decimal('0')) + Decimal('-Infinity') + >>> ExtendedContext.logb(1) + Decimal('0') + >>> ExtendedContext.logb(10) + Decimal('1') + >>> ExtendedContext.logb(100) + Decimal('2') + Applies the logical operation 'and' between each operand's digits. + + The operands must be both logical numbers. + + >>> ExtendedContext.logical_and(Decimal('0'), Decimal('0')) + Decimal('0') + >>> ExtendedContext.logical_and(Decimal('0'), Decimal('1')) + Decimal('0') + >>> ExtendedContext.logical_and(Decimal('1'), Decimal('0')) + Decimal('0') + >>> ExtendedContext.logical_and(Decimal('1'), Decimal('1')) + Decimal('1') + >>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010')) + Decimal('1000') + >>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10')) + Decimal('10') + >>> ExtendedContext.logical_and(110, 1101) + Decimal('100') + >>> ExtendedContext.logical_and(Decimal(110), 1101) + Decimal('100') + >>> ExtendedContext.logical_and(110, Decimal(1101)) + Decimal('100') + Invert all the digits in the operand. + + The operand must be a logical number. 
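The `ln`/`log10` docstrings above can be reproduced with a 9-digit context, matching the captured doctest values:

```python
# Sketch: natural and base-10 logarithms at precision 9.
from decimal import Decimal, ExtendedContext

c = ExtendedContext.copy()
c.Emin, c.Emax = -999, 999
print(c.ln(Decimal("2.71828183")))   # Decimal('1.00000000')
print(c.log10(Decimal("70")))        # Decimal('1.84509804')
```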
+ + >>> ExtendedContext.logical_invert(Decimal('0')) + Decimal('111111111') + >>> ExtendedContext.logical_invert(Decimal('1')) + Decimal('111111110') + >>> ExtendedContext.logical_invert(Decimal('111111111')) + Decimal('0') + >>> ExtendedContext.logical_invert(Decimal('101010101')) + Decimal('10101010') + >>> ExtendedContext.logical_invert(1101) + Decimal('111110010') + Applies the logical operation 'or' between each operand's digits. + + The operands must be both logical numbers. + + >>> ExtendedContext.logical_or(Decimal('0'), Decimal('0')) + Decimal('0') + >>> ExtendedContext.logical_or(Decimal('0'), Decimal('1')) + Decimal('1') + >>> ExtendedContext.logical_or(Decimal('1'), Decimal('0')) + Decimal('1') + >>> ExtendedContext.logical_or(Decimal('1'), Decimal('1')) + Decimal('1') + >>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010')) + Decimal('1110') + >>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10')) + Decimal('1110') + >>> ExtendedContext.logical_or(110, 1101) + Decimal('1111') + >>> ExtendedContext.logical_or(Decimal(110), 1101) + Decimal('1111') + >>> ExtendedContext.logical_or(110, Decimal(1101)) + Decimal('1111') + Applies the logical operation 'xor' between each operand's digits. + + The operands must be both logical numbers. + + >>> ExtendedContext.logical_xor(Decimal('0'), Decimal('0')) + Decimal('0') + >>> ExtendedContext.logical_xor(Decimal('0'), Decimal('1')) + Decimal('1') + >>> ExtendedContext.logical_xor(Decimal('1'), Decimal('0')) + Decimal('1') + >>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1')) + Decimal('0') + >>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010')) + Decimal('110') + >>> ExtendedContext.logical_xor(Decimal('1111'), Decimal('10')) + Decimal('1101') + >>> ExtendedContext.logical_xor(110, 1101) + Decimal('1011') + >>> ExtendedContext.logical_xor(Decimal(110), 1101) + Decimal('1011') + >>> ExtendedContext.logical_xor(110, Decimal(1101)) + Decimal('1011') + max compares two values numerically and returns the maximum. + + If either operand is a NaN then the general rules apply. + Otherwise, the operands are compared as though by the compare + operation. If they are numerically equal then the left-hand operand + is chosen as the result. Otherwise the maximum (closer to positive + infinity) of the two operands is chosen as the result. + + >>> ExtendedContext.max(Decimal('3'), Decimal('2')) + Decimal('3') + >>> ExtendedContext.max(Decimal('-10'), Decimal('3')) + Decimal('3') + >>> ExtendedContext.max(Decimal('1.0'), Decimal('1')) + Decimal('1') + >>> ExtendedContext.max(Decimal('7'), Decimal('NaN')) + Decimal('7') + >>> ExtendedContext.max(1, 2) + Decimal('2') + >>> ExtendedContext.max(Decimal(1), 2) + Decimal('2') + >>> ExtendedContext.max(1, Decimal(2)) + Decimal('2') + Compares the values numerically with their sign ignored. + + >>> ExtendedContext.max_mag(Decimal('7'), Decimal('NaN')) + Decimal('7') + >>> ExtendedContext.max_mag(Decimal('7'), Decimal('-10')) + Decimal('-10') + >>> ExtendedContext.max_mag(1, -2) + Decimal('-2') + >>> ExtendedContext.max_mag(Decimal(1), -2) + Decimal('-2') + >>> ExtendedContext.max_mag(1, Decimal(-2)) + Decimal('-2') + min compares two values numerically and returns the minimum. + + If either operand is a NaN then the general rules apply. + Otherwise, the operands are compared as though by the compare + operation. If they are numerically equal then the left-hand operand + is chosen as the result. 
Otherwise the minimum (closer to negative + infinity) of the two operands is chosen as the result. + + >>> ExtendedContext.min(Decimal('3'), Decimal('2')) + Decimal('2') + >>> ExtendedContext.min(Decimal('-10'), Decimal('3')) + Decimal('-10') + >>> ExtendedContext.min(Decimal('1.0'), Decimal('1')) + Decimal('1.0') + >>> ExtendedContext.min(Decimal('7'), Decimal('NaN')) + Decimal('7') + >>> ExtendedContext.min(1, 2) + Decimal('1') + >>> ExtendedContext.min(Decimal(1), 2) + Decimal('1') + >>> ExtendedContext.min(1, Decimal(29)) + Decimal('1') + Compares the values numerically with their sign ignored. + + >>> ExtendedContext.min_mag(Decimal('3'), Decimal('-2')) + Decimal('-2') + >>> ExtendedContext.min_mag(Decimal('-3'), Decimal('NaN')) + Decimal('-3') + >>> ExtendedContext.min_mag(1, -2) + Decimal('1') + >>> ExtendedContext.min_mag(Decimal(1), -2) + Decimal('1') + >>> ExtendedContext.min_mag(1, Decimal(-2)) + Decimal('1') + Minus corresponds to unary prefix minus in Python. + + The operation is evaluated using the same rules as subtract; the + operation minus(a) is calculated as subtract('0', a) where the '0' + has the same exponent as the operand. + + >>> ExtendedContext.minus(Decimal('1.3')) + Decimal('-1.3') + >>> ExtendedContext.minus(Decimal('-1.3')) + Decimal('1.3') + >>> ExtendedContext.minus(1) + Decimal('-1') + multiply multiplies two operands. + + If either operand is a special value then the general rules apply. + Otherwise, the operands are multiplied together + ('long multiplication'), resulting in a number which may be as long as + the sum of the lengths of the two operands. + + >>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3')) + Decimal('3.60') + >>> ExtendedContext.multiply(Decimal('7'), Decimal('3')) + Decimal('21') + >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8')) + Decimal('0.72') + >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0')) + Decimal('-0.0') + >>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321')) + Decimal('4.28135971E+11') + >>> ExtendedContext.multiply(7, 7) + Decimal('49') + >>> ExtendedContext.multiply(Decimal(7), 7) + Decimal('49') + >>> ExtendedContext.multiply(7, Decimal(7)) + Decimal('49') + Returns the largest representable number smaller than a. + + >>> c = ExtendedContext.copy() + >>> c.Emin = -999 + >>> c.Emax = 999 + >>> ExtendedContext.next_minus(Decimal('1')) + Decimal('0.999999999') + >>> c.next_minus(Decimal('1E-1007')) + Decimal('0E-1007') + >>> ExtendedContext.next_minus(Decimal('-1.00000003')) + Decimal('-1.00000004') + >>> c.next_minus(Decimal('Infinity')) + Decimal('9.99999999E+999') + >>> c.next_minus(1) + Decimal('0.999999999') + Returns the smallest representable number larger than a. + + >>> c = ExtendedContext.copy() + >>> c.Emin = -999 + >>> c.Emax = 999 + >>> ExtendedContext.next_plus(Decimal('1')) + Decimal('1.00000001') + >>> c.next_plus(Decimal('-1E-1007')) + Decimal('-0E-1007') + >>> ExtendedContext.next_plus(Decimal('-1.00000003')) + Decimal('-1.00000002') + >>> c.next_plus(Decimal('-Infinity')) + Decimal('-9.99999999E+999') + >>> c.next_plus(1) + Decimal('1.00000001') + Returns the number closest to a, in direction towards b. + + The result is the closest representable number from the first + operand (but not the first operand) that is in the direction + towards the second operand, unless the operands have the same + value. 
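The `next_minus`/`next_plus` docstrings above describe stepping to the adjacent representable values; a sketch with the same narrowed exponent range as the captured examples:

```python
# Sketch: adjacent representable values at precision 9, Emin/Emax = +/-999.
from decimal import Decimal, ExtendedContext

c = ExtendedContext.copy()
c.Emin, c.Emax = -999, 999
print(c.next_minus(Decimal("1")))            # Decimal('0.999999999')
print(c.next_plus(Decimal("-1.00000003")))   # Decimal('-1.00000002')
print(c.next_minus(Decimal("Infinity")))     # Decimal('9.99999999E+999')
```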
+ + >>> c = ExtendedContext.copy() + >>> c.Emin = -999 + >>> c.Emax = 999 + >>> c.next_toward(Decimal('1'), Decimal('2')) + Decimal('1.00000001') + >>> c.next_toward(Decimal('-1E-1007'), Decimal('1')) + Decimal('-0E-1007') + >>> c.next_toward(Decimal('-1.00000003'), Decimal('0')) + Decimal('-1.00000002') + >>> c.next_toward(Decimal('1'), Decimal('0')) + Decimal('0.999999999') + >>> c.next_toward(Decimal('1E-1007'), Decimal('-100')) + Decimal('0E-1007') + >>> c.next_toward(Decimal('-1.00000003'), Decimal('-10')) + Decimal('-1.00000004') + >>> c.next_toward(Decimal('0.00'), Decimal('-0.0000')) + Decimal('-0.00') + >>> c.next_toward(0, 1) + Decimal('1E-1007') + >>> c.next_toward(Decimal(0), 1) + Decimal('1E-1007') + >>> c.next_toward(0, Decimal(1)) + Decimal('1E-1007') + normalize reduces an operand to its simplest form. + + Essentially a plus operation with all trailing zeros removed from the + result. + + >>> ExtendedContext.normalize(Decimal('2.1')) + Decimal('2.1') + >>> ExtendedContext.normalize(Decimal('-2.0')) + Decimal('-2') + >>> ExtendedContext.normalize(Decimal('1.200')) + Decimal('1.2') + >>> ExtendedContext.normalize(Decimal('-120')) + Decimal('-1.2E+2') + >>> ExtendedContext.normalize(Decimal('120.00')) + Decimal('1.2E+2') + >>> ExtendedContext.normalize(Decimal('0.00')) + Decimal('0') + >>> ExtendedContext.normalize(6) + Decimal('6') + Returns an indication of the class of the operand. + + The class is one of the following strings: + -sNaN + -NaN + -Infinity + -Normal + -Subnormal + -Zero + +Zero + +Subnormal + +Normal + +Infinity + + >>> c = ExtendedContext.copy() + >>> c.Emin = -999 + >>> c.Emax = 999 + >>> c.number_class(Decimal('Infinity')) + '+Infinity' + >>> c.number_class(Decimal('1E-10')) + '+Normal' + >>> c.number_class(Decimal('2.50')) + '+Normal' + >>> c.number_class(Decimal('0.1E-999')) + '+Subnormal' + >>> c.number_class(Decimal('0')) + '+Zero' + >>> c.number_class(Decimal('-0')) + '-Zero' + >>> c.number_class(Decimal('-0.1E-999')) + '-Subnormal' + >>> c.number_class(Decimal('-1E-10')) + '-Normal' + >>> c.number_class(Decimal('-2.50')) + '-Normal' + >>> c.number_class(Decimal('-Infinity')) + '-Infinity' + >>> c.number_class(Decimal('NaN')) + 'NaN' + >>> c.number_class(Decimal('-NaN')) + 'NaN' + >>> c.number_class(Decimal('sNaN')) + 'sNaN' + >>> c.number_class(123) + '+Normal' + Plus corresponds to unary prefix plus in Python. + + The operation is evaluated using the same rules as add; the + operation plus(a) is calculated as add('0', a) where the '0' + has the same exponent as the operand. + + >>> ExtendedContext.plus(Decimal('1.3')) + Decimal('1.3') + >>> ExtendedContext.plus(Decimal('-1.3')) + Decimal('-1.3') + >>> ExtendedContext.plus(-1) + Decimal('-1') + Raises a to the power of b, to modulo if given. + + With two arguments, compute a**b. If a is negative then b + must be integral. The result will be inexact unless b is + integral and the result is finite and can be expressed exactly + in 'precision' digits. + + With three arguments, compute (a**b) % modulo. For the + three argument form, the following restrictions on the + arguments hold: + + - all three arguments must be integral + - b must be nonnegative + - at least one of a or b must be nonzero + - modulo must be nonzero and have at most 'precision' digits + + The result of pow(a, b, modulo) is identical to the result + that would be obtained by computing (a**b) % modulo with + unbounded precision, but is computed more efficiently. It is + always exact. 
+ + >>> c = ExtendedContext.copy() + >>> c.Emin = -999 + >>> c.Emax = 999 + >>> c.power(Decimal('2'), Decimal('3')) + Decimal('8') + >>> c.power(Decimal('-2'), Decimal('3')) + Decimal('-8') + >>> c.power(Decimal('2'), Decimal('-3')) + Decimal('0.125') + >>> c.power(Decimal('1.7'), Decimal('8')) + Decimal('69.7575744') + >>> c.power(Decimal('10'), Decimal('0.301029996')) + Decimal('2.00000000') + >>> c.power(Decimal('Infinity'), Decimal('-1')) + Decimal('0') + >>> c.power(Decimal('Infinity'), Decimal('0')) + Decimal('1') + >>> c.power(Decimal('Infinity'), Decimal('1')) + Decimal('Infinity') + >>> c.power(Decimal('-Infinity'), Decimal('-1')) + Decimal('-0') + >>> c.power(Decimal('-Infinity'), Decimal('0')) + Decimal('1') + >>> c.power(Decimal('-Infinity'), Decimal('1')) + Decimal('-Infinity') + >>> c.power(Decimal('-Infinity'), Decimal('2')) + Decimal('Infinity') + >>> c.power(Decimal('0'), Decimal('0')) + Decimal('NaN') + + >>> c.power(Decimal('3'), Decimal('7'), Decimal('16')) + Decimal('11') + >>> c.power(Decimal('-3'), Decimal('7'), Decimal('16')) + Decimal('-11') + >>> c.power(Decimal('-3'), Decimal('8'), Decimal('16')) + Decimal('1') + >>> c.power(Decimal('3'), Decimal('7'), Decimal('-16')) + Decimal('11') + >>> c.power(Decimal('23E12345'), Decimal('67E189'), Decimal('123456789')) + Decimal('11729830') + >>> c.power(Decimal('-0'), Decimal('17'), Decimal('1729')) + Decimal('-0') + >>> c.power(Decimal('-23'), Decimal('0'), Decimal('65537')) + Decimal('1') + >>> ExtendedContext.power(7, 7) + Decimal('823543') + >>> ExtendedContext.power(Decimal(7), 7) + Decimal('823543') + >>> ExtendedContext.power(7, Decimal(7), 2) + Decimal('1') + Returns a value equal to 'a' (rounded), having the exponent of 'b'. + + The coefficient of the result is derived from that of the left-hand + operand. It may be rounded using the current rounding setting (if the + exponent is being increased), multiplied by a positive power of ten (if + the exponent is being decreased), or is unchanged (if the exponent is + already equal to that of the right-hand operand). + + Unlike other operations, if the length of the coefficient after the + quantize operation would be greater than precision then an Invalid + operation condition is raised. This guarantees that, unless there is + an error condition, the exponent of the result of a quantize is always + equal to that of the right-hand operand. + + Also unlike other operations, quantize will never raise Underflow, even + if the result is subnormal and inexact. 
+ + >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001')) + Decimal('2.170') + >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01')) + Decimal('2.17') + >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1')) + Decimal('2.2') + >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0')) + Decimal('2') + >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1')) + Decimal('0E+1') + >>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity')) + Decimal('-Infinity') + >>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity')) + Decimal('NaN') + >>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1')) + Decimal('-0') + >>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5')) + Decimal('-0E+5') + >>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2')) + Decimal('NaN') + >>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2')) + Decimal('NaN') + >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1')) + Decimal('217.0') + >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0')) + Decimal('217') + >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1')) + Decimal('2.2E+2') + >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2')) + Decimal('2E+2') + >>> ExtendedContext.quantize(1, 2) + Decimal('1') + >>> ExtendedContext.quantize(Decimal(1), 2) + Decimal('1') + >>> ExtendedContext.quantize(1, Decimal(2)) + Decimal('1') + Just returns 10, as this is Decimal, :) + + >>> ExtendedContext.radix() + Decimal('10') + Returns the remainder from integer division. + + The result is the residue of the dividend after the operation of + calculating integer division as described for divide-integer, rounded + to precision digits if necessary. The sign of the result, if + non-zero, is the same as that of the original dividend. + + This operation will fail under the same conditions as integer division + (that is, if integer division on the same two operands would fail, the + remainder cannot be calculated). + + >>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3')) + Decimal('2.1') + >>> ExtendedContext.remainder(Decimal('10'), Decimal('3')) + Decimal('1') + >>> ExtendedContext.remainder(Decimal('-10'), Decimal('3')) + Decimal('-1') + >>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1')) + Decimal('0.2') + >>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3')) + Decimal('0.1') + >>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3')) + Decimal('1.0') + >>> ExtendedContext.remainder(22, 6) + Decimal('4') + >>> ExtendedContext.remainder(Decimal(22), 6) + Decimal('4') + >>> ExtendedContext.remainder(22, Decimal(6)) + Decimal('4') + Returns to be "a - b * n", where n is the integer nearest the exact + value of "x / b" (if two integers are equally near then the even one + is chosen). If the result is equal to 0 then its sign will be the + sign of a. + + This operation will fail under the same conditions as integer division + (that is, if integer division on the same two operands would fail, the + remainder cannot be calculated). 
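The `remainder` and `remainder_near` docstrings above differ in how the residue is chosen: `remainder` keeps the dividend's sign, `remainder_near` picks the residue closest to zero. A sketch contrasting the two:

```python
# Sketch: remainder() versus remainder_near().
from decimal import Decimal, ExtendedContext

c = ExtendedContext
print(c.remainder(Decimal("10"), Decimal("6")))          # Decimal('4')
print(c.remainder_near(Decimal("10"), Decimal("6")))     # Decimal('-2')
print(c.remainder(Decimal("3.6"), Decimal("1.3")))       # Decimal('1.0')
print(c.remainder_near(Decimal("3.6"), Decimal("1.3")))  # Decimal('-0.3')
```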
+ + >>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3')) + Decimal('-0.9') + >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6')) + Decimal('-2') + >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3')) + Decimal('1') + >>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3')) + Decimal('-1') + >>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1')) + Decimal('0.2') + >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3')) + Decimal('0.1') + >>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3')) + Decimal('-0.3') + >>> ExtendedContext.remainder_near(3, 11) + Decimal('3') + >>> ExtendedContext.remainder_near(Decimal(3), 11) + Decimal('3') + >>> ExtendedContext.remainder_near(3, Decimal(11)) + Decimal('3') + Returns a rotated copy of a, b times. + + The coefficient of the result is a rotated copy of the digits in + the coefficient of the first operand. The number of places of + rotation is taken from the absolute value of the second operand, + with the rotation being to the left if the second operand is + positive or to the right otherwise. + + >>> ExtendedContext.rotate(Decimal('34'), Decimal('8')) + Decimal('400000003') + >>> ExtendedContext.rotate(Decimal('12'), Decimal('9')) + Decimal('12') + >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('-2')) + Decimal('891234567') + >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('0')) + Decimal('123456789') + >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('+2')) + Decimal('345678912') + >>> ExtendedContext.rotate(1333333, 1) + Decimal('13333330') + >>> ExtendedContext.rotate(Decimal(1333333), 1) + Decimal('13333330') + >>> ExtendedContext.rotate(1333333, Decimal(1)) + Decimal('13333330') + Returns True if the two operands have the same exponent. + + The result is never affected by either the sign or the coefficient of + either operand. + + >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001')) + False + >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01')) + True + >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1')) + False + >>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf')) + True + >>> ExtendedContext.same_quantum(10000, -1) + True + >>> ExtendedContext.same_quantum(Decimal(10000), -1) + True + >>> ExtendedContext.same_quantum(10000, Decimal(-1)) + True + Returns the first operand after adding the second value its exp. + + >>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('-2')) + Decimal('0.0750') + >>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('0')) + Decimal('7.50') + >>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('3')) + Decimal('7.50E+3') + >>> ExtendedContext.scaleb(1, 4) + Decimal('1E+4') + >>> ExtendedContext.scaleb(Decimal(1), 4) + Decimal('1E+4') + >>> ExtendedContext.scaleb(1, Decimal(4)) + Decimal('1E+4') + Returns a shifted copy of a, b times. + + The coefficient of the result is a shifted copy of the digits + in the coefficient of the first operand. The number of places + to shift is taken from the absolute value of the second operand, + with the shift being to the left if the second operand is + positive or to the right otherwise. Digits shifted into the + coefficient are zeros. 
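The coefficient-manipulation operations described above (`rotate`, `shift`, `scaleb`) act on the digit string rather than the numeric value; a sketch at precision 9:

```python
# Sketch: rotate(), shift() and scaleb() on ExtendedContext.
from decimal import Decimal, ExtendedContext

c = ExtendedContext
print(c.rotate(Decimal("123456789"), Decimal("-2")))  # Decimal('891234567')
print(c.shift(Decimal("123456789"), Decimal("+2")))   # Decimal('345678900')
print(c.scaleb(Decimal("7.50"), Decimal("3")))        # Decimal('7.50E+3')
```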
+ + >>> ExtendedContext.shift(Decimal('34'), Decimal('8')) + Decimal('400000000') + >>> ExtendedContext.shift(Decimal('12'), Decimal('9')) + Decimal('0') + >>> ExtendedContext.shift(Decimal('123456789'), Decimal('-2')) + Decimal('1234567') + >>> ExtendedContext.shift(Decimal('123456789'), Decimal('0')) + Decimal('123456789') + >>> ExtendedContext.shift(Decimal('123456789'), Decimal('+2')) + Decimal('345678900') + >>> ExtendedContext.shift(88888888, 2) + Decimal('888888800') + >>> ExtendedContext.shift(Decimal(88888888), 2) + Decimal('888888800') + >>> ExtendedContext.shift(88888888, Decimal(2)) + Decimal('888888800') + Square root of a non-negative number to context precision. + + If the result must be inexact, it is rounded using the round-half-even + algorithm. + + >>> ExtendedContext.sqrt(Decimal('0')) + Decimal('0') + >>> ExtendedContext.sqrt(Decimal('-0')) + Decimal('-0') + >>> ExtendedContext.sqrt(Decimal('0.39')) + Decimal('0.624499800') + >>> ExtendedContext.sqrt(Decimal('100')) + Decimal('10') + >>> ExtendedContext.sqrt(Decimal('1')) + Decimal('1') + >>> ExtendedContext.sqrt(Decimal('1.0')) + Decimal('1.0') + >>> ExtendedContext.sqrt(Decimal('1.00')) + Decimal('1.0') + >>> ExtendedContext.sqrt(Decimal('7')) + Decimal('2.64575131') + >>> ExtendedContext.sqrt(Decimal('10')) + Decimal('3.16227766') + >>> ExtendedContext.sqrt(2) + Decimal('1.41421356') + >>> ExtendedContext.prec + 9 + Return the difference between the two operands. + + >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07')) + Decimal('0.23') + >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30')) + Decimal('0.00') + >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07')) + Decimal('-0.77') + >>> ExtendedContext.subtract(8, 5) + Decimal('3') + >>> ExtendedContext.subtract(Decimal(8), 5) + Decimal('3') + >>> ExtendedContext.subtract(8, Decimal(5)) + Decimal('3') + Convert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. + + The operation is not affected by the context. + + >>> ExtendedContext.to_eng_string(Decimal('123E+1')) + '1.23E+3' + >>> ExtendedContext.to_eng_string(Decimal('123E+3')) + '123E+3' + >>> ExtendedContext.to_eng_string(Decimal('123E-10')) + '12.3E-9' + >>> ExtendedContext.to_eng_string(Decimal('-123E-12')) + '-123E-12' + >>> ExtendedContext.to_eng_string(Decimal('7E-7')) + '700E-9' + >>> ExtendedContext.to_eng_string(Decimal('7E+1')) + '70' + >>> ExtendedContext.to_eng_string(Decimal('0E+1')) + '0.00E+3' + + Converts a number to a string, using scientific notation. + + The operation is not affected by the context. + Rounds to an integer. + + When the operand has a negative exponent, the result is the same + as using the quantize() operation using the given operand as the + left-hand-operand, 1E+0 as the right-hand-operand, and the precision + of the operand as the precision setting; Inexact and Rounded flags + are allowed in this operation. The rounding mode is taken from the + context. 
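The `sqrt` and engineering-notation docstrings above are easy to confirm; a sketch at precision 9, matching the captured doctest values:

```python
# Sketch: square root and engineering-notation output.
from decimal import Decimal, ExtendedContext

c = ExtendedContext
print(c.sqrt(Decimal("7")))                  # Decimal('2.64575131')
print(c.to_eng_string(Decimal("123E-10")))   # '12.3E-9'
print(c.to_eng_string(Decimal("7E-7")))      # '700E-9'
```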
+ + >>> ExtendedContext.to_integral_exact(Decimal('2.1')) + Decimal('2') + >>> ExtendedContext.to_integral_exact(Decimal('100')) + Decimal('100') + >>> ExtendedContext.to_integral_exact(Decimal('100.0')) + Decimal('100') + >>> ExtendedContext.to_integral_exact(Decimal('101.5')) + Decimal('102') + >>> ExtendedContext.to_integral_exact(Decimal('-101.5')) + Decimal('-102') + >>> ExtendedContext.to_integral_exact(Decimal('10E+5')) + Decimal('1.0E+6') + >>> ExtendedContext.to_integral_exact(Decimal('7.89E+77')) + Decimal('7.89E+77') + >>> ExtendedContext.to_integral_exact(Decimal('-Inf')) + Decimal('-Infinity') + Rounds to an integer. + + When the operand has a negative exponent, the result is the same + as using the quantize() operation using the given operand as the + left-hand-operand, 1E+0 as the right-hand-operand, and the precision + of the operand as the precision setting, except that no flags will + be set. The rounding mode is taken from the context. + + >>> ExtendedContext.to_integral_value(Decimal('2.1')) + Decimal('2') + >>> ExtendedContext.to_integral_value(Decimal('100')) + Decimal('100') + >>> ExtendedContext.to_integral_value(Decimal('100.0')) + Decimal('100') + >>> ExtendedContext.to_integral_value(Decimal('101.5')) + Decimal('102') + >>> ExtendedContext.to_integral_value(Decimal('-101.5')) + Decimal('-102') + >>> ExtendedContext.to_integral_value(Decimal('10E+5')) + Decimal('1.0E+6') + >>> ExtendedContext.to_integral_value(Decimal('7.89E+77')) + Decimal('7.89E+77') + >>> ExtendedContext.to_integral_value(Decimal('-Inf')) + Decimal('-Infinity') + (%r, %r, %r)Normalizes op1, op2 to have the same exp and length of coefficient. + + Done during addition. + tmptmp_lenother_len Given integers n and e, return n * 10**e if it's an integer, else None. + + The computation is designed to avoid computing large powers of 10 + unnecessarily. + + >>> _decimal_lshift_exact(3, 4) + 30000 + >>> _decimal_lshift_exact(300, -999999999) # returns None + + str_nval_n_sqrt_nearestClosest integer to the square root of the positive integer n. a is + an initial approximation to the square root. Any positive integer + will do for a, but the closer a is to the square root of n the + faster convergence will be. + + Both arguments to _sqrt_nearest should be positive._rshift_nearestGiven an integer x and a nonnegative integer shift, return closest + integer to x / 2**shift; use round-to-even in case of a tie. + + _div_nearestClosest integer to a/b, a and b positive integers; rounds to even + in the case of a tie. + + _ilogMInteger approximation to M*log(x/M), with absolute error boundable + in terms only of x/M. + + Given positive integers x and M, return an integer approximation to + M * log(x/M). For L = 8 and 0.1 <= x/M <= 10 the difference + between the approximation and the exact result is at most 22. For + L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15. In + both cases these are upper bounds on the error; it will usually be + much smaller.RyshiftGiven integers c, e and p with c > 0, p >= 0, compute an integer + approximation to 10**p * log10(c*10**e), with an absolute error of + at most 1. Assumes that c*10**e is not exactly 1.log_d_log10_digitslog_10log_tenpowerGiven integers c, e and p with c > 0, compute an integer + approximation to 10**p * log(c*10**e), with an absolute error of + at most 1. Assumes that c*10**e is not exactly 1.f_log_ten_Log10MemoizeClass to compute, store, and allow retrieval of, digits of the + constant log(10) = 2.302585.... 
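Among the private helpers named above is a round-half-even integer division routine (the module calls it `_div_nearest`). The following is a re-implementation sketch written only to illustrate the tie-breaking rule it describes; it is not taken from the module source and, like the original, assumes positive operands:

```python
# Sketch: closest integer to a/b with ties rounded to the even neighbour.
def div_nearest(a, b):
    q, r = divmod(a, b)
    # Round up when the remainder exceeds half of b, or equals half of b
    # and the quotient is odd (so the rounded result becomes even).
    if 2 * r > b or (2 * r == b and q % 2 == 1):
        q += 1
    return q

print(div_nearest(7, 2))   # 4  (3.5 rounds to the even value 4)
print(div_nearest(5, 2))   # 2  (2.5 rounds to the even value 2)
print(div_nearest(7, 3))   # 2
```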
This constant is needed by + Decimal.ln, Decimal.log10, Decimal.exp and Decimal.__pow__.23025850929940456840179914546843642076011014886getdigitsGiven an integer p >= 0, return floor(10**p)*log(10). + + For example, self.getdigits(3) returns 2302. + p should be nonnegative_iexpGiven integers x and M, M > 0, such that x/M is small in absolute + value, compute an integer approximation to M*exp(x/M). For 0 <= + x/M <= 2.4, the absolute error in the result is bounded by 60 (and + is usually much smaller).MshiftCompute an approximation to exp(c*10**e), with p decimal places of + precision. + + Returns integers d, f such that: + + 10**(p-1) <= d <= 10**p, and + (d-1)*10**f < exp(c*10**e) < (d+1)*10**f + + In other words, d*10**f is an approximation to exp(c*10**e) with p + digits of precision, and with an error in d of at most 1. This is + almost, but not quite, the same as the error being < 1ulp: when d + = 10**(p-1) the error could be up to 10 ulp.cshiftquotGiven integers xc, xe, yc and ye representing Decimals x = xc*10**xe and + y = yc*10**ye, compute x**y. Returns a pair of integers (c, e) such that: + + 10**(p-1) <= c <= 10**p, and + (c-1)*10**e < x**y < (c+1)*10**e + + in other words, c*10**e is an approximation to x**y with p digits + of precision, and with an error in c of at most 1. (This is + almost, but not quite, the same as the error being < 1ulp: when c + == 10**(p-1) we can only guarantee error < 10ulp.) + + We assume that: x is positive and not equal to 1, and y is nonzero. + lxcpc70correctionCompute a lower bound for 100*log10(c) for a positive integer c.The argument to _log10_lb should be nonnegative.str_callow_floatConvert other to Decimal. + + Verifies that it's ok to use in an implicit construction. + If allow_float is true, allow conversion from float; this + is used in the comparison methods (__eq__ and friends). + + Given a Decimal instance self and a Python object other, return + a pair (s, o) of Decimal instances such that "s op o" is + equivalent to "self op other" for any of the 6 comparison + operators "op". + + RationalComplex999999 # A numeric string consists of: +# \s* + (?P[-+])? # an optional sign, followed by either... + ( + (?=\d|\.\d) # ...a number (with at least one digit) + (?P\d*) # having a (possibly empty) integer part + (\.(?P\d*))? # followed by an optional fractional part + (E(?P[-+]?\d+))? # followed by an optional exponent, or... + | + Inf(inity)? # ...an infinity, or... + | + (?Ps)? # ...an (optionally signaling) + NaN # NaN + (?P\d*) # with (possibly empty) diagnostic info. + ) +# \s* + \Z +0*$50*$\A +(?: + (?P.)? + (?P[<>=^]) +)? +(?P[-+ ])? +(?P\#)? +(?P0)? +(?P(?!0)\d+)? +(?P,)? +(?:\.(?P0|(?!0)\d+))? +(?P[eEfFgG@�qSm�x \ No newline at end of file diff --git a/example/codeql-db/db-python/default/pools/0/pageDump/page-000000001 b/example/codeql-db/db-python/default/pools/0/pageDump/page-000000001 new file mode 100644 index 0000000000000000000000000000000000000000..f38648d2f4ad60f439ee56f90dde6391870676c3 --- /dev/null +++ b/example/codeql-db/db-python/default/pools/0/pageDump/page-000000001 @@ -0,0 +1,31265 @@ +\A +(?: + (?P.)? + (?P[<>=^]) +)? +(?P[-+ ])? +(?P\#)? +(?P0)? +(?P(?!0)\d+)? +(?P,)? +(?:\.(?P0|(?!0)\d+))? +(?P[eEfFgGn%])? +\Z +DOTALL_parse_format_specifier_regexformat_specParse and validate a format specifier. 
+ + Turns a standard numeric format specifier into a dict, with the + following entries: + + fill: fill character to pad field to minimum width + align: alignment type, either '<', '>', '=' or '^' + sign: either '+', '-' or ' ' + minimumwidth: nonnegative integer giving minimum width + zeropad: boolean, indicating whether to pad with zeros + thousands_sep: string to use as thousands separator, or '' + grouping: grouping for thousands separators, in format + used by localeconv + decimal_point: string to use for decimal point + precision: nonnegative integer giving precision, or None + type: one of the characters 'eEfFgG%', or None + + Invalid format specifier: format_dictfillalignzeropadFill character conflicts with '0' in format specifier: "Fill character conflicts with '0'"" in format specifier: "Alignment conflicts with '0' in format specifier: "Alignment conflicts with '0' in ""format specifier: "minimumwidthgGnthousands_sepExplicit thousands separator conflicts with 'n' type in format specifier: "Explicit thousands separator conflicts with ""'n' type in format specifier: "groupingdecimal_pointGiven an unpadded, non-aligned numeric string 'body' and sign + string 'sign', add padding and alignment conforming to the given + format specifier dictionary 'spec' (as produced by + parse_format_specifier). + + padding=^halfUnrecognised alignment field_group_lengthsConvert a localeconv-style grouping into a (possibly infinite) + iterable of integers representing group lengths. + + unrecognised format for grouping_insert_thousands_sepmin_widthInsert thousands separators into a digit string. + + spec is a dictionary whose keys should include 'thousands_sep' and + 'grouping'; typically it's the result of parsing the format + specifier using _parse_format_specifier. + + The min_width keyword argument gives the minimum length of the + result, which will be padded on the left with zeros if necessary. + + If necessary, the zero padding adds an extra '0' on the left to + avoid a leading thousands separator. For example, inserting + commas every three digits in '123456', with min_width=8, gives + '0,123,456', even though that has length 9. + + groupsgroup length should be positiveis_negativeDetermine sign character. +Format a number, given the following data: + + is_negative: true if the number is negative, else false + intpart: string of digits that must appear before the decimal point + fracpart: string of digits that must come after the point + exp: exponent, as an integer + spec: dictionary resulting from parsing the format specifier + + This function uses the information in spec to: + insert separators (decimal separator and thousands separators) + format the sign + format the exponent + add trailing '%' for the '%' type + zero-pad if necessary + fill and align if necessary + altechar{0}{1:+}Inf-Inf# Copyright (c) 2004 Python Software Foundation.# All rights reserved.# Written by Eric Price # and Facundo Batista # and Raymond Hettinger # and Aahz # and Tim Peters# This module should be kept in sync with the latest updates of the# IBM specification as it evolves. Those updates will be treated# as bug fixes (deviation from the spec is a compatibility, usability# bug) and will be backported. 
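The format-specifier fields listed above (fill, align, sign, minimum width, thousands separator, precision, type) are the ones Decimal.__format__ understands via PEP 3101. A short sketch with the standard library, illustrating a few of those fields:

    from decimal import Decimal

    d = Decimal('1234567.891')
    print(format(d, ',.2f'))                   # 1,234,567.89  (grouping + precision)
    print(format(d, '>20'))                    # right-aligned in a field of width 20
    print(format(Decimal('1234.5'), '08,.1f')) # 01,234.5  (zero pad, no leading comma)
    print(format(Decimal('0.0000007'), 'e'))   # 7e-7      (scientific notation)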
At this point the spec is stabilizing# and the updates are becoming fewer, smaller, and less significant.# Two major classes# Named tuple representation# Contexts# Exceptions# Exceptional conditions that trigger InvalidOperation# Constants for use in setting up contexts# Functions for manipulating contexts# Limits for the C version for compatibility# C version: compile time choice that enables the thread local context (deprecated, now always true)# C version: compile time choice that enables the coroutine local context# sys.modules lookup (--without-threads)# For pickling# Highest version of the spec this complies with# See http://speleotrove.com/decimal/# compatible libmpdec version# Rounding# Compatibility with the C version# Errors# List of public traps and flags# Map conditions (per the spec) to signals# Valid rounding modes##### Context Functions ################################################### The getcontext() and setcontext() function manage access to a thread-local# current context.# Don't contaminate the namespace##### Decimal class ######################################################## Do not subclass Decimal from numbers.Real and do not register it as such# (because Decimals are not interoperable with floats). See the notes in# numbers.py for more detail.# Generally, the value of the Decimal instance is given by# (-1)**_sign * _int * 10**_exp# Special values are signified by _is_special == True# We're immutable, so use __new__ not __init__# Note that the coefficient, self._int, is actually stored as# a string rather than as a tuple of digits. This speeds up# the "digits to integer" and "integer to digits" conversions# that are used in almost every arithmetic operation on# Decimals. This is an internal detail: the as_tuple function# and the Decimal constructor still deal with tuples of# digits.# From a string# REs insist on real strings, so we can too.# finite number# NaN# infinity# From an integer# From another decimal# From an internal working value# tuple/list conversion (possibly from as_tuple())# process sign. The isinstance test rejects floats# infinity: value[1] is ignored# process and validate the digits in value[1]# skip leading zeros# NaN: digits form the diagnostic# finite number: digits give the coefficient# handle integer inputs# check for zeros; Decimal('0') == Decimal('-0')# If different signs, neg one is less# self_adjusted < other_adjusted# Note: The Decimal standard doesn't cover rich comparisons for# Decimals. In particular, the specification is silent on the# subject of what should happen for a comparison involving a NaN.# We take the following approach:# == comparisons involving a quiet NaN always return False# != comparisons involving a quiet NaN always return True# == or != comparisons involving a signaling NaN signal# InvalidOperation, and return False or True as above if the# InvalidOperation is not trapped.# <, >, <= and >= comparisons involving a (quiet or signaling)# NaN signal InvalidOperation, and return False if the# This behavior is designed to conform as closely as possible to# that specified by IEEE 754.# Compare(NaN, NaN) = NaN# In order to make sure that the hash of a Decimal instance# agrees with the hash of a numerically equal integer, float# or Fraction, we follow the rules for numeric hashes outlined# in the documentation. 
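The hash comments above describe the requirement that numerically equal values hash identically across int, float, Fraction and Decimal. A quick check with the standard library:

    from decimal import Decimal
    from fractions import Fraction

    # Equal numeric values must have equal hashes, whatever the type.
    assert hash(Decimal('1.5')) == hash(1.5) == hash(Fraction(3, 2))
    assert hash(Decimal('100')) == hash(100)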
(See library docs, 'Built-in Types').# Find n, d in lowest terms such that abs(self) == n / d;# we'll deal with the sign later.# self is an integer.# Find d2, d5 such that abs(self) = n / (2**d2 * 5**d5).# (n & -n).bit_length() - 1 counts trailing zeros in binary# representation of n (provided n is nonzero).# Invariant: eval(repr(d)) == d# self._exp == 'N'# number of digits of self._int to left of decimal point# dotplace is number of digits of self._int to the left of the# decimal point in the mantissa of the output string (that is,# after adjusting the exponent)# no exponent required# usual scientific notation: 1 digit on left of the point# engineering notation, zero# engineering notation, nonzero# -Decimal('0') is Decimal('0'), not Decimal('-0'), except# in ROUND_FLOOR rounding mode.# + (-0) = 0, except in ROUND_FLOOR rounding mode.# If both INF, same sign => same as both, opposite => error.# Can't both be infinity here# If the answer is 0, the sign should be negative, in this case.# Equal and opposite# OK, now abs(op1) > abs(op2)# So we know the sign, and op1 > 0.# Now, op1 > abs(op2) > 0# self - other is computed as self + other.copy_negate()# Special case for multiplying by zero# Fixing in case the exponent is out of bounds# Special case for multiplying by power of 10# Special cases for zeroes# OK, so neither = 0, INF or NaN# result is not exact; adjust to ensure correct rounding# result is exact; get as close to ideal exponent as possible# Here the quotient is too large to be representable# self == +/-infinity -> InvalidOperation# other == 0 -> either InvalidOperation or DivisionUndefined# other = +/-infinity -> remainder = self# self = 0 -> remainder = self, with ideal exponent# catch most cases of large or small quotient# expdiff >= prec+1 => abs(self/other) > 10**prec# expdiff <= -2 => abs(self/other) < 0.1# adjust both arguments to have the same exponent, then divide# remainder is r*10**ideal_exponent; other is +/-op2.int *# 10**ideal_exponent. 
Apply correction to ensure that# abs(remainder) <= abs(other)/2# result has same sign as self unless r is negative# maximum length of payload is precision if clamp=0,# precision-1 if clamp=1.# decapitate payload if necessary# self is +/-Infinity; return unaltered# if self is zero then exponent should be between Etiny and# Emax if clamp==0, and between Etiny and Etop if clamp==1.# exp_min is the smallest allowable exponent of the result,# equal to max(self.adjusted()-context.prec+1, Etiny)# overflow: exp_min > Etop iff self.adjusted() > Emax# round if self has too many digits# check whether the rounding pushed the exponent out of range# raise the appropriate signals, taking care to respect# the precedence described in the specification# raise Clamped on underflow to 0# fold down if clamp == 1 and self has too few digits# here self was representable to begin with; return unchanged# for each of the rounding functions below:# self is a finite, nonzero Decimal# prec is an integer satisfying 0 <= prec < len(self._int)# each function returns either -1, 0, or 1, as follows:# 1 indicates that self should be rounded up (away from zero)# 0 indicates that self should be truncated, and that all the# digits to be truncated are zeros (so the value is unchanged)# -1 indicates that there are nonzero digits to be truncated# two-argument form: use the equivalent quantize call# one-argument form# compute product; raise InvalidOperation if either operand is# a signaling NaN or if the product is zero times infinity.# deal with NaNs: if there are any sNaNs then first one wins,# (i.e. behaviour for NaNs is identical to that of fma)# check inputs: we apply same restrictions as Python's pow()# additional restriction for decimal: the modulus must be less# than 10**prec in absolute value# define 0**0 == NaN, for consistency with two-argument pow# (even though it hurts!)# compute sign of result# convert modulo to a Python integer, and self and other to# Decimal integers (i.e. force their exponents to be >= 0)# compute result using integer pow()# In the comments below, we write x for the value of self and y for the# value of other. Write x = xc*10**xe and abs(y) = yc*10**ye, with xc# and yc positive integers not divisible by 10.# The main purpose of this method is to identify the *failure*# of x**y to be exactly representable with as little effort as# possible. So we look for cheap and easy tests that# eliminate the possibility of x**y being exact. Only if all# these tests are passed do we go on to actually compute x**y.# Here's the main idea. Express y as a rational number m/n, with m and# n relatively prime and n>0. Then for x**y to be exactly# representable (at *any* precision), xc must be the nth power of a# positive integer and xe must be divisible by n. If y is negative# then additionally xc must be a power of either 2 or 5, hence a power# of 2**n or 5**n.# There's a limit to how small |y| can be: if y=m/n as above# then:# (1) if xc != 1 then for the result to be representable we# need xc**(1/n) >= 2, and hence also xc**|y| >= 2. So# if |y| <= 1/nbits(xc) then xc < 2**nbits(xc) <=# 2**(1/|y|), hence xc**|y| < 2 and the result is not# representable.# (2) if xe != 0, |xe|*(1/n) >= 1, so |xe|*|y| >= 1. Hence if# |y| < 1/|xe| then the result is not representable.# Note that since x is not equal to 1, at least one of (1) and# (2) must apply. 
Now |y| < 1/nbits(xc) iff |yc|*nbits(xc) <# 10**-ye iff len(str(|yc|*nbits(xc)) <= -ye.# There's also a limit to how large y can be, at least if it's# positive: the normalized result will have coefficient xc**y,# so if it's representable then xc**y < 10**p, and y <# p/log10(xc). Hence if y*log10(xc) >= p then the result is# not exactly representable.# if len(str(abs(yc*xe)) <= -ye then abs(yc*xe) < 10**-ye,# so |y| < 1/xe and the result is not representable.# Similarly, len(str(abs(yc)*xc_bits)) <= -ye implies |y|# < 1/nbits(xc).# case where xc == 1: result is 10**(xe*y), with xe*y# required to be an integer# result is now 10**(xe * 10**ye); xe * 10**ye must be integral# if other is a nonnegative integer, use ideal exponent# case where y is negative: xc must be either a power# of 2 or a power of 5.# quick test for power of 2# now xc is a power of 2; e is its exponent# We now have:# x = 2**e * 10**xe, e > 0, and y < 0.# The exact result is:# x**y = 5**(-e*y) * 10**(e*y + xe*y)# provided that both e*y and xe*y are integers. Note that if# 5**(-e*y) >= 10**p, then the result can't be expressed# exactly with p digits of precision.# Using the above, we can guard against large values of ye.# 93/65 is an upper bound for log(10)/log(5), so if# ye >= len(str(93*p//65))# then# -e*y >= -y >= 10**ye > 93*p/65 > p*log(10)/log(5),# so 5**(-e*y) >= 10**p, and the coefficient of the result# can't be expressed in p digits.# emax >= largest e such that 5**e < 10**p.# Find -e*y and -xe*y; both must be integers# e >= log_5(xc) if xc is a power of 5; we have# equality all the way up to xc=5**2658# Guard against large values of ye, using the same logic as in# the 'xc is a power of 2' branch. 10/3 is an upper bound for# log(10)/log(2).# now y is positive; find m and n such that y = m/n# compute nth root of xc*10**xe# if 1 < xc < 2**n then xc isn't an nth power# compute nth root of xc using Newton's method# initial estimate# now xc*10**xe is the nth root of the original xc*10**xe# compute mth power of xc*10**xe# if m > p*100//_log10_lb(xc) then m > p/log10(xc), hence xc**m ># 10**p and the result is not representable.# by this point the result *is* exactly representable# adjust the exponent to get as close as possible to the ideal# exponent, if necessary# either argument is a NaN => result is NaN# 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity)# result has sign 1 iff self._sign is 1 and other is an odd integer# -ve**noninteger = NaN# (-0)**noninteger = 0**noninteger# negate self, without doing any unwanted rounding# 0**(+ve or Inf)= 0; 0**(-ve or -Inf) = Infinity# Inf**(+ve or Inf) = Inf; Inf**(-ve or -Inf) = 0# 1**other = 1, but the choice of exponent and the flags# depend on the exponent of self, and on whether other is a# positive integer, a negative integer, or neither# exp = max(self._exp*max(int(other), 0),# 1-context.prec) but evaluating int(other) directly# is dangerous until we know other is small (other# could be 1e999999999)# compute adjusted exponent of self# self ** infinity is infinity if self > 1, 0 if self < 1# self ** -infinity is infinity if self < 1, 0 if self > 1# from here on, the result always goes through the call# to _fix at the end of this function.# crude test to catch cases of extreme overflow/underflow. If# log10(self)*other >= 10**bound and bound >= len(str(Emax))# then 10**bound >= 10**len(str(Emax)) >= Emax+1 and hence# self**other >= 10**(Emax+1), so overflow occurs. 
The test# for underflow is similar.# self > 1 and other +ve, or self < 1 and other -ve# possibility of overflow# self > 1 and other -ve, or self < 1 and other +ve# possibility of underflow to 0# try for an exact result with precision +1# usual case: inexact result, x**y computed directly as exp(y*log(x))# compute correctly rounded result: start with precision +3,# then increase precision until result is unambiguously roundable# unlike exp, ln and log10, the power function respects the# rounding mode; no need to switch to ROUND_HALF_EVEN here# There's a difficulty here when 'other' is not an integer and# the result is exact. In this case, the specification# requires that the Inexact flag be raised (in spite of# exactness), but since the result is exact _fix won't do this# for us. (Correspondingly, the Underflow signal should also# be raised for subnormal results.) We can't directly raise# these signals either before or after calling _fix, since# that would violate the precedence for signals. So we wrap# the ._fix call in a temporary context, and reraise# afterwards.# pad with zeros up to length context.prec+1 if necessary; this# ensures that the Rounded signal will be raised.# create a copy of the current context, with cleared flags/traps# round in the new context# raise Inexact, and if necessary, Underflow# propagate signals to the original context; _fix could# have raised any of Overflow, Underflow, Subnormal,# Inexact, Rounded, Clamped. Overflow needs the correct# arguments. Note that the order of the exceptions is# important here.# if both are inf, it is OK# exp._exp should be between Etiny and Emax# raise appropriate flags# call to fix takes care of any necessary folddown, and# signals Clamped if necessary# pad answer with zeros if necessary# too many digits; round and lose data. If self.adjusted() <# exp-1, replace self by 10**(exp-1) before rounding# it can happen that the rescale alters the adjusted exponent;# for example when rounding 99.97 to 3 significant figures.# When this happens we end up with an extra 0 at the end of# the number; a second rescale fixes this.# the method name changed, but we provide also the old one, for compatibility# exponent = self._exp // 2. sqrt(-0) = -0# At this point self represents a positive number. Let p be# the desired precision and express self in the form c*100**e# with c a positive real number and e an integer, c and e# being chosen so that 100**(p-1) <= c < 100**p. Then the# (exact) square root of self is sqrt(c)*10**e, and 10**(p-1)# <= sqrt(c) < 10**p, so the closest representable Decimal at# precision p is n*10**e where n = round_half_even(sqrt(c)),# the closest integer to sqrt(c) with the even integer chosen# in the case of a tie.# To ensure correct rounding in all cases, we use the# following trick: we compute the square root to an extra# place (precision p+1 instead of precision p), rounding down.# Then, if the result is inexact and its last digit is 0 or 5,# we increase the last digit to 1 or 6 respectively; if it's# exact we leave the last digit alone. Now the final round to# p places (or fewer in the case of underflow) will round# correctly and raise the appropriate flags.# use an extra digit of precision# write argument in the form c*100**e where e = self._exp//2# is the 'ideal' exponent, to be used if the square root is# exactly representable. 
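The square-root comments above describe finding n = floor(sqrt(c)) for an integer c with Newton's method, after working at one extra digit of precision. The sketch below shows just the integer Newton iteration; it is illustrative and not the module's private implementation, and math.isqrt is used only as a cross-check:

    import math

    def isqrt_newton(c):
        """floor(sqrt(c)) for a nonnegative integer c, via Newton's method."""
        if c == 0:
            return 0
        x = 1 << ((c.bit_length() + 1) // 2)   # initial overestimate >= sqrt(c)
        while True:
            y = (x + c // x) // 2              # Newton step on x**2 - c
            if y >= x:
                return x
            x = y

    for n in (0, 1, 2, 99, 10**20, 10**20 + 12345):
        assert isqrt_newton(n) == math.isqrt(n)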
l is the number of 'digits' of c in# base 100, so that 100**(l-1) <= c < 100**l.# rescale so that c has exactly prec base 100 'digits'# find n = floor(sqrt(c)) using Newton's method# result is exact; rescale to use ideal exponent e# assert n % 10**shift == 0# result is not exact; fix last digit as described above# round, and fit to current context# If one operand is a quiet NaN and the other is number, then the# number is always returned# If both operands are finite and equal in numerical value# then an ordering is applied:# If the signs differ then max returns the operand with the# positive sign and min returns the operand with the negative sign# If the signs are the same then the exponent is used to select# the result. This is exactly the ordering used in compare_total.# If NaN or Infinity, self._exp is string# if one is negative and the other is positive, it's easy# let's handle both NaN types# compare payloads as though they're integers# exp(NaN) = NaN# exp(-Infinity) = 0# exp(0) = 1# exp(Infinity) = Infinity# the result is now guaranteed to be inexact (the true# mathematical result is transcendental). There's no need to# raise Rounded and Inexact here---they'll always be raised as# a result of the call to _fix.# we only need to do any computation for quite a small range# of adjusted exponents---for example, -29 <= adj <= 10 for# the default context. For smaller exponent the result is# indistinguishable from 1 at the given precision, while for# larger exponent the result either overflows or underflows.# overflow# underflow to 0# p+1 digits; final round will raise correct flags# general case# compute correctly rounded result: increase precision by# 3 digits at a time until we get an unambiguously# roundable result# at this stage, ans should round correctly with *any*# rounding mode, not just with ROUND_HALF_EVEN# for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1# argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10)# argument <= 0.1# 1 < self < 10# adj == -1, 0.1 <= self < 1# ln(NaN) = NaN# ln(0.0) == -Infinity# ln(Infinity) = Infinity# ln(1.0) == 0.0# ln(negative) raises InvalidOperation# result is irrational, so necessarily inexact# correctly rounded result: repeatedly increase precision by 3# until we get an unambiguously roundable result# at least p+3 places# assert len(str(abs(coeff)))-p >= 1# For x >= 10 or x < 0.1 we only need a bound on the integer# part of log10(self), and this comes directly from the# exponent of x. For 0.1 <= x <= 10 we use the inequalities# 1-1/x <= log(x) <= x-1. If x > 1 we have |log10(x)| ># (1-1/x)/2.31 > 0. If x < 1 then |log10(x)| > (1-x)/2.31 > 0# self >= 10# self < 0.1# log10(NaN) = NaN# log10(0.0) == -Infinity# log10(Infinity) = Infinity# log10(negative or -Infinity) raises InvalidOperation# log10(10**n) = n# answer may need rounding# correctly rounded result: repeatedly increase precision# until result is unambiguously roundable# logb(NaN) = NaN# logb(+/-Inf) = +Inf# logb(0) = -Inf, DivisionByZero# otherwise, simply return the adjusted exponent of self, as a# Decimal. 
Note that no attempt is made to fit the result# into the current context.# fill to context.prec# make the operation, and clean starting zeroes# comparison == 1# decide which flags to raise using value of ans# if precision == 1 then we don't raise Clamped for a# result 0E-Etiny.# just a normal, regular, boring number, :)# get values, pad if necessary# let's rotate!# let's shift!# Support for pickling, copy, and deepcopy# I'm immutable; therefore I am my own clone# My components are also immutable# PEP 3101 support. the _localeconv keyword argument should be# considered private: it's provided for ease of testing only.# Note: PEP 3101 says that if the type is not present then# there should be at least one digit after the decimal point.# We take the liberty of ignoring this requirement for# Decimal---it's presumably there to make sure that# format(float, '') behaves similarly to str(float).# special values don't care about the type or precision# a type of None defaults to 'g' or 'G', depending on context# if type is '%', adjust exponent of self accordingly# round if necessary, taking rounding mode from the context# special case: zeros with a positive exponent can't be# represented in fixed point; rescale them to 0e0.# figure out placement of the decimal point# find digits before and after decimal point, and get exponent# done with the decimal-specific stuff; hand over the rest# of the formatting to the _format_number function# Register Decimal as a kind of Number (an abstract base class).# However, do not register it as Real (because Decimals are not# interoperable with floats).##### Context class ######################################################## Set defaults; for everything except flags and _ignored_flags,# inherit from DefaultContext.# raise TypeError even for strings to have consistency# among various implementations.# Don't touch the flag# The errors define how to handle themselves.# Errors should only be risked on copies of the context# self._ignored_flags = []# Do not mutate-- This way, copies of a context leave the original# alone.# We inherit object.__hash__, so we must deny this explicitly# An exact conversion# Apply the context rounding# Methods# sign: 0 or 1# int: int# exp: None, int, or string# assert isinstance(value, tuple)# Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1).# Then adding 10**exp to tmp has the same effect (after rounding)# as adding any positive quantity smaller than 10**exp; similarly# for subtraction. So if other is smaller than 10**exp we replace# it with 10**exp. This avoids tmp.exp - other.exp getting too large.##### Integer arithmetic functions used by ln, log10, exp and __pow__ ###### val_n = largest power of 10 dividing n.# The basic algorithm is the following: let log1p be the function# log1p(x) = log(1+x). Then log(x/M) = log1p((x-M)/M). We use# the reduction# log1p(y) = 2*log1p(y/(1+sqrt(1+y)))# repeatedly until the argument to log1p is small (< 2**-L in# absolute value). For small y we can use the Taylor series# expansion# log1p(y) ~ y - y**2/2 + y**3/3 - ... - (-y)**T/T# truncating at T such that y**T is small enough. The whole# computation is carried out in a form of fixed-point arithmetic,# with a real number z being represented by an integer# approximation to z*M. 
To avoid loss of precision, the y below# is actually an integer approximation to 2**R*y*M, where R is the# number of reductions performed so far.# argument reduction; R = number of reductions performed# Taylor series with T terms# increase precision by 2; compensate for this by dividing# final result by 100# write c*10**e as d*10**f with either:# f >= 0 and 1 <= d <= 10, or# f <= 0 and 0.1 <= d <= 1.# Thus for c*10**e close to 1, f = 0# error < 5 + 22 = 27# error < 1# exact# error < 2.31# error < 0.5# Increase precision by 2. The precision increase is compensated# for at the end with a division by 100.# rewrite c*10**e as d*10**f with either f >= 0 and 1 <= d <= 10,# or f <= 0 and 0.1 <= d <= 1. Then we can compute 10**p * log(c*10**e)# as 10**p * log(d) + 10**p*f * log(10).# compute approximation to 10**p*log(d), with error < 27# error of <= 0.5 in c# _ilog magnifies existing error in c by a factor of at most 10# p <= 0: just approximate the whole thing by 0; error < 2.31# compute approximation to f*10**p*log(10), with error < 11.# error in f * _log10_digits(p+extra) < |f| * 1 = |f|# after division, error < |f|/10**extra + 0.5 < 10 + 0.5 < 11# error in sum < 11+27 = 38; error after division < 0.38 + 0.5 < 1# digits are stored as a string, for quick conversion to# integer in the case that we've already computed enough# digits; the stored digits should always be correct# (truncated, not rounded to nearest).# compute p+3, p+6, p+9, ... digits; continue until at# least one of the extra digits is nonzero# compute p+extra digits, correct to within 1ulp# keep all reliable digits so far; remove trailing zeros# and next nonzero digit# Algorithm: to compute exp(z) for a real number z, first divide z# by a suitable power R of 2 so that |z/2**R| < 2**-L. Then# compute expm1(z/2**R) = exp(z/2**R) - 1 using the usual Taylor# series# expm1(x) = x + x**2/2! + x**3/3! + ...# Now use the identity# expm1(2x) = expm1(x)*(expm1(x)+2)# R times to compute the sequence expm1(z/2**R),# expm1(z/2**(R-1)), ... , exp(z/2), exp(z).# Find R such that x/2**R/M <= 2**-L# Taylor series. (2**L)**T > M# Expansion# we'll call iexp with M = 10**(p+2), giving p+3 digits of precision# compute log(10) with extra precision = adjusted exponent of c*10**e# compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q),# rounding down# reduce remainder back to original precision# error in result of _iexp < 120; error after division < 0.62# Find b such that 10**(b-1) <= |y| <= 10**b# log(x) = lxc*10**(-p-b-1), to p+b+1 places after the decimal point# compute product y*log(x) = yc*lxc*10**(-p-b-1+ye) = pc*10**(-p-1)# we prefer a result that isn't exactly 1; this makes it# easier to compute a correctly rounded result in __pow__# if x**y > 1:##### Helper Functions ##################################################### Comparison with a Rational instance (also includes integers):# self op n/d <=> self*d op n (for n and d integers, d positive).# A NaN or infinity can be left unchanged without affecting the# comparison result.# Comparisons with float and complex types. == and != comparisons# with complex numbers should succeed, returning either True or False# as appropriate. 
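The exp comments above rely on the identity expm1(2x) = expm1(x) * (expm1(x) + 2), which lets a small-argument Taylor expansion be doubled back up to the original argument. A quick float-level sanity check of the identity (illustrative only; the module itself works in integer fixed point):

    import math

    x = 0.0001220703125          # a small argument, x = 2**-13
    lhs = math.expm1(2 * x)
    rhs = math.expm1(x) * (math.expm1(x) + 2)
    assert math.isclose(lhs, rhs, rel_tol=1e-14)

    # Doubling repeatedly recovers expm1 of a larger argument from a tiny one.
    e = math.expm1(x / 2**10)
    for _ in range(10):
        e = e * (e + 2)
    assert math.isclose(e, math.expm1(x), rel_tol=1e-12)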
Other comparisons return NotImplemented.##### Setup Specific Contexts ############################################# The default context prototype used by Context()# Is mutable, so that new contexts can have different default values# Pre-made alternate contexts offered by the specification# Don't change these; the user should be able to select these# contexts and be able to reproduce results from other implementations# of the spec.##### crud for parsing strings ############################################## Regular expression used for parsing numeric strings. Additional# comments:# 1. Uncomment the two '\s*' lines to allow leading and/or trailing# whitespace. But note that the specification disallows whitespace in# a numeric string.# 2. For finite numbers (not infinities and NaNs) the body of the# number between the optional sign and the optional exponent must have# at least one decimal digit, possibly after the decimal point. The# lookahead expression '(?=\d|\.\d)' checks this.##### PEP3101 support functions ############################################### The functions in this section have little to do with the Decimal# class, and could potentially be reused or adapted for other pure# Python numeric classes that want to implement __format__# A format specifier for Decimal looks like:# [[fill]align][sign][#][0][minimumwidth][,][.precision][type]# The locale module is only needed for the 'n' format specifier. The# rest of the PEP 3101 code functions quite happily without it, so we# don't care too much if locale isn't present.# get the dictionary# zeropad; defaults for fill and alignment. If zero padding# is requested, the fill and align fields should be absent.# PEP 3101 originally specified that the default alignment should# be left; it was later agreed that right-aligned makes more sense# for numeric types. See http://bugs.python.org/issue6857.# default sign handling: '-' for negative, '' for positive# minimumwidth defaults to 0; precision remains None if not given# if format type is 'g' or 'G' then a precision of 0 makes little# sense; convert it to 1. Same if format type is unspecified.# determine thousands separator, grouping, and decimal separator, and# add appropriate entries to format_dict# apart from separators, 'n' behaves just like 'g'# how much extra space do we have to play with?# The result from localeconv()['grouping'], and the input to this# function, should be a list of integers in one of the# following three forms:# (1) an empty list, or# (2) nonempty list of positive integers + [0]# (3) list of positive integers + [locale.CHAR_MAX], or# max(..., 1) forces at least 1 digit to the left of a separator##### Useful Constants (internal use only) ################################# Reusable defaults# _SignedInfinity[sign] is infinity w/ that sign# Constants related to the hash implementation; hash(x) is based# on the reduction of x modulo _PyHASH_MODULUS# hash values to use for positive and negative infinities, and nans# _PyHASH_10INV is the inverse of 10 modulo the prime _PyHASH_MODULUSb' +This is an implementation of decimal floating point arithmetic based on +the General Decimal Arithmetic Specification: + + http://speleotrove.com/decimal/decarith.html + +and IEEE standard 854-1987: + + http://en.wikipedia.org/wiki/IEEE_854-1987 + +Decimal floating point has finite precision with arbitrarily large bounds. 
+ +The purpose of this module is to support arithmetic using familiar +"schoolhouse" rules and to avoid some of the tricky representation +issues associated with binary floating point. The package is especially +useful for financial applications or for contexts where users have +expectations that are at odds with binary floating point (for instance, +in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead +of 0.0; Decimal('1.00') % Decimal('0.1') returns the expected +Decimal('0.00')). + +Here are some examples of using the decimal module: + +>>> from decimal import * +>>> setcontext(ExtendedContext) +>>> Decimal(0) +Decimal('0') +>>> Decimal('1') +Decimal('1') +>>> Decimal('-.0123') +Decimal('-0.0123') +>>> Decimal(123456) +Decimal('123456') +>>> Decimal('123.45e12345678') +Decimal('1.2345E+12345680') +>>> Decimal('1.33') + Decimal('1.27') +Decimal('2.60') +>>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41') +Decimal('-2.20') +>>> dig = Decimal(1) +>>> print(dig / Decimal(3)) +0.333333333 +>>> getcontext().prec = 18 +>>> print(dig / Decimal(3)) +0.333333333333333333 +>>> print(dig.sqrt()) +1 +>>> print(Decimal(3).sqrt()) +1.73205080756887729 +>>> print(Decimal(3) ** 123) +4.85192780976896427E+58 +>>> inf = Decimal(1) / Decimal(0) +>>> print(inf) +Infinity +>>> neginf = Decimal(-1) / Decimal(0) +>>> print(neginf) +-Infinity +>>> print(neginf + inf) +NaN +>>> print(neginf * inf) +-Infinity +>>> print(dig / 0) +Infinity +>>> getcontext().traps[DivisionByZero] = 1 +>>> print(dig / 0) +Traceback (most recent call last): + ... + ... + ... +decimal.DivisionByZero: x / 0 +>>> c = Context() +>>> c.traps[InvalidOperation] = 0 +>>> print(c.flags[InvalidOperation]) +0 +>>> c.divide(Decimal(0), Decimal(0)) +Decimal('NaN') +>>> c.traps[InvalidOperation] = 1 +>>> print(c.flags[InvalidOperation]) +1 +>>> c.flags[InvalidOperation] = 0 +>>> print(c.flags[InvalidOperation]) +0 +>>> print(c.divide(Decimal(0), Decimal(0))) +Traceback (most recent call last): + ... + ... + ... +decimal.InvalidOperation: 0 / 0 +>>> print(c.flags[InvalidOperation]) +1 +>>> c.flags[InvalidOperation] = 0 +>>> c.traps[InvalidOperation] = 0 +>>> print(c.divide(Decimal(0), Decimal(0))) +NaN +>>> print(c.flags[InvalidOperation]) +1 +>>> +'u' +This is an implementation of decimal floating point arithmetic based on +the General Decimal Arithmetic Specification: + + http://speleotrove.com/decimal/decarith.html + +and IEEE standard 854-1987: + + http://en.wikipedia.org/wiki/IEEE_854-1987 + +Decimal floating point has finite precision with arbitrarily large bounds. + +The purpose of this module is to support arithmetic using familiar +"schoolhouse" rules and to avoid some of the tricky representation +issues associated with binary floating point. The package is especially +useful for financial applications or for contexts where users have +expectations that are at odds with binary floating point (for instance, +in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead +of 0.0; Decimal('1.00') % Decimal('0.1') returns the expected +Decimal('0.00')). 
+ +Here are some examples of using the decimal module: + +>>> from decimal import * +>>> setcontext(ExtendedContext) +>>> Decimal(0) +Decimal('0') +>>> Decimal('1') +Decimal('1') +>>> Decimal('-.0123') +Decimal('-0.0123') +>>> Decimal(123456) +Decimal('123456') +>>> Decimal('123.45e12345678') +Decimal('1.2345E+12345680') +>>> Decimal('1.33') + Decimal('1.27') +Decimal('2.60') +>>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41') +Decimal('-2.20') +>>> dig = Decimal(1) +>>> print(dig / Decimal(3)) +0.333333333 +>>> getcontext().prec = 18 +>>> print(dig / Decimal(3)) +0.333333333333333333 +>>> print(dig.sqrt()) +1 +>>> print(Decimal(3).sqrt()) +1.73205080756887729 +>>> print(Decimal(3) ** 123) +4.85192780976896427E+58 +>>> inf = Decimal(1) / Decimal(0) +>>> print(inf) +Infinity +>>> neginf = Decimal(-1) / Decimal(0) +>>> print(neginf) +-Infinity +>>> print(neginf + inf) +NaN +>>> print(neginf * inf) +-Infinity +>>> print(dig / 0) +Infinity +>>> getcontext().traps[DivisionByZero] = 1 +>>> print(dig / 0) +Traceback (most recent call last): + ... + ... + ... +decimal.DivisionByZero: x / 0 +>>> c = Context() +>>> c.traps[InvalidOperation] = 0 +>>> print(c.flags[InvalidOperation]) +0 +>>> c.divide(Decimal(0), Decimal(0)) +Decimal('NaN') +>>> c.traps[InvalidOperation] = 1 +>>> print(c.flags[InvalidOperation]) +1 +>>> c.flags[InvalidOperation] = 0 +>>> print(c.flags[InvalidOperation]) +0 +>>> print(c.divide(Decimal(0), Decimal(0))) +Traceback (most recent call last): + ... + ... + ... +decimal.InvalidOperation: 0 / 0 +>>> print(c.flags[InvalidOperation]) +1 +>>> c.flags[InvalidOperation] = 0 +>>> c.traps[InvalidOperation] = 0 +>>> print(c.divide(Decimal(0), Decimal(0))) +NaN +>>> print(c.flags[InvalidOperation]) +1 +>>> +'b'Decimal'u'Decimal'b'Context'u'Context'b'DecimalTuple'u'DecimalTuple'b'DefaultContext'u'DefaultContext'b'BasicContext'u'BasicContext'b'ExtendedContext'u'ExtendedContext'b'DecimalException'u'DecimalException'b'Clamped'u'Clamped'b'InvalidOperation'u'InvalidOperation'b'DivisionByZero'u'DivisionByZero'b'Inexact'u'Inexact'b'Rounded'u'Rounded'b'Subnormal'u'Subnormal'b'Overflow'u'Overflow'b'Underflow'u'Underflow'b'FloatOperation'u'FloatOperation'b'DivisionImpossible'u'DivisionImpossible'b'InvalidContext'u'InvalidContext'b'ConversionSyntax'u'ConversionSyntax'b'DivisionUndefined'u'DivisionUndefined'b'ROUND_DOWN'b'ROUND_HALF_UP'b'ROUND_HALF_EVEN'b'ROUND_CEILING'b'ROUND_FLOOR'b'ROUND_UP'b'ROUND_HALF_DOWN'b'ROUND_05UP'b'setcontext'u'setcontext'b'getcontext'u'getcontext'b'localcontext'u'localcontext'b'MAX_PREC'u'MAX_PREC'b'MAX_EMAX'u'MAX_EMAX'b'MIN_EMIN'u'MIN_EMIN'b'MIN_ETINY'u'MIN_ETINY'b'HAVE_THREADS'u'HAVE_THREADS'b'HAVE_CONTEXTVAR'u'HAVE_CONTEXTVAR'b'decimal'b'1.70'b'2.4.2'b'sign digits exponent'u'sign digits exponent'b'Base exception class. + + Used exceptions derive from this. + If an exception derives from another exception besides this (such as + Underflow (Inexact, Rounded, Subnormal) that indicates that it is only + called if the others are present. This isn't actually used for + anything, though. + + handle -- Called when context._raise_error is called and the + trap_enabler is not set. First argument is self, second is the + context. More arguments can be given, those being after + the explanation in _raise_error (For example, + context._raise_error(NewError, '(-x)!', self._sign) would + call NewError().handle(context, self._sign).) + + To define a new exception, it should be sufficient to have it derive + from DecimalException. + 'u'Base exception class. 
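The exception machinery described above (each condition class handles itself when its trap is not enabled, and is raised otherwise) is visible through a Context's traps and flags dictionaries. A short sketch with the public API:

    from decimal import Context, Decimal, DivisionByZero, Inexact

    c = Context(prec=4)
    c.traps[DivisionByZero] = False            # untrapped: the condition's handler runs
    print(c.divide(Decimal(1), Decimal(0)))    # returns Infinity ...
    print(c.flags[DivisionByZero])             # ... and the flag is set (true value)

    c.divide(Decimal(1), Decimal(3))           # 0.3333 at prec=4, so inexact
    print(c.flags[Inexact])                    # flag is set

    c.traps[DivisionByZero] = True             # trapped: the exception is raised
    try:
        c.divide(Decimal(1), Decimal(0))
    except DivisionByZero:
        print('DivisionByZero raised')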
+ + Used exceptions derive from this. + If an exception derives from another exception besides this (such as + Underflow (Inexact, Rounded, Subnormal) that indicates that it is only + called if the others are present. This isn't actually used for + anything, though. + + handle -- Called when context._raise_error is called and the + trap_enabler is not set. First argument is self, second is the + context. More arguments can be given, those being after + the explanation in _raise_error (For example, + context._raise_error(NewError, '(-x)!', self._sign) would + call NewError().handle(context, self._sign).) + + To define a new exception, it should be sufficient to have it derive + from DecimalException. + 'b'Exponent of a 0 changed to fit bounds. + + This occurs and signals clamped if the exponent of a result has been + altered in order to fit the constraints of a specific concrete + representation. This may occur when the exponent of a zero result would + be outside the bounds of a representation, or when a large normal + number would have an encoded exponent that cannot be represented. In + this latter case, the exponent is reduced to fit and the corresponding + number of zero digits are appended to the coefficient ("fold-down"). + 'u'Exponent of a 0 changed to fit bounds. + + This occurs and signals clamped if the exponent of a result has been + altered in order to fit the constraints of a specific concrete + representation. This may occur when the exponent of a zero result would + be outside the bounds of a representation, or when a large normal + number would have an encoded exponent that cannot be represented. In + this latter case, the exponent is reduced to fit and the corresponding + number of zero digits are appended to the coefficient ("fold-down"). + 'b'An invalid operation was performed. + + Various bad things cause this: + + Something creates a signaling NaN + -INF + INF + 0 * (+-)INF + (+-)INF / (+-)INF + x % 0 + (+-)INF % x + x._rescale( non-integer ) + sqrt(-x) , x > 0 + 0 ** 0 + x ** (non-integer) + x ** (+-)INF + An operand is invalid + + The result of the operation after these is a quiet positive NaN, + except when the cause is a signaling NaN, in which case the result is + also a quiet NaN, but with the original sign, and an optional + diagnostic information. + 'u'An invalid operation was performed. + + Various bad things cause this: + + Something creates a signaling NaN + -INF + INF + 0 * (+-)INF + (+-)INF / (+-)INF + x % 0 + (+-)INF % x + x._rescale( non-integer ) + sqrt(-x) , x > 0 + 0 ** 0 + x ** (non-integer) + x ** (+-)INF + An operand is invalid + + The result of the operation after these is a quiet positive NaN, + except when the cause is a signaling NaN, in which case the result is + also a quiet NaN, but with the original sign, and an optional + diagnostic information. + 'b'Trying to convert badly formed string. + + This occurs and signals invalid-operation if a string is being + converted to a number and it does not conform to the numeric string + syntax. The result is [0,qNaN]. + 'u'Trying to convert badly formed string. + + This occurs and signals invalid-operation if a string is being + converted to a number and it does not conform to the numeric string + syntax. The result is [0,qNaN]. + 'b'Division by 0. + + This occurs and signals division-by-zero if division of a finite number + by zero was attempted (during a divide-integer or divide operation, or a + power operation with negative right-hand operand), and the dividend was + not zero. 
+ + The result of the operation is [sign,inf], where sign is the exclusive + or of the signs of the operands for divide, or is 1 for an odd power of + -0, for power. + 'u'Division by 0. + + This occurs and signals division-by-zero if division of a finite number + by zero was attempted (during a divide-integer or divide operation, or a + power operation with negative right-hand operand), and the dividend was + not zero. + + The result of the operation is [sign,inf], where sign is the exclusive + or of the signs of the operands for divide, or is 1 for an odd power of + -0, for power. + 'b'Cannot perform the division adequately. + + This occurs and signals invalid-operation if the integer result of a + divide-integer or remainder operation had too many digits (would be + longer than precision). The result is [0,qNaN]. + 'u'Cannot perform the division adequately. + + This occurs and signals invalid-operation if the integer result of a + divide-integer or remainder operation had too many digits (would be + longer than precision). The result is [0,qNaN]. + 'b'Undefined result of division. + + This occurs and signals invalid-operation if division by zero was + attempted (during a divide-integer, divide, or remainder operation), and + the dividend is also zero. The result is [0,qNaN]. + 'u'Undefined result of division. + + This occurs and signals invalid-operation if division by zero was + attempted (during a divide-integer, divide, or remainder operation), and + the dividend is also zero. The result is [0,qNaN]. + 'b'Had to round, losing information. + + This occurs and signals inexact whenever the result of an operation is + not exact (that is, it needed to be rounded and any discarded digits + were non-zero), or if an overflow or underflow condition occurs. The + result in all cases is unchanged. + + The inexact signal may be tested (or trapped) to determine if a given + operation (or sequence of operations) was inexact. + 'u'Had to round, losing information. + + This occurs and signals inexact whenever the result of an operation is + not exact (that is, it needed to be rounded and any discarded digits + were non-zero), or if an overflow or underflow condition occurs. The + result in all cases is unchanged. + + The inexact signal may be tested (or trapped) to determine if a given + operation (or sequence of operations) was inexact. + 'b'Invalid context. Unknown rounding, for example. + + This occurs and signals invalid-operation if an invalid context was + detected during an operation. This can occur if contexts are not checked + on creation and either the precision exceeds the capability of the + underlying concrete representation or an unknown or unsupported rounding + was specified. These aspects of the context need only be checked when + the values are required to be used. The result is [0,qNaN]. + 'u'Invalid context. Unknown rounding, for example. + + This occurs and signals invalid-operation if an invalid context was + detected during an operation. This can occur if contexts are not checked + on creation and either the precision exceeds the capability of the + underlying concrete representation or an unknown or unsupported rounding + was specified. These aspects of the context need only be checked when + the values are required to be used. The result is [0,qNaN]. + 'b'Number got rounded (not necessarily changed during rounding). 
+ + This occurs and signals rounded whenever the result of an operation is + rounded (that is, some zero or non-zero digits were discarded from the + coefficient), or if an overflow or underflow condition occurs. The + result in all cases is unchanged. + + The rounded signal may be tested (or trapped) to determine if a given + operation (or sequence of operations) caused a loss of precision. + 'u'Number got rounded (not necessarily changed during rounding). + + This occurs and signals rounded whenever the result of an operation is + rounded (that is, some zero or non-zero digits were discarded from the + coefficient), or if an overflow or underflow condition occurs. The + result in all cases is unchanged. + + The rounded signal may be tested (or trapped) to determine if a given + operation (or sequence of operations) caused a loss of precision. + 'b'Exponent < Emin before rounding. + + This occurs and signals subnormal whenever the result of a conversion or + operation is subnormal (that is, its adjusted exponent is less than + Emin, before any rounding). The result in all cases is unchanged. + + The subnormal signal may be tested (or trapped) to determine if a given + or operation (or sequence of operations) yielded a subnormal result. + 'u'Exponent < Emin before rounding. + + This occurs and signals subnormal whenever the result of a conversion or + operation is subnormal (that is, its adjusted exponent is less than + Emin, before any rounding). The result in all cases is unchanged. + + The subnormal signal may be tested (or trapped) to determine if a given + or operation (or sequence of operations) yielded a subnormal result. + 'b'Numerical overflow. + + This occurs and signals overflow if the adjusted exponent of a result + (from a conversion or from an operation that is not an attempt to divide + by zero), after rounding, would be greater than the largest value that + can be handled by the implementation (the value Emax). + + The result depends on the rounding mode: + + For round-half-up and round-half-even (and for round-half-down and + round-up, if implemented), the result of the operation is [sign,inf], + where sign is the sign of the intermediate result. For round-down, the + result is the largest finite number that can be represented in the + current precision, with the sign of the intermediate result. For + round-ceiling, the result is the same as for round-down if the sign of + the intermediate result is 1, or is [0,inf] otherwise. For round-floor, + the result is the same as for round-down if the sign of the intermediate + result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded + will also be raised. + 'u'Numerical overflow. + + This occurs and signals overflow if the adjusted exponent of a result + (from a conversion or from an operation that is not an attempt to divide + by zero), after rounding, would be greater than the largest value that + can be handled by the implementation (the value Emax). + + The result depends on the rounding mode: + + For round-half-up and round-half-even (and for round-half-down and + round-up, if implemented), the result of the operation is [sign,inf], + where sign is the sign of the intermediate result. For round-down, the + result is the largest finite number that can be represented in the + current precision, with the sign of the intermediate result. For + round-ceiling, the result is the same as for round-down if the sign of + the intermediate result is 1, or is [0,inf] otherwise. 
For round-floor, + the result is the same as for round-down if the sign of the intermediate + result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded + will also be raised. + 'b'Numerical underflow with result rounded to 0. + + This occurs and signals underflow if a result is inexact and the + adjusted exponent of the result would be smaller (more negative) than + the smallest value that can be handled by the implementation (the value + Emin). That is, the result is both inexact and subnormal. + + The result after an underflow will be a subnormal number rounded, if + necessary, so that its exponent is not less than Etiny. This may result + in 0 with the sign of the intermediate result and an exponent of Etiny. + + In all cases, Inexact, Rounded, and Subnormal will also be raised. + 'u'Numerical underflow with result rounded to 0. + + This occurs and signals underflow if a result is inexact and the + adjusted exponent of the result would be smaller (more negative) than + the smallest value that can be handled by the implementation (the value + Emin). That is, the result is both inexact and subnormal. + + The result after an underflow will be a subnormal number rounded, if + necessary, so that its exponent is not less than Etiny. This may result + in 0 with the sign of the intermediate result and an exponent of Etiny. + + In all cases, Inexact, Rounded, and Subnormal will also be raised. + 'b'Enable stricter semantics for mixing floats and Decimals. + + If the signal is not trapped (default), mixing floats and Decimals is + permitted in the Decimal() constructor, context.create_decimal() and + all comparison operators. Both conversion and comparisons are exact. + Any occurrence of a mixed operation is silently recorded by setting + FloatOperation in the context flags. Explicit conversions with + Decimal.from_float() or context.create_decimal_from_float() do not + set the flag. + + Otherwise (the signal is trapped), only equality comparisons and explicit + conversions are silent. All other mixed operations raise FloatOperation. + 'u'Enable stricter semantics for mixing floats and Decimals. + + If the signal is not trapped (default), mixing floats and Decimals is + permitted in the Decimal() constructor, context.create_decimal() and + all comparison operators. Both conversion and comparisons are exact. + Any occurrence of a mixed operation is silently recorded by setting + FloatOperation in the context flags. Explicit conversions with + Decimal.from_float() or context.create_decimal_from_float() do not + set the flag. + + Otherwise (the signal is trapped), only equality comparisons and explicit + conversions are silent. All other mixed operations raise FloatOperation. + 'b'decimal_context'u'decimal_context'b'Returns this thread's context. + + If this thread does not yet have a context, returns + a new context and sets this thread's context. + New contexts are copies of DefaultContext. + 'u'Returns this thread's context. + + If this thread does not yet have a context, returns + a new context and sets this thread's context. + New contexts are copies of DefaultContext. 
+ 'b'Set this thread's context to context.'u'Set this thread's context to context.'b'Return a context manager for a copy of the supplied context + + Uses a copy of the current context if no context is specified + The returned context manager creates a local decimal context + in a with statement: + def sin(x): + with localcontext() as ctx: + ctx.prec += 2 + # Rest of sin calculation algorithm + # uses a precision 2 greater than normal + return +s # Convert result to normal precision + + def sin(x): + with localcontext(ExtendedContext): + # Rest of sin calculation algorithm + # uses the Extended Context from the + # General Decimal Arithmetic Specification + return +s # Convert result to normal context + + >>> setcontext(DefaultContext) + >>> print(getcontext().prec) + 28 + >>> with localcontext(): + ... ctx = getcontext() + ... ctx.prec += 2 + ... print(ctx.prec) + ... + 30 + >>> with localcontext(ExtendedContext): + ... print(getcontext().prec) + ... + 9 + >>> print(getcontext().prec) + 28 + 'u'Return a context manager for a copy of the supplied context + + Uses a copy of the current context if no context is specified + The returned context manager creates a local decimal context + in a with statement: + def sin(x): + with localcontext() as ctx: + ctx.prec += 2 + # Rest of sin calculation algorithm + # uses a precision 2 greater than normal + return +s # Convert result to normal precision + + def sin(x): + with localcontext(ExtendedContext): + # Rest of sin calculation algorithm + # uses the Extended Context from the + # General Decimal Arithmetic Specification + return +s # Convert result to normal context + + >>> setcontext(DefaultContext) + >>> print(getcontext().prec) + 28 + >>> with localcontext(): + ... ctx = getcontext() + ... ctx.prec += 2 + ... print(ctx.prec) + ... + 30 + >>> with localcontext(ExtendedContext): + ... print(getcontext().prec) + ... + 9 + >>> print(getcontext().prec) + 28 + 'b'Floating point class for decimal arithmetic.'u'Floating point class for decimal arithmetic.'b'_exp'u'_exp'b'_int'u'_int'b'_sign'u'_sign'b'_is_special'u'_is_special'b'Create a decimal point instance. + + >>> Decimal('3.14') # string input + Decimal('3.14') + >>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent) + Decimal('3.14') + >>> Decimal(314) # int + Decimal('314') + >>> Decimal(Decimal(314)) # another decimal instance + Decimal('314') + >>> Decimal(' 3.14 \n') # leading and trailing whitespace okay + Decimal('3.14') + 'u'Create a decimal point instance. + + >>> Decimal('3.14') # string input + Decimal('3.14') + >>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent) + Decimal('3.14') + >>> Decimal(314) # int + Decimal('314') + >>> Decimal(Decimal(314)) # another decimal instance + Decimal('314') + >>> Decimal(' 3.14 \n') # leading and trailing whitespace okay + Decimal('3.14') + 'b'Invalid literal for Decimal: %r'u'Invalid literal for Decimal: %r'b'sign'b'frac'u'frac'b'exp'u'exp'b'diag'u'diag'b'signal'u'signal'b'N'u'N'b'F'u'F'b'Invalid tuple size in creation of Decimal from list or tuple. The list or tuple should have exactly three elements.'u'Invalid tuple size in creation of Decimal from list or tuple. The list or tuple should have exactly three elements.'b'Invalid sign. The first value in the tuple should be an integer; either 0 for a positive number or 1 for a negative number.'u'Invalid sign. 
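The getcontext/localcontext docstrings above describe a thread-local current context and a context-manager copy of it. A compact runnable version of that usage:

    from decimal import Decimal, getcontext, localcontext

    getcontext().prec = 28
    with localcontext() as ctx:        # work on a copy of the current context
        ctx.prec = 50
        inside = Decimal(1) / Decimal(7)
    outside = Decimal(1) / Decimal(7)  # the original precision is restored here

    print(len(str(inside)))    # about 52 characters: '0.' plus 50 digits
    print(len(str(outside)))   # about 30 characters: back to 28 digits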
The first value in the tuple should be an integer; either 0 for a positive number or 1 for a negative number.'b'The second value in the tuple must be composed of integers in the range 0 through 9.'u'The second value in the tuple must be composed of integers in the range 0 through 9.'b'The third value in the tuple must be an integer, or one of the strings 'F', 'n', 'N'.'u'The third value in the tuple must be an integer, or one of the strings 'F', 'n', 'N'.'b'strict semantics for mixing floats and Decimals are enabled'u'strict semantics for mixing floats and Decimals are enabled'b'Cannot convert %r to Decimal'u'Cannot convert %r to Decimal'b'Converts a float to a decimal number, exactly. + + Note that Decimal.from_float(0.1) is not the same as Decimal('0.1'). + Since 0.1 is not exactly representable in binary floating point, the + value is stored as the nearest representable value which is + 0x1.999999999999ap-4. The exact equivalent of the value in decimal + is 0.1000000000000000055511151231257827021181583404541015625. + + >>> Decimal.from_float(0.1) + Decimal('0.1000000000000000055511151231257827021181583404541015625') + >>> Decimal.from_float(float('nan')) + Decimal('NaN') + >>> Decimal.from_float(float('inf')) + Decimal('Infinity') + >>> Decimal.from_float(-float('inf')) + Decimal('-Infinity') + >>> Decimal.from_float(-0.0) + Decimal('-0') + + 'u'Converts a float to a decimal number, exactly. + + Note that Decimal.from_float(0.1) is not the same as Decimal('0.1'). + Since 0.1 is not exactly representable in binary floating point, the + value is stored as the nearest representable value which is + 0x1.999999999999ap-4. The exact equivalent of the value in decimal + is 0.1000000000000000055511151231257827021181583404541015625. + + >>> Decimal.from_float(0.1) + Decimal('0.1000000000000000055511151231257827021181583404541015625') + >>> Decimal.from_float(float('nan')) + Decimal('NaN') + >>> Decimal.from_float(float('inf')) + Decimal('Infinity') + >>> Decimal.from_float(-float('inf')) + Decimal('-Infinity') + >>> Decimal.from_float(-0.0) + Decimal('-0') + + 'b'argument must be int or float.'u'argument must be int or float.'b'Returns whether the number is not actually one. + + 0 if a number + 1 if NaN + 2 if sNaN + 'u'Returns whether the number is not actually one. + + 0 if a number + 1 if NaN + 2 if sNaN + 'b'Returns whether the number is infinite + + 0 if finite or not a number + 1 if +INF + -1 if -INF + 'u'Returns whether the number is infinite + + 0 if finite or not a number + 1 if +INF + -1 if -INF + 'b'Returns whether the number is not actually one. + + if self, other are sNaN, signal + if self, other are NaN return nan + return 0 + + Done before operations. + 'u'Returns whether the number is not actually one. + + if self, other are sNaN, signal + if self, other are NaN return nan + return 0 + + Done before operations. + 'b'sNaN'u'sNaN'b'Version of _check_nans used for the signaling comparisons + compare_signal, __le__, __lt__, __ge__, __gt__. + + Signal InvalidOperation if either self or other is a (quiet + or signaling) NaN. Signaling NaNs take precedence over quiet + NaNs. + + Return 0 if neither operand is a NaN. + + 'u'Version of _check_nans used for the signaling comparisons + compare_signal, __le__, __lt__, __ge__, __gt__. + + Signal InvalidOperation if either self or other is a (quiet + or signaling) NaN. Signaling NaNs take precedence over quiet + NaNs. + + Return 0 if neither operand is a NaN. 
+ + 'b'comparison involving sNaN'u'comparison involving sNaN'b'comparison involving NaN'u'comparison involving NaN'b'Return True if self is nonzero; otherwise return False. + + NaNs and infinities are considered nonzero. + 'u'Return True if self is nonzero; otherwise return False. + + NaNs and infinities are considered nonzero. + 'b'Compare the two non-NaN decimal instances self and other. + + Returns -1 if self < other, 0 if self == other and 1 + if self > other. This routine is for internal use only.'u'Compare the two non-NaN decimal instances self and other. + + Returns -1 if self < other, 0 if self == other and 1 + if self > other. This routine is for internal use only.'b'Compare self to other. Return a decimal value: + + a or b is a NaN ==> Decimal('NaN') + a < b ==> Decimal('-1') + a == b ==> Decimal('0') + a > b ==> Decimal('1') + 'u'Compare self to other. Return a decimal value: + + a or b is a NaN ==> Decimal('NaN') + a < b ==> Decimal('-1') + a == b ==> Decimal('0') + a > b ==> Decimal('1') + 'b'x.__hash__() <==> hash(x)'u'x.__hash__() <==> hash(x)'b'Cannot hash a signaling NaN value.'u'Cannot hash a signaling NaN value.'b'Represents the number as a triple tuple. + + To show the internals exactly as they are. + 'u'Represents the number as a triple tuple. + + To show the internals exactly as they are. + 'b'Express a finite Decimal instance in the form n / d. + + Returns a pair (n, d) of integers. When called on an infinity + or NaN, raises OverflowError or ValueError respectively. + + >>> Decimal('3.14').as_integer_ratio() + (157, 50) + >>> Decimal('-123e5').as_integer_ratio() + (-12300000, 1) + >>> Decimal('0.00').as_integer_ratio() + (0, 1) + + 'u'Express a finite Decimal instance in the form n / d. + + Returns a pair (n, d) of integers. When called on an infinity + or NaN, raises OverflowError or ValueError respectively. + + >>> Decimal('3.14').as_integer_ratio() + (157, 50) + >>> Decimal('-123e5').as_integer_ratio() + (-12300000, 1) + >>> Decimal('0.00').as_integer_ratio() + (0, 1) + + 'b'cannot convert NaN to integer ratio'u'cannot convert NaN to integer ratio'b'cannot convert Infinity to integer ratio'u'cannot convert Infinity to integer ratio'b'Represents the number as an instance of Decimal.'u'Represents the number as an instance of Decimal.'b'Decimal('%s')'u'Decimal('%s')'b'Return string representation of the number in scientific notation. + + Captures all of the information in the underlying representation. + 'u'Return string representation of the number in scientific notation. + + Captures all of the information in the underlying representation. + 'b'Infinity'u'Infinity'b'NaN'u'NaN'b'e'u'e'b'E'u'E'b'%+d'u'%+d'b'Convert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. + 'u'Convert to a string, using engineering notation if an exponent is needed. + + Engineering notation has an exponent which is a multiple of 3. This + can leave up to 3 digits to the left of the decimal place and may + require the addition of either one or two trailing zeros. + 'b'Returns a copy with the sign switched. + + Rounds, if it has reason. + 'u'Returns a copy with the sign switched. + + Rounds, if it has reason. + 'b'Returns a copy, unless it is a sNaN. + + Rounds the number (if more than precision digits) + 'u'Returns a copy, unless it is a sNaN. 
+ + Rounds the number (if more than precision digits) + 'b'Returns the absolute value of self. + + If the keyword argument 'round' is false, do not round. The + expression self.__abs__(round=False) is equivalent to + self.copy_abs(). + 'u'Returns the absolute value of self. + + If the keyword argument 'round' is false, do not round. The + expression self.__abs__(round=False) is equivalent to + self.copy_abs(). + 'b'Returns self + other. + + -INF + INF (or the reverse) cause InvalidOperation errors. + 'u'Returns self + other. + + -INF + INF (or the reverse) cause InvalidOperation errors. + 'b'-INF + INF'u'-INF + INF'b'Return self - other'u'Return self - other'b'Return other - self'u'Return other - self'b'Return self * other. + + (+-) INF * 0 (or its reverse) raise InvalidOperation. + 'u'Return self * other. + + (+-) INF * 0 (or its reverse) raise InvalidOperation. + 'b'(+-)INF * 0'u'(+-)INF * 0'b'0 * (+-)INF'u'0 * (+-)INF'b'Return self / other.'u'Return self / other.'b'(+-)INF/(+-)INF'u'(+-)INF/(+-)INF'b'Division by infinity'u'Division by infinity'b'0 / 0'u'0 / 0'b'x / 0'u'x / 0'b'Return (self // other, self % other), to context.prec precision. + + Assumes that neither self nor other is a NaN, that self is not + infinite and that other is nonzero. + 'u'Return (self // other, self % other), to context.prec precision. + + Assumes that neither self nor other is a NaN, that self is not + infinite and that other is nonzero. + 'b'quotient too large in //, % or divmod'u'quotient too large in //, % or divmod'b'Swaps self/other and returns __truediv__.'u'Swaps self/other and returns __truediv__.'b' + Return (self // other, self % other) + 'u' + Return (self // other, self % other) + 'b'divmod(INF, INF)'u'divmod(INF, INF)'b'INF % x'u'INF % x'b'divmod(0, 0)'u'divmod(0, 0)'b'x // 0'u'x // 0'b'x % 0'u'x % 0'b'Swaps self/other and returns __divmod__.'u'Swaps self/other and returns __divmod__.'b' + self % other + 'u' + self % other + 'b'0 % 0'u'0 % 0'b'Swaps self/other and returns __mod__.'u'Swaps self/other and returns __mod__.'b' + Remainder nearest to 0- abs(remainder-near) <= other/2 + 'u' + Remainder nearest to 0- abs(remainder-near) <= other/2 + 'b'remainder_near(infinity, x)'u'remainder_near(infinity, x)'b'remainder_near(x, 0)'u'remainder_near(x, 0)'b'remainder_near(0, 0)'u'remainder_near(0, 0)'b'self // other'u'self // other'b'INF // INF'u'INF // INF'b'0 // 0'u'0 // 0'b'Swaps self/other and returns __floordiv__.'u'Swaps self/other and returns __floordiv__.'b'Float representation.'u'Float representation.'b'Cannot convert signaling NaN to float'u'Cannot convert signaling NaN to float'b'-nan'u'-nan'b'nan'u'nan'b'Converts self to an int, truncating if necessary.'u'Converts self to an int, truncating if necessary.'b'Cannot convert NaN to integer'u'Cannot convert NaN to integer'b'Cannot convert infinity to integer'u'Cannot convert infinity to integer'b'Decapitate the payload of a NaN to fit the context'u'Decapitate the payload of a NaN to fit the context'b'Round if it is necessary to keep self within prec precision. + + Rounds and fixes the exponent. Does not raise on a sNaN. + + Arguments: + self - Decimal instance + context - context used. + 'u'Round if it is necessary to keep self within prec precision. + + Rounds and fixes the exponent. Does not raise on a sNaN. + + Arguments: + self - Decimal instance + context - context used. 
+ 'b'above Emax'u'above Emax'b'Also known as round-towards-0, truncate.'u'Also known as round-towards-0, truncate.'b'Rounds away from 0.'u'Rounds away from 0.'b'Rounds 5 up (away from 0)'u'Rounds 5 up (away from 0)'b'56789'u'56789'b'Round 5 down'u'Round 5 down'b'Round 5 to even, rest to nearest.'u'Round 5 to even, rest to nearest.'b'02468'u'02468'b'Rounds up (not away from 0 if negative.)'u'Rounds up (not away from 0 if negative.)'b'Rounds down (not towards 0 if negative)'u'Rounds down (not towards 0 if negative)'b'Round down unless digit prec-1 is 0 or 5.'u'Round down unless digit prec-1 is 0 or 5.'b'05'u'05'b'Round self to the nearest integer, or to a given precision. + + If only one argument is supplied, round a finite Decimal + instance self to the nearest integer. If self is infinite or + a NaN then a Python exception is raised. If self is finite + and lies exactly halfway between two integers then it is + rounded to the integer with even last digit. + + >>> round(Decimal('123.456')) + 123 + >>> round(Decimal('-456.789')) + -457 + >>> round(Decimal('-3.0')) + -3 + >>> round(Decimal('2.5')) + 2 + >>> round(Decimal('3.5')) + 4 + >>> round(Decimal('Inf')) + Traceback (most recent call last): + ... + OverflowError: cannot round an infinity + >>> round(Decimal('NaN')) + Traceback (most recent call last): + ... + ValueError: cannot round a NaN + + If a second argument n is supplied, self is rounded to n + decimal places using the rounding mode for the current + context. + + For an integer n, round(self, -n) is exactly equivalent to + self.quantize(Decimal('1En')). + + >>> round(Decimal('123.456'), 0) + Decimal('123') + >>> round(Decimal('123.456'), 2) + Decimal('123.46') + >>> round(Decimal('123.456'), -2) + Decimal('1E+2') + >>> round(Decimal('-Infinity'), 37) + Decimal('NaN') + >>> round(Decimal('sNaN123'), 0) + Decimal('NaN123') + + 'u'Round self to the nearest integer, or to a given precision. + + If only one argument is supplied, round a finite Decimal + instance self to the nearest integer. If self is infinite or + a NaN then a Python exception is raised. If self is finite + and lies exactly halfway between two integers then it is + rounded to the integer with even last digit. + + >>> round(Decimal('123.456')) + 123 + >>> round(Decimal('-456.789')) + -457 + >>> round(Decimal('-3.0')) + -3 + >>> round(Decimal('2.5')) + 2 + >>> round(Decimal('3.5')) + 4 + >>> round(Decimal('Inf')) + Traceback (most recent call last): + ... + OverflowError: cannot round an infinity + >>> round(Decimal('NaN')) + Traceback (most recent call last): + ... + ValueError: cannot round a NaN + + If a second argument n is supplied, self is rounded to n + decimal places using the rounding mode for the current + context. + + For an integer n, round(self, -n) is exactly equivalent to + self.quantize(Decimal('1En')). + + >>> round(Decimal('123.456'), 0) + Decimal('123') + >>> round(Decimal('123.456'), 2) + Decimal('123.46') + >>> round(Decimal('123.456'), -2) + Decimal('1E+2') + >>> round(Decimal('-Infinity'), 37) + Decimal('NaN') + >>> round(Decimal('sNaN123'), 0) + Decimal('NaN123') + + 'b'Second argument to round should be integral'u'Second argument to round should be integral'b'cannot round a NaN'u'cannot round a NaN'b'cannot round an infinity'u'cannot round an infinity'b'Return the floor of self, as an integer. + + For a finite Decimal instance self, return the greatest + integer n such that n <= self. If self is infinite or a NaN + then a Python exception is raised. 
+ + 'u'Return the floor of self, as an integer. + + For a finite Decimal instance self, return the greatest + integer n such that n <= self. If self is infinite or a NaN + then a Python exception is raised. + + 'b'Return the ceiling of self, as an integer. + + For a finite Decimal instance self, return the least integer n + such that n >= self. If self is infinite or a NaN then a + Python exception is raised. + + 'u'Return the ceiling of self, as an integer. + + For a finite Decimal instance self, return the least integer n + such that n >= self. If self is infinite or a NaN then a + Python exception is raised. + + 'b'Fused multiply-add. + + Returns self*other+third with no rounding of the intermediate + product self*other. + + self and other are multiplied together, with no rounding of + the result. The third operand is then added to the result, + and a single final rounding is performed. + 'u'Fused multiply-add. + + Returns self*other+third with no rounding of the intermediate + product self*other. + + self and other are multiplied together, with no rounding of + the result. The third operand is then added to the result, + and a single final rounding is performed. + 'b'INF * 0 in fma'u'INF * 0 in fma'b'0 * INF in fma'u'0 * INF in fma'b'Three argument version of __pow__'u'Three argument version of __pow__'b'pow() 3rd argument not allowed unless all arguments are integers'u'pow() 3rd argument not allowed unless all arguments are integers'b'pow() 2nd argument cannot be negative when 3rd argument specified'u'pow() 2nd argument cannot be negative when 3rd argument specified'b'pow() 3rd argument cannot be 0'u'pow() 3rd argument cannot be 0'b'insufficient precision: pow() 3rd argument must not have more than precision digits'u'insufficient precision: pow() 3rd argument must not have more than precision digits'b'at least one of pow() 1st argument and 2nd argument must be nonzero; 0**0 is not defined'u'at least one of pow() 1st argument and 2nd argument must be nonzero; 0**0 is not defined'b'Attempt to compute self**other exactly. + + Given Decimals self and other and an integer p, attempt to + compute an exact result for the power self**other, with p + digits of precision. Return None if self**other is not + exactly representable in p digits. + + Assumes that elimination of special cases has already been + performed: self and other must both be nonspecial; self must + be positive and not numerically equal to 1; other must be + nonzero. For efficiency, other._exp should not be too large, + so that 10**abs(other._exp) is a feasible calculation.'u'Attempt to compute self**other exactly. + + Given Decimals self and other and an integer p, attempt to + compute an exact result for the power self**other, with p + digits of precision. Return None if self**other is not + exactly representable in p digits. + + Assumes that elimination of special cases has already been + performed: self and other must both be nonspecial; self must + be positive and not numerically equal to 1; other must be + nonzero. For efficiency, other._exp should not be too large, + so that 10**abs(other._exp) is a feasible calculation.'b'Return self ** other [ % modulo]. + + With two arguments, compute self**other. + + With three arguments, compute (self**other) % modulo. 
For the + three argument form, the following restrictions on the + arguments hold: + + - all three arguments must be integral + - other must be nonnegative + - either self or other (or both) must be nonzero + - modulo must be nonzero and must have at most p digits, + where p is the context precision. + + If any of these restrictions is violated the InvalidOperation + flag is raised. + + The result of pow(self, other, modulo) is identical to the + result that would be obtained by computing (self**other) % + modulo with unbounded precision, but is computed more + efficiently. It is always exact. + 'u'Return self ** other [ % modulo]. + + With two arguments, compute self**other. + + With three arguments, compute (self**other) % modulo. For the + three argument form, the following restrictions on the + arguments hold: + + - all three arguments must be integral + - other must be nonnegative + - either self or other (or both) must be nonzero + - modulo must be nonzero and must have at most p digits, + where p is the context precision. + + If any of these restrictions is violated the InvalidOperation + flag is raised. + + The result of pow(self, other, modulo) is identical to the + result that would be obtained by computing (self**other) % + modulo with unbounded precision, but is computed more + efficiently. It is always exact. + 'b'0 ** 0'u'0 ** 0'b'x ** y with x negative and y not an integer'u'x ** y with x negative and y not an integer'b'Swaps self/other and returns __pow__.'u'Swaps self/other and returns __pow__.'b'Normalize- strip trailing 0s, change anything equal to 0 to 0e0'u'Normalize- strip trailing 0s, change anything equal to 0 to 0e0'b'Quantize self so its exponent is the same as that of exp. + + Similar to self._rescale(exp._exp) but with error checking. + 'u'Quantize self so its exponent is the same as that of exp. + + Similar to self._rescale(exp._exp) but with error checking. + 'b'quantize with one INF'u'quantize with one INF'b'target exponent out of bounds in quantize'u'target exponent out of bounds in quantize'b'exponent of quantize result too large for current context'u'exponent of quantize result too large for current context'b'quantize result has too many digits for current context'u'quantize result has too many digits for current context'b'Return True if self and other have the same exponent; otherwise + return False. + + If either operand is a special value, the following rules are used: + * return True if both operands are infinities + * return True if both operands are NaNs + * otherwise, return False. + 'u'Return True if self and other have the same exponent; otherwise + return False. + + If either operand is a special value, the following rules are used: + * return True if both operands are infinities + * return True if both operands are NaNs + * otherwise, return False. + 'b'Rescale self so that the exponent is exp, either by padding with zeros + or by truncating digits, using the given rounding mode. + + Specials are returned without change. This operation is + quiet: it raises no flags, and uses no information from the + context. + + exp = exp to scale to (an integer) + rounding = rounding mode + 'u'Rescale self so that the exponent is exp, either by padding with zeros + or by truncating digits, using the given rounding mode. + + Specials are returned without change. This operation is + quiet: it raises no flags, and uses no information from the + context. 
+ + exp = exp to scale to (an integer) + rounding = rounding mode + 'b'Round a nonzero, nonspecial Decimal to a fixed number of + significant figures, using the given rounding mode. + + Infinities, NaNs and zeros are returned unaltered. + + This operation is quiet: it raises no flags, and uses no + information from the context. + + 'u'Round a nonzero, nonspecial Decimal to a fixed number of + significant figures, using the given rounding mode. + + Infinities, NaNs and zeros are returned unaltered. + + This operation is quiet: it raises no flags, and uses no + information from the context. + + 'b'argument should be at least 1 in _round'u'argument should be at least 1 in _round'b'Rounds to a nearby integer. + + If no rounding mode is specified, take the rounding mode from + the context. This method raises the Rounded and Inexact flags + when appropriate. + + See also: to_integral_value, which does exactly the same as + this method except that it doesn't raise Inexact or Rounded. + 'u'Rounds to a nearby integer. + + If no rounding mode is specified, take the rounding mode from + the context. This method raises the Rounded and Inexact flags + when appropriate. + + See also: to_integral_value, which does exactly the same as + this method except that it doesn't raise Inexact or Rounded. + 'b'Rounds to the nearest integer, without raising inexact, rounded.'u'Rounds to the nearest integer, without raising inexact, rounded.'b'Return the square root of self.'u'Return the square root of self.'b'sqrt(-x), x > 0'u'sqrt(-x), x > 0'b'Returns the larger value. + + Like max(self, other) except if one is not a number, returns + NaN (and signals if one is sNaN). Also rounds. + 'u'Returns the larger value. + + Like max(self, other) except if one is not a number, returns + NaN (and signals if one is sNaN). Also rounds. + 'b'Returns the smaller value. + + Like min(self, other) except if one is not a number, returns + NaN (and signals if one is sNaN). Also rounds. + 'u'Returns the smaller value. + + Like min(self, other) except if one is not a number, returns + NaN (and signals if one is sNaN). Also rounds. + 'b'Returns whether self is an integer'u'Returns whether self is an integer'b'Returns True if self is even. Assumes self is an integer.'u'Returns True if self is even. Assumes self is an integer.'b'Return the adjusted exponent of self'u'Return the adjusted exponent of self'b'Returns the same Decimal object. + + As we do not have different encodings for the same number, the + received object already is in its canonical form. + 'u'Returns the same Decimal object. + + As we do not have different encodings for the same number, the + received object already is in its canonical form. + 'b'Compares self to the other operand numerically. + + It's pretty much like compare(), but all NaNs signal, with signaling + NaNs taking precedence over quiet NaNs. + 'u'Compares self to the other operand numerically. + + It's pretty much like compare(), but all NaNs signal, with signaling + NaNs taking precedence over quiet NaNs. + 'b'Compares self to other using the abstract representations. + + This is not like the standard compare, which use their numerical + value. Note that a total ordering is defined for all possible abstract + representations. + 'u'Compares self to other using the abstract representations. + + This is not like the standard compare, which use their numerical + value. Note that a total ordering is defined for all possible abstract + representations. 
+ 'b'Compares self to other using abstract repr., ignoring sign. + + Like compare_total, but with operand's sign ignored and assumed to be 0. + 'u'Compares self to other using abstract repr., ignoring sign. + + Like compare_total, but with operand's sign ignored and assumed to be 0. + 'b'Returns a copy with the sign set to 0. 'u'Returns a copy with the sign set to 0. 'b'Returns a copy with the sign inverted.'u'Returns a copy with the sign inverted.'b'Returns self with the sign of other.'u'Returns self with the sign of other.'b'Returns e ** self.'u'Returns e ** self.'b'Return True if self is canonical; otherwise return False. + + Currently, the encoding of a Decimal instance is always + canonical, so this method returns True for any Decimal. + 'u'Return True if self is canonical; otherwise return False. + + Currently, the encoding of a Decimal instance is always + canonical, so this method returns True for any Decimal. + 'b'Return True if self is finite; otherwise return False. + + A Decimal instance is considered finite if it is neither + infinite nor a NaN. + 'u'Return True if self is finite; otherwise return False. + + A Decimal instance is considered finite if it is neither + infinite nor a NaN. + 'b'Return True if self is infinite; otherwise return False.'u'Return True if self is infinite; otherwise return False.'b'Return True if self is a qNaN or sNaN; otherwise return False.'u'Return True if self is a qNaN or sNaN; otherwise return False.'b'Return True if self is a normal number; otherwise return False.'u'Return True if self is a normal number; otherwise return False.'b'Return True if self is a quiet NaN; otherwise return False.'u'Return True if self is a quiet NaN; otherwise return False.'b'Return True if self is negative; otherwise return False.'u'Return True if self is negative; otherwise return False.'b'Return True if self is a signaling NaN; otherwise return False.'u'Return True if self is a signaling NaN; otherwise return False.'b'Return True if self is subnormal; otherwise return False.'u'Return True if self is subnormal; otherwise return False.'b'Return True if self is a zero; otherwise return False.'u'Return True if self is a zero; otherwise return False.'b'Compute a lower bound for the adjusted exponent of self.ln(). + In other words, compute r such that self.ln() >= 10**r. Assumes + that self is finite and positive and that self != 1. + 'u'Compute a lower bound for the adjusted exponent of self.ln(). + In other words, compute r such that self.ln() >= 10**r. Assumes + that self is finite and positive and that self != 1. + 'b'Returns the natural (base e) logarithm of self.'u'Returns the natural (base e) logarithm of self.'b'ln of a negative value'u'ln of a negative value'b'Compute a lower bound for the adjusted exponent of self.log10(). + In other words, find r such that self.log10() >= 10**r. + Assumes that self is finite and positive and that self != 1. + 'u'Compute a lower bound for the adjusted exponent of self.log10(). + In other words, find r such that self.log10() >= 10**r. + Assumes that self is finite and positive and that self != 1. + 'b'231'u'231'b'Returns the base 10 logarithm of self.'u'Returns the base 10 logarithm of self.'b'log10 of a negative value'u'log10 of a negative value'b' Returns the exponent of the magnitude of self's MSD. 
+ + The result is the integer which is the exponent of the magnitude + of the most significant digit of self (as though it were truncated + to a single digit while maintaining the value of that digit and + without limiting the resulting exponent). + 'u' Returns the exponent of the magnitude of self's MSD. + + The result is the integer which is the exponent of the magnitude + of the most significant digit of self (as though it were truncated + to a single digit while maintaining the value of that digit and + without limiting the resulting exponent). + 'b'logb(0)'u'logb(0)'b'Return True if self is a logical operand. + + For being logical, it must be a finite number with a sign of 0, + an exponent of 0, and a coefficient whose digits must all be + either 0 or 1. + 'u'Return True if self is a logical operand. + + For being logical, it must be a finite number with a sign of 0, + an exponent of 0, and a coefficient whose digits must all be + either 0 or 1. + 'b'01'u'01'b'Applies an 'and' operation between self and other's digits.'u'Applies an 'and' operation between self and other's digits.'b'Invert all its digits.'u'Invert all its digits.'b'Applies an 'or' operation between self and other's digits.'u'Applies an 'or' operation between self and other's digits.'b'Applies an 'xor' operation between self and other's digits.'u'Applies an 'xor' operation between self and other's digits.'b'Compares the values numerically with their sign ignored.'u'Compares the values numerically with their sign ignored.'b'Returns the largest representable number smaller than itself.'u'Returns the largest representable number smaller than itself.'b'Returns the smallest representable number larger than itself.'u'Returns the smallest representable number larger than itself.'b'Returns the number closest to self, in the direction towards other. + + The result is the closest representable number to self + (excluding self) that is in the direction towards other, + unless both have the same value. If the two operands are + numerically equal, then the result is a copy of self with the + sign set to be the same as the sign of other. + 'u'Returns the number closest to self, in the direction towards other. + + The result is the closest representable number to self + (excluding self) that is in the direction towards other, + unless both have the same value. If the two operands are + numerically equal, then the result is a copy of self with the + sign set to be the same as the sign of other. + 'b'Infinite result from next_toward'u'Infinite result from next_toward'b'Returns an indication of the class of self. + + The class is one of the following strings: + sNaN + NaN + -Infinity + -Normal + -Subnormal + -Zero + +Zero + +Subnormal + +Normal + +Infinity + 'u'Returns an indication of the class of self. 
+ + The class is one of the following strings: + sNaN + NaN + -Infinity + -Normal + -Subnormal + -Zero + +Zero + +Subnormal + +Normal + +Infinity + 'b'+Infinity'u'+Infinity'b'-Infinity'u'-Infinity'b'-Zero'u'-Zero'b'+Zero'u'+Zero'b'-Subnormal'u'-Subnormal'b'+Subnormal'u'+Subnormal'b'-Normal'u'-Normal'b'+Normal'u'+Normal'b'Just returns 10, as this is Decimal, :)'u'Just returns 10, as this is Decimal, :)'b'Returns a rotated copy of self, value-of-other times.'u'Returns a rotated copy of self, value-of-other times.'b'Returns self operand after adding the second value to its exp.'u'Returns self operand after adding the second value to its exp.'b'Returns a shifted copy of self, value-of-other times.'u'Returns a shifted copy of self, value-of-other times.'b'Format a Decimal instance according to the given specifier. + + The specifier should be a standard format specifier, with the + form described in PEP 3101. Formatting types 'e', 'E', 'f', + 'F', 'g', 'G', 'n' and '%' are supported. If the formatting + type is omitted it defaults to 'g' or 'G', depending on the + value of context.capitals. + 'u'Format a Decimal instance according to the given specifier. + + The specifier should be a standard format specifier, with the + form described in PEP 3101. Formatting types 'e', 'E', 'f', + 'F', 'g', 'G', 'n' and '%' are supported. If the formatting + type is omitted it defaults to 'g' or 'G', depending on the + value of context.capitals. + 'b'G'u'G'b'precision'u'precision'b'eE'u'eE'b'fF%'u'fF%'b'gG'u'gG'b'Create a decimal instance directly, without any validation, + normalization (e.g. removal of leading zeros) or argument + conversion. + + This function is for *internal use only*. + 'u'Create a decimal instance directly, without any validation, + normalization (e.g. removal of leading zeros) or argument + conversion. + + This function is for *internal use only*. + 'b'Context manager class to support localcontext(). + + Sets a copy of the supplied context in __enter__() and restores + the previous decimal context in __exit__() + 'u'Context manager class to support localcontext(). + + Sets a copy of the supplied context in __enter__() and restores + the previous decimal context in __exit__() + 'b'Contains the context for a Decimal instance. + + Contains: + prec - precision (for use in rounding, division, square roots..) + rounding - rounding type (how you round) + traps - If traps[exception] = 1, then the exception is + raised when it is caused. Otherwise, a value is + substituted in. + flags - When an exception is caused, flags[exception] is set. + (Whether or not the trap_enabler is set) + Should be reset by user of Decimal instance. + Emin - Minimum exponent + Emax - Maximum exponent + capitals - If 1, 1*10^1 is printed as 1E+1. + If 0, printed as 1e1 + clamp - If 1, change exponents if too high (Default 0) + 'u'Contains the context for a Decimal instance. + + Contains: + prec - precision (for use in rounding, division, square roots..) + rounding - rounding type (how you round) + traps - If traps[exception] = 1, then the exception is + raised when it is caused. Otherwise, a value is + substituted in. + flags - When an exception is caused, flags[exception] is set. + (Whether or not the trap_enabler is set) + Should be reset by user of Decimal instance. + Emin - Minimum exponent + Emax - Maximum exponent + capitals - If 1, 1*10^1 is printed as 1E+1. 
+ If 0, printed as 1e1 + clamp - If 1, change exponents if too high (Default 0) + 'b'%s must be an integer'u'%s must be an integer'b'-inf'u'-inf'b'%s must be in [%s, %d]. got: %s'u'%s must be in [%s, %d]. got: %s'b'inf'u'inf'b'%s must be in [%d, %s]. got: %s'u'%s must be in [%d, %s]. got: %s'b'%s must be in [%d, %d]. got %s'u'%s must be in [%d, %d]. got %s'b'%s must be a signal dict'u'%s must be a signal dict'b'%s is not a valid signal dict'u'%s is not a valid signal dict'b'prec'u'prec'b'Emin'u'Emin'b'Emax'u'Emax'b'capitals'u'capitals'b'clamp'u'clamp'b'rounding'u'rounding'b'%s: invalid rounding mode'u'%s: invalid rounding mode'b'flags'u'flags'b'traps'u'traps'b'_ignored_flags'u'_ignored_flags'b''decimal.Context' object has no attribute '%s''u''decimal.Context' object has no attribute '%s''b'%s cannot be deleted'u'%s cannot be deleted'b'Show the current context.'u'Show the current context.'b'Context(prec=%(prec)d, rounding=%(rounding)s, Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d, clamp=%(clamp)d'u'Context(prec=%(prec)d, rounding=%(rounding)s, Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d, clamp=%(clamp)d'b'flags=['u'flags=['b'traps=['u'traps=['b'Reset all flags to zero'u'Reset all flags to zero'b'Reset all traps to zero'u'Reset all traps to zero'b'Returns a shallow copy from self.'u'Returns a shallow copy from self.'b'Returns a deep copy from self.'u'Returns a deep copy from self.'b'Handles an error + + If the flag is in _ignored_flags, returns the default response. + Otherwise, it sets the flag, then, if the corresponding + trap_enabler is set, it reraises the exception. Otherwise, it returns + the default value after setting the flag. + 'u'Handles an error + + If the flag is in _ignored_flags, returns the default response. + Otherwise, it sets the flag, then, if the corresponding + trap_enabler is set, it reraises the exception. Otherwise, it returns + the default value after setting the flag. + 'b'Ignore all flags, if they are raised'u'Ignore all flags, if they are raised'b'Ignore the flags, if they are raised'u'Ignore the flags, if they are raised'b'Stop ignoring the flags, if they are raised'u'Stop ignoring the flags, if they are raised'b'Returns Etiny (= Emin - prec + 1)'u'Returns Etiny (= Emin - prec + 1)'b'Returns maximum exponent (= Emax - prec + 1)'u'Returns maximum exponent (= Emax - prec + 1)'b'Sets the rounding type. + + Sets the rounding type, and returns the current (previous) + rounding type. Often used like: + + context = context.copy() + # so you don't change the calling context + # if an error occurs in the middle. + rounding = context._set_rounding(ROUND_UP) + val = self.__sub__(other, context=context) + context._set_rounding(rounding) + + This will make it round up for that operation. + 'u'Sets the rounding type. + + Sets the rounding type, and returns the current (previous) + rounding type. Often used like: + + context = context.copy() + # so you don't change the calling context + # if an error occurs in the middle. + rounding = context._set_rounding(ROUND_UP) + val = self.__sub__(other, context=context) + context._set_rounding(rounding) + + This will make it round up for that operation. + 'b'Creates a new Decimal instance but using self as context. + + This method implements the to-number operation of the + IBM Decimal specification.'u'Creates a new Decimal instance but using self as context. 
+ + This method implements the to-number operation of the + IBM Decimal specification.'b'trailing or leading whitespace and underscores are not permitted.'u'trailing or leading whitespace and underscores are not permitted.'b'diagnostic info too long in NaN'u'diagnostic info too long in NaN'b'Creates a new Decimal instance from a float but rounding using self + as the context. + + >>> context = Context(prec=5, rounding=ROUND_DOWN) + >>> context.create_decimal_from_float(3.1415926535897932) + Decimal('3.1415') + >>> context = Context(prec=5, traps=[Inexact]) + >>> context.create_decimal_from_float(3.1415926535897932) + Traceback (most recent call last): + ... + decimal.Inexact: None + + 'u'Creates a new Decimal instance from a float but rounding using self + as the context. + + >>> context = Context(prec=5, rounding=ROUND_DOWN) + >>> context.create_decimal_from_float(3.1415926535897932) + Decimal('3.1415') + >>> context = Context(prec=5, traps=[Inexact]) + >>> context.create_decimal_from_float(3.1415926535897932) + Traceback (most recent call last): + ... + decimal.Inexact: None + + 'b'Returns the absolute value of the operand. + + If the operand is negative, the result is the same as using the minus + operation on the operand. Otherwise, the result is the same as using + the plus operation on the operand. + + >>> ExtendedContext.abs(Decimal('2.1')) + Decimal('2.1') + >>> ExtendedContext.abs(Decimal('-100')) + Decimal('100') + >>> ExtendedContext.abs(Decimal('101.5')) + Decimal('101.5') + >>> ExtendedContext.abs(Decimal('-101.5')) + Decimal('101.5') + >>> ExtendedContext.abs(-1) + Decimal('1') + 'u'Returns the absolute value of the operand. + + If the operand is negative, the result is the same as using the minus + operation on the operand. Otherwise, the result is the same as using + the plus operation on the operand. + + >>> ExtendedContext.abs(Decimal('2.1')) + Decimal('2.1') + >>> ExtendedContext.abs(Decimal('-100')) + Decimal('100') + >>> ExtendedContext.abs(Decimal('101.5')) + Decimal('101.5') + >>> ExtendedContext.abs(Decimal('-101.5')) + Decimal('101.5') + >>> ExtendedContext.abs(-1) + Decimal('1') + 'b'Return the sum of the two operands. + + >>> ExtendedContext.add(Decimal('12'), Decimal('7.00')) + Decimal('19.00') + >>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4')) + Decimal('1.02E+4') + >>> ExtendedContext.add(1, Decimal(2)) + Decimal('3') + >>> ExtendedContext.add(Decimal(8), 5) + Decimal('13') + >>> ExtendedContext.add(5, 5) + Decimal('10') + 'u'Return the sum of the two operands. + + >>> ExtendedContext.add(Decimal('12'), Decimal('7.00')) + Decimal('19.00') + >>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4')) + Decimal('1.02E+4') + >>> ExtendedContext.add(1, Decimal(2)) + Decimal('3') + >>> ExtendedContext.add(Decimal(8), 5) + Decimal('13') + >>> ExtendedContext.add(5, 5) + Decimal('10') + 'b'Unable to convert %s to Decimal'u'Unable to convert %s to Decimal'b'Returns the same Decimal object. + + As we do not have different encodings for the same number, the + received object already is in its canonical form. + + >>> ExtendedContext.canonical(Decimal('2.50')) + Decimal('2.50') + 'u'Returns the same Decimal object. + + As we do not have different encodings for the same number, the + received object already is in its canonical form. + + >>> ExtendedContext.canonical(Decimal('2.50')) + Decimal('2.50') + 'b'canonical requires a Decimal as an argument.'u'canonical requires a Decimal as an argument.'b'Compares values numerically. 
+ + If the signs of the operands differ, a value representing each operand + ('-1' if the operand is less than zero, '0' if the operand is zero or + negative zero, or '1' if the operand is greater than zero) is used in + place of that operand for the comparison instead of the actual + operand. + + The comparison is then effected by subtracting the second operand from + the first and then returning a value according to the result of the + subtraction: '-1' if the result is less than zero, '0' if the result is + zero or negative zero, or '1' if the result is greater than zero. + + >>> ExtendedContext.compare(Decimal('2.1'), Decimal('3')) + Decimal('-1') + >>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1')) + Decimal('0') + >>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10')) + Decimal('0') + >>> ExtendedContext.compare(Decimal('3'), Decimal('2.1')) + Decimal('1') + >>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3')) + Decimal('1') + >>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1')) + Decimal('-1') + >>> ExtendedContext.compare(1, 2) + Decimal('-1') + >>> ExtendedContext.compare(Decimal(1), 2) + Decimal('-1') + >>> ExtendedContext.compare(1, Decimal(2)) + Decimal('-1') + 'u'Compares values numerically. + + If the signs of the operands differ, a value representing each operand + ('-1' if the operand is less than zero, '0' if the operand is zero or + negative zero, or '1' if the operand is greater than zero) is used in + place of that operand for the comparison instead of the actual + operand. + + The comparison is then effected by subtracting the second operand from + the first and then returning a value according to the result of the + subtraction: '-1' if the result is less than zero, '0' if the result is + zero or negative zero, or '1' if the result is greater than zero. + + >>> ExtendedContext.compare(Decimal('2.1'), Decimal('3')) + Decimal('-1') + >>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1')) + Decimal('0') + >>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10')) + Decimal('0') + >>> ExtendedContext.compare(Decimal('3'), Decimal('2.1')) + Decimal('1') + >>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3')) + Decimal('1') + >>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1')) + Decimal('-1') + >>> ExtendedContext.compare(1, 2) + Decimal('-1') + >>> ExtendedContext.compare(Decimal(1), 2) + Decimal('-1') + >>> ExtendedContext.compare(1, Decimal(2)) + Decimal('-1') + 'b'Compares the values of the two operands numerically. + + It's pretty much like compare(), but all NaNs signal, with signaling + NaNs taking precedence over quiet NaNs. + + >>> c = ExtendedContext + >>> c.compare_signal(Decimal('2.1'), Decimal('3')) + Decimal('-1') + >>> c.compare_signal(Decimal('2.1'), Decimal('2.1')) + Decimal('0') + >>> c.flags[InvalidOperation] = 0 + >>> print(c.flags[InvalidOperation]) + 0 + >>> c.compare_signal(Decimal('NaN'), Decimal('2.1')) + Decimal('NaN') + >>> print(c.flags[InvalidOperation]) + 1 + >>> c.flags[InvalidOperation] = 0 + >>> print(c.flags[InvalidOperation]) + 0 + >>> c.compare_signal(Decimal('sNaN'), Decimal('2.1')) + Decimal('NaN') + >>> print(c.flags[InvalidOperation]) + 1 + >>> c.compare_signal(-1, 2) + Decimal('-1') + >>> c.compare_signal(Decimal(-1), 2) + Decimal('-1') + >>> c.compare_signal(-1, Decimal(2)) + Decimal('-1') + 'u'Compares the values of the two operands numerically. + + It's pretty much like compare(), but all NaNs signal, with signaling + NaNs taking precedence over quiet NaNs. 
+ + >>> c = ExtendedContext + >>> c.compare_signal(Decimal('2.1'), Decimal('3')) + Decimal('-1') + >>> c.compare_signal(Decimal('2.1'), Decimal('2.1')) + Decimal('0') + >>> c.flags[InvalidOperation] = 0 + >>> print(c.flags[InvalidOperation]) + 0 + >>> c.compare_signal(Decimal('NaN'), Decimal('2.1')) + Decimal('NaN') + >>> print(c.flags[InvalidOperation]) + 1 + >>> c.flags[InvalidOperation] = 0 + >>> print(c.flags[InvalidOperation]) + 0 + >>> c.compare_signal(Decimal('sNaN'), Decimal('2.1')) + Decimal('NaN') + >>> print(c.flags[InvalidOperation]) + 1 + >>> c.compare_signal(-1, 2) + Decimal('-1') + >>> c.compare_signal(Decimal(-1), 2) + Decimal('-1') + >>> c.compare_signal(-1, Decimal(2)) + Decimal('-1') + 'b'Compares two operands using their abstract representation. + + This is not like the standard compare, which use their numerical + value. Note that a total ordering is defined for all possible abstract + representations. + + >>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9')) + Decimal('-1') + >>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12')) + Decimal('-1') + >>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3')) + Decimal('-1') + >>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30')) + Decimal('0') + >>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300')) + Decimal('1') + >>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN')) + Decimal('-1') + >>> ExtendedContext.compare_total(1, 2) + Decimal('-1') + >>> ExtendedContext.compare_total(Decimal(1), 2) + Decimal('-1') + >>> ExtendedContext.compare_total(1, Decimal(2)) + Decimal('-1') + 'u'Compares two operands using their abstract representation. + + This is not like the standard compare, which use their numerical + value. Note that a total ordering is defined for all possible abstract + representations. + + >>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9')) + Decimal('-1') + >>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12')) + Decimal('-1') + >>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3')) + Decimal('-1') + >>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30')) + Decimal('0') + >>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300')) + Decimal('1') + >>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN')) + Decimal('-1') + >>> ExtendedContext.compare_total(1, 2) + Decimal('-1') + >>> ExtendedContext.compare_total(Decimal(1), 2) + Decimal('-1') + >>> ExtendedContext.compare_total(1, Decimal(2)) + Decimal('-1') + 'b'Compares two operands using their abstract representation ignoring sign. + + Like compare_total, but with operand's sign ignored and assumed to be 0. + 'u'Compares two operands using their abstract representation ignoring sign. + + Like compare_total, but with operand's sign ignored and assumed to be 0. + 'b'Returns a copy of the operand with the sign set to 0. + + >>> ExtendedContext.copy_abs(Decimal('2.1')) + Decimal('2.1') + >>> ExtendedContext.copy_abs(Decimal('-100')) + Decimal('100') + >>> ExtendedContext.copy_abs(-1) + Decimal('1') + 'u'Returns a copy of the operand with the sign set to 0. + + >>> ExtendedContext.copy_abs(Decimal('2.1')) + Decimal('2.1') + >>> ExtendedContext.copy_abs(Decimal('-100')) + Decimal('100') + >>> ExtendedContext.copy_abs(-1) + Decimal('1') + 'b'Returns a copy of the decimal object. 
+ + >>> ExtendedContext.copy_decimal(Decimal('2.1')) + Decimal('2.1') + >>> ExtendedContext.copy_decimal(Decimal('-1.00')) + Decimal('-1.00') + >>> ExtendedContext.copy_decimal(1) + Decimal('1') + 'u'Returns a copy of the decimal object. + + >>> ExtendedContext.copy_decimal(Decimal('2.1')) + Decimal('2.1') + >>> ExtendedContext.copy_decimal(Decimal('-1.00')) + Decimal('-1.00') + >>> ExtendedContext.copy_decimal(1) + Decimal('1') + 'b'Returns a copy of the operand with the sign inverted. + + >>> ExtendedContext.copy_negate(Decimal('101.5')) + Decimal('-101.5') + >>> ExtendedContext.copy_negate(Decimal('-101.5')) + Decimal('101.5') + >>> ExtendedContext.copy_negate(1) + Decimal('-1') + 'u'Returns a copy of the operand with the sign inverted. + + >>> ExtendedContext.copy_negate(Decimal('101.5')) + Decimal('-101.5') + >>> ExtendedContext.copy_negate(Decimal('-101.5')) + Decimal('101.5') + >>> ExtendedContext.copy_negate(1) + Decimal('-1') + 'b'Copies the second operand's sign to the first one. + + In detail, it returns a copy of the first operand with the sign + equal to the sign of the second operand. + + >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33')) + Decimal('1.50') + >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33')) + Decimal('1.50') + >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33')) + Decimal('-1.50') + >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33')) + Decimal('-1.50') + >>> ExtendedContext.copy_sign(1, -2) + Decimal('-1') + >>> ExtendedContext.copy_sign(Decimal(1), -2) + Decimal('-1') + >>> ExtendedContext.copy_sign(1, Decimal(-2)) + Decimal('-1') + 'u'Copies the second operand's sign to the first one. + + In detail, it returns a copy of the first operand with the sign + equal to the sign of the second operand. + + >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33')) + Decimal('1.50') + >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33')) + Decimal('1.50') + >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33')) + Decimal('-1.50') + >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33')) + Decimal('-1.50') + >>> ExtendedContext.copy_sign(1, -2) + Decimal('-1') + >>> ExtendedContext.copy_sign(Decimal(1), -2) + Decimal('-1') + >>> ExtendedContext.copy_sign(1, Decimal(-2)) + Decimal('-1') + 'b'Decimal division in a specified context. + + >>> ExtendedContext.divide(Decimal('1'), Decimal('3')) + Decimal('0.333333333') + >>> ExtendedContext.divide(Decimal('2'), Decimal('3')) + Decimal('0.666666667') + >>> ExtendedContext.divide(Decimal('5'), Decimal('2')) + Decimal('2.5') + >>> ExtendedContext.divide(Decimal('1'), Decimal('10')) + Decimal('0.1') + >>> ExtendedContext.divide(Decimal('12'), Decimal('12')) + Decimal('1') + >>> ExtendedContext.divide(Decimal('8.00'), Decimal('2')) + Decimal('4.00') + >>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0')) + Decimal('1.20') + >>> ExtendedContext.divide(Decimal('1000'), Decimal('100')) + Decimal('10') + >>> ExtendedContext.divide(Decimal('1000'), Decimal('1')) + Decimal('1000') + >>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2')) + Decimal('1.20E+6') + >>> ExtendedContext.divide(5, 5) + Decimal('1') + >>> ExtendedContext.divide(Decimal(5), 5) + Decimal('1') + >>> ExtendedContext.divide(5, Decimal(5)) + Decimal('1') + 'u'Decimal division in a specified context. 
+ (pool data: paired bytes/unicode docstring literals from Python's decimal (_pydecimal) module, covering the Context arithmetic operations — divide, divide_int, divmod, exp, fma, the is_* predicates, ln, log10, logb, logical_and/logical_invert/logical_or/logical_xor, max, max_mag, min, min_mag, minus, multiply, next_minus, next_plus, next_toward, normalize, number_class, plus, power, quantize, radix, remainder, remainder_near, rotate, same_quantum, scaleb, shift, sqrt, subtract, to_eng_string, to_integral_exact, to_integral_value — together with their doctest examples and private numeric helpers such as _decimal_lshift_exact and _sqrt_nearest)
Returns a pair of integers (c, e) such that: + + 10**(p-1) <= c <= 10**p, and + (c-1)*10**e < x**y < (c+1)*10**e + + in other words, c*10**e is an approximation to x**y with p digits + of precision, and with an error in c of at most 1. (This is + almost, but not quite, the same as the error being < 1ulp: when c + == 10**(p-1) we can only guarantee error < 10ulp.) + + We assume that: x is positive and not equal to 1, and y is nonzero. + 'b'Compute a lower bound for 100*log10(c) for a positive integer c.'u'Compute a lower bound for 100*log10(c) for a positive integer c.'b'The argument to _log10_lb should be nonnegative.'u'The argument to _log10_lb should be nonnegative.'b'Convert other to Decimal. + + Verifies that it's ok to use in an implicit construction. + If allow_float is true, allow conversion from float; this + is used in the comparison methods (__eq__ and friends). + + 'u'Convert other to Decimal. + + Verifies that it's ok to use in an implicit construction. + If allow_float is true, allow conversion from float; this + is used in the comparison methods (__eq__ and friends). + + 'b'Given a Decimal instance self and a Python object other, return + a pair (s, o) of Decimal instances such that "s op o" is + equivalent to "self op other" for any of the 6 comparison + operators "op". + + 'u'Given a Decimal instance self and a Python object other, return + a pair (s, o) of Decimal instances such that "s op o" is + equivalent to "self op other" for any of the 6 comparison + operators "op". + + 'b' # A numeric string consists of: +# \s* + (?P[-+])? # an optional sign, followed by either... + ( + (?=\d|\.\d) # ...a number (with at least one digit) + (?P\d*) # having a (possibly empty) integer part + (\.(?P\d*))? # followed by an optional fractional part + (E(?P[-+]?\d+))? # followed by an optional exponent, or... + | + Inf(inity)? # ...an infinity, or... + | + (?Ps)? # ...an (optionally signaling) + NaN # NaN + (?P\d*) # with (possibly empty) diagnostic info. + ) +# \s* + \Z +'u' # A numeric string consists of: +# \s* + (?P[-+])? # an optional sign, followed by either... + ( + (?=\d|\.\d) # ...a number (with at least one digit) + (?P\d*) # having a (possibly empty) integer part + (\.(?P\d*))? # followed by an optional fractional part + (E(?P[-+]?\d+))? # followed by an optional exponent, or... + | + Inf(inity)? # ...an infinity, or... + | + (?Ps)? # ...an (optionally signaling) + NaN # NaN + (?P\d*) # with (possibly empty) diagnostic info. + ) +# \s* + \Z +'b'0*$'u'0*$'b'50*$'u'50*$'b'\A +(?: + (?P.)? + (?P[<>=^]) +)? +(?P[-+ ])? +(?P\#)? +(?P0)? +(?P(?!0)\d+)? +(?P,)? +(?:\.(?P0|(?!0)\d+))? +(?P[eEfFgGn%])? +\Z +'u'\A +(?: + (?P.)? + (?P[<>=^]) +)? +(?P[-+ ])? +(?P\#)? +(?P0)? +(?P(?!0)\d+)? +(?P,)? +(?:\.(?P0|(?!0)\d+))? +(?P[eEfFgGn%])? +\Z +'b'Parse and validate a format specifier. + + Turns a standard numeric format specifier into a dict, with the + following entries: + + fill: fill character to pad field to minimum width + align: alignment type, either '<', '>', '=' or '^' + sign: either '+', '-' or ' ' + minimumwidth: nonnegative integer giving minimum width + zeropad: boolean, indicating whether to pad with zeros + thousands_sep: string to use as thousands separator, or '' + grouping: grouping for thousands separators, in format + used by localeconv + decimal_point: string to use for decimal point + precision: nonnegative integer giving precision, or None + type: one of the characters 'eEfFgG%', or None + + 'u'Parse and validate a format specifier. 
+ + Turns a standard numeric format specifier into a dict, with the + following entries: + + fill: fill character to pad field to minimum width + align: alignment type, either '<', '>', '=' or '^' + sign: either '+', '-' or ' ' + minimumwidth: nonnegative integer giving minimum width + zeropad: boolean, indicating whether to pad with zeros + thousands_sep: string to use as thousands separator, or '' + grouping: grouping for thousands separators, in format + used by localeconv + decimal_point: string to use for decimal point + precision: nonnegative integer giving precision, or None + type: one of the characters 'eEfFgG%', or None + + 'b'Invalid format specifier: 'u'Invalid format specifier: 'b'fill'u'fill'b'align'u'align'b'zeropad'u'zeropad'b'Fill character conflicts with '0' in format specifier: 'u'Fill character conflicts with '0' in format specifier: 'b'Alignment conflicts with '0' in format specifier: 'u'Alignment conflicts with '0' in format specifier: 'b'minimumwidth'u'minimumwidth'b'gGn'u'gGn'b'thousands_sep'u'thousands_sep'b'Explicit thousands separator conflicts with 'n' type in format specifier: 'u'Explicit thousands separator conflicts with 'n' type in format specifier: 'b'grouping'u'grouping'b'decimal_point'u'decimal_point'b'Given an unpadded, non-aligned numeric string 'body' and sign + string 'sign', add padding and alignment conforming to the given + format specifier dictionary 'spec' (as produced by + parse_format_specifier). + + 'u'Given an unpadded, non-aligned numeric string 'body' and sign + string 'sign', add padding and alignment conforming to the given + format specifier dictionary 'spec' (as produced by + parse_format_specifier). + + 'b'='u'='b'^'u'^'b'Unrecognised alignment field'u'Unrecognised alignment field'b'Convert a localeconv-style grouping into a (possibly infinite) + iterable of integers representing group lengths. + + 'u'Convert a localeconv-style grouping into a (possibly infinite) + iterable of integers representing group lengths. + + 'b'unrecognised format for grouping'u'unrecognised format for grouping'b'Insert thousands separators into a digit string. + + spec is a dictionary whose keys should include 'thousands_sep' and + 'grouping'; typically it's the result of parsing the format + specifier using _parse_format_specifier. + + The min_width keyword argument gives the minimum length of the + result, which will be padded on the left with zeros if necessary. + + If necessary, the zero padding adds an extra '0' on the left to + avoid a leading thousands separator. For example, inserting + commas every three digits in '123456', with min_width=8, gives + '0,123,456', even though that has length 9. + + 'u'Insert thousands separators into a digit string. + + spec is a dictionary whose keys should include 'thousands_sep' and + 'grouping'; typically it's the result of parsing the format + specifier using _parse_format_specifier. + + The min_width keyword argument gives the minimum length of the + result, which will be padded on the left with zeros if necessary. + + If necessary, the zero padding adds an extra '0' on the left to + avoid a leading thousands separator. For example, inserting + commas every three digits in '123456', with min_width=8, gives + '0,123,456', even though that has length 9. 
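The pool strings above preserve the docstrings and doctests for decimal's context arithmetic (sqrt, subtract), engineering-notation output (to_eng_string), and integer rounding (to_integral_value). A minimal sketch of those behaviors against the standard-library decimal module, with the expected results (taken from the doctests above) noted in comments:

    from decimal import Decimal, ExtendedContext

    ctx = ExtendedContext.copy()                    # 9 significant digits, ROUND_HALF_EVEN, no traps
    ctx.sqrt(Decimal('7'))                          # Decimal('2.64575131')
    ctx.subtract(Decimal('1.3'), Decimal('1.07'))   # Decimal('0.23')
    ctx.to_eng_string(Decimal('123E+1'))            # '1.23E+3'  (exponent forced to a multiple of 3)
    ctx.to_integral_value(Decimal('-101.5'))        # Decimal('-102')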
[CodeQL string pool (continued): decimal's number-formatting helpers (sign handling, thousands separators, _format_number), followed by interned names, constants, and docstrings from C extension modules — _queue, _random, _scproxy, _sha1/_sha256/_sha3/_sha512, signal, _socket, _sre, _ssl, _stat, _string, _struct, and _testcapi — including the signal, socket, stat, and struct module docstrings; again stored as paired bytes/str literals.]
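Among the interned docstrings summarized above is the struct module's format-string reference (byte-order prefixes and type codes). A short sketch of the pack/unpack round trip it describes, with results in comments:

    import struct

    packed = struct.pack('<hI', -7, 300)   # little-endian: 2-byte signed short + 4-byte unsigned int
    struct.unpack('<hI', packed)            # (-7, 300)
    struct.calcsize('<hI')                  # 6 bytes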
[CodeQL string pool (continued): _thread primitives (lock, RLock, ExceptHookArgs, thread-local storage) and the full _threading_local module docstring with its doctest walkthrough, repeated in bytes and str form, plus strings from _tracemalloc, _warnings, _weakref, and WeakSet, and the opening of the importlib.abc docstrings.]
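The pool also carries the _threading_local module docstring, whose examples show that attributes set on a threading.local instance are visible only in the thread that set them. A compact version of that demonstration (the worker/log names are illustrative, not from the pool):

    import threading

    data = threading.local()
    data.number = 42

    log = []
    def worker():
        log.append(hasattr(data, 'number'))   # False: a new thread starts with an empty local
        data.number = 11
        log.append(data.number)

    t = threading.Thread(target=worker)
    t.start()
    t.join()
    print(log)           # [False, 11]
    print(data.number)   # 42  (the main thread's value is unaffected)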
[CodeQL string pool (continued): the importlib.abc abstract base class docstrings — Finder, MetaPathFinder, PathEntryFinder, Loader, ResourceLoader, InspectLoader, ExecutionLoader, SourceLoader, and ResourceReader — together with their bytes/str duplicates; the pool text ends mid-string.]
+ This method is used by PathFinder.invalidate_caches(). + 'b'Abstract base class for import loaders.'u'Abstract base class for import loaders.'b'Return a module to initialize and into which to load. + + This method should raise ImportError if anything prevents it + from creating a new module. It may return None to indicate + that the spec should create the new module. + 'u'Return a module to initialize and into which to load. + + This method should raise ImportError if anything prevents it + from creating a new module. It may return None to indicate + that the spec should create the new module. + 'b'Return the loaded module. + + The module must be added to sys.modules and have import-related + attributes set properly. The fullname is a str. + + ImportError is raised on failure. + + This method is deprecated in favor of loader.exec_module(). If + exec_module() exists then it is used to provide a backwards-compatible + functionality for this method. + + 'u'Return the loaded module. + + The module must be added to sys.modules and have import-related + attributes set properly. The fullname is a str. + + ImportError is raised on failure. + + This method is deprecated in favor of loader.exec_module(). If + exec_module() exists then it is used to provide a backwards-compatible + functionality for this method. + + 'b'Return a module's repr. + + Used by the module type when the method does not raise + NotImplementedError. + + This method is deprecated. + + 'u'Return a module's repr. + + Used by the module type when the method does not raise + NotImplementedError. + + This method is deprecated. + + 'b'Abstract base class for loaders which can return data from their + back-end storage. + + This ABC represents one of the optional protocols specified by PEP 302. + + 'u'Abstract base class for loaders which can return data from their + back-end storage. + + This ABC represents one of the optional protocols specified by PEP 302. + + 'b'Abstract method which when implemented should return the bytes for + the specified path. The path must be a str.'u'Abstract method which when implemented should return the bytes for + the specified path. The path must be a str.'b'Abstract base class for loaders which support inspection about the + modules they can load. + + This ABC represents one of the optional protocols specified by PEP 302. + + 'u'Abstract base class for loaders which support inspection about the + modules they can load. + + This ABC represents one of the optional protocols specified by PEP 302. + + 'b'Optional method which when implemented should return whether the + module is a package. The fullname is a str. Returns a bool. + + Raises ImportError if the module cannot be found. + 'u'Optional method which when implemented should return whether the + module is a package. The fullname is a str. Returns a bool. + + Raises ImportError if the module cannot be found. + 'b'Method which returns the code object for the module. + + The fullname is a str. Returns a types.CodeType if possible, else + returns None if a code object does not make sense + (e.g. built-in module). Raises ImportError if the module cannot be + found. + 'u'Method which returns the code object for the module. + + The fullname is a str. Returns a types.CodeType if possible, else + returns None if a code object does not make sense + (e.g. built-in module). Raises ImportError if the module cannot be + found. + 'b'Abstract method which should return the source code for the + module. The fullname is a str. Returns a str. 
+ + Raises ImportError if the module cannot be found. + 'u'Abstract method which should return the source code for the + module. The fullname is a str. Returns a str. + + Raises ImportError if the module cannot be found. + 'b'Compile 'data' into a code object. + + The 'data' argument can be anything that compile() can handle. The'path' + argument should be where the data was retrieved (when applicable).'u'Compile 'data' into a code object. + + The 'data' argument can be anything that compile() can handle. The'path' + argument should be where the data was retrieved (when applicable).'b'Abstract base class for loaders that wish to support the execution of + modules as scripts. + + This ABC represents one of the optional protocols specified in PEP 302. + + 'u'Abstract base class for loaders that wish to support the execution of + modules as scripts. + + This ABC represents one of the optional protocols specified in PEP 302. + + 'b'Abstract method which should return the value that __file__ is to be + set to. + + Raises ImportError if the module cannot be found. + 'u'Abstract method which should return the value that __file__ is to be + set to. + + Raises ImportError if the module cannot be found. + 'b'Method to return the code object for fullname. + + Should return None if not applicable (e.g. built-in module). + Raise ImportError if the module cannot be found. + 'u'Method to return the code object for fullname. + + Should return None if not applicable (e.g. built-in module). + Raise ImportError if the module cannot be found. + 'b'Abstract base class partially implementing the ResourceLoader and + ExecutionLoader ABCs.'u'Abstract base class partially implementing the ResourceLoader and + ExecutionLoader ABCs.'b'Abstract base class for loading source code (and optionally any + corresponding bytecode). + + To support loading from source code, the abstractmethods inherited from + ResourceLoader and ExecutionLoader need to be implemented. To also support + loading from bytecode, the optional methods specified directly by this ABC + is required. + + Inherited abstractmethods not implemented in this ABC: + + * ResourceLoader.get_data + * ExecutionLoader.get_filename + + 'u'Abstract base class for loading source code (and optionally any + corresponding bytecode). + + To support loading from source code, the abstractmethods inherited from + ResourceLoader and ExecutionLoader need to be implemented. To also support + loading from bytecode, the optional methods specified directly by this ABC + is required. + + Inherited abstractmethods not implemented in this ABC: + + * ResourceLoader.get_data + * ExecutionLoader.get_filename + + 'b'Return the (int) modification time for the path (str).'u'Return the (int) modification time for the path (str).'b'Return a metadata dict for the source pointed to by the path (str). + Possible keys: + - 'mtime' (mandatory) is the numeric timestamp of last source + code modification; + - 'size' (optional) is the size in bytes of the source code. + 'u'Return a metadata dict for the source pointed to by the path (str). + Possible keys: + - 'mtime' (mandatory) is the numeric timestamp of last source + code modification; + - 'size' (optional) is the size in bytes of the source code. + 'b'Write the bytes to the path (if possible). + + Accepts a str path and data as bytes. + + Any needed intermediary directories are to be created. If for some + reason the file cannot be written because of permissions, fail + silently. + 'u'Write the bytes to the path (if possible). 
+ + Accepts a str path and data as bytes. + + Any needed intermediary directories are to be created. If for some + reason the file cannot be written because of permissions, fail + silently. + 'b'Abstract base class to provide resource-reading support. + + Loaders that support resource reading are expected to implement + the ``get_resource_reader(fullname)`` method and have it either return None + or an object compatible with this ABC. + 'u'Abstract base class to provide resource-reading support. + + Loaders that support resource reading are expected to implement + the ``get_resource_reader(fullname)`` method and have it either return None + or an object compatible with this ABC. + 'b'Return an opened, file-like object for binary reading. + + The 'resource' argument is expected to represent only a file name + and thus not contain any subdirectory components. + + If the resource cannot be found, FileNotFoundError is raised. + 'u'Return an opened, file-like object for binary reading. + + The 'resource' argument is expected to represent only a file name + and thus not contain any subdirectory components. + + If the resource cannot be found, FileNotFoundError is raised. + 'b'Return the file system path to the specified resource. + + The 'resource' argument is expected to represent only a file name + and thus not contain any subdirectory components. + + If the resource does not exist on the file system, raise + FileNotFoundError. + 'u'Return the file system path to the specified resource. + + The 'resource' argument is expected to represent only a file name + and thus not contain any subdirectory components. + + If the resource does not exist on the file system, raise + FileNotFoundError. + 'b'Return True if the named 'name' is consider a resource.'u'Return True if the named 'name' is consider a resource.'b'Return an iterable of strings over the contents of the package.'u'Return an iterable of strings over the contents of the package.'u'importlib.abc'Abstract Base Classes (ABCs) according to PEP 3119.funcobjA decorator indicating abstract methods. + + Requires that the metaclass is ABCMeta or derived from it. A + class that has a metaclass derived from ABCMeta cannot be + instantiated unless all of its abstract methods are overridden. + The abstract methods can be called using any of the normal + 'super' call mechanisms. abstractmethod() may be used to declare + abstract methods for properties and descriptors. + + Usage: + + class C(metaclass=ABCMeta): + @abstractmethod + def my_abstract_method(self, ...): + ... + abstractclassmethodA decorator indicating abstract classmethods. + + Deprecated, use 'classmethod' with 'abstractmethod' instead. + abstractstaticmethodA decorator indicating abstract staticmethods. + + Deprecated, use 'staticmethod' with 'abstractmethod' instead. + abstractpropertyA decorator indicating abstract properties. + + Deprecated, use 'property' with 'abstractmethod' instead. + Metaclass for defining Abstract Base Classes (ABCs). + + Use this metaclass to create an ABC. An ABC can be subclassed + directly, and then acts as a mix-in class. You can also register + unrelated concrete classes (even built-in classes) and unrelated + ABCs as 'virtual subclasses' -- these and their descendants will + be considered subclasses of the registering ABC by the built-in + issubclass() function, but the registering ABC won't show up in + their MRO (Method Resolution Order) nor will method + implementations defined by the registering ABC be callable (not + even via super()). 
+ Register a virtual subclass of an ABC. + + Returns the subclass, to allow usage as a class decorator. + _abc_registry: _abc_cache: _abc_negative_cache: _abc_negative_cache_version: _py_abcABCHelper class that provides a standard way to create an ABC using + inheritance. + b'Abstract Base Classes (ABCs) according to PEP 3119.'u'Abstract Base Classes (ABCs) according to PEP 3119.'b'A decorator indicating abstract methods. + + Requires that the metaclass is ABCMeta or derived from it. A + class that has a metaclass derived from ABCMeta cannot be + instantiated unless all of its abstract methods are overridden. + The abstract methods can be called using any of the normal + 'super' call mechanisms. abstractmethod() may be used to declare + abstract methods for properties and descriptors. + + Usage: + + class C(metaclass=ABCMeta): + @abstractmethod + def my_abstract_method(self, ...): + ... + 'u'A decorator indicating abstract methods. + + Requires that the metaclass is ABCMeta or derived from it. A + class that has a metaclass derived from ABCMeta cannot be + instantiated unless all of its abstract methods are overridden. + The abstract methods can be called using any of the normal + 'super' call mechanisms. abstractmethod() may be used to declare + abstract methods for properties and descriptors. + + Usage: + + class C(metaclass=ABCMeta): + @abstractmethod + def my_abstract_method(self, ...): + ... + 'b'A decorator indicating abstract classmethods. + + Deprecated, use 'classmethod' with 'abstractmethod' instead. + 'u'A decorator indicating abstract classmethods. + + Deprecated, use 'classmethod' with 'abstractmethod' instead. + 'b'A decorator indicating abstract staticmethods. + + Deprecated, use 'staticmethod' with 'abstractmethod' instead. + 'u'A decorator indicating abstract staticmethods. + + Deprecated, use 'staticmethod' with 'abstractmethod' instead. + 'b'A decorator indicating abstract properties. + + Deprecated, use 'property' with 'abstractmethod' instead. + 'u'A decorator indicating abstract properties. + + Deprecated, use 'property' with 'abstractmethod' instead. + 'b'Metaclass for defining Abstract Base Classes (ABCs). + + Use this metaclass to create an ABC. An ABC can be subclassed + directly, and then acts as a mix-in class. You can also register + unrelated concrete classes (even built-in classes) and unrelated + ABCs as 'virtual subclasses' -- these and their descendants will + be considered subclasses of the registering ABC by the built-in + issubclass() function, but the registering ABC won't show up in + their MRO (Method Resolution Order) nor will method + implementations defined by the registering ABC be callable (not + even via super()). + 'u'Metaclass for defining Abstract Base Classes (ABCs). + + Use this metaclass to create an ABC. An ABC can be subclassed + directly, and then acts as a mix-in class. You can also register + unrelated concrete classes (even built-in classes) and unrelated + ABCs as 'virtual subclasses' -- these and their descendants will + be considered subclasses of the registering ABC by the built-in + issubclass() function, but the registering ABC won't show up in + their MRO (Method Resolution Order) nor will method + implementations defined by the registering ABC be callable (not + even via super()). + 'b'Register a virtual subclass of an ABC. + + Returns the subclass, to allow usage as a class decorator. + 'u'Register a virtual subclass of an ABC. + + Returns the subclass, to allow usage as a class decorator. 
+ 'b'_abc_registry: 'u'_abc_registry: 'b'_abc_cache: 'u'_abc_cache: 'b'_abc_negative_cache: 'u'_abc_negative_cache: 'b'_abc_negative_cache_version: 'u'_abc_negative_cache_version: 'b'abc'b'Helper class that provides a standard way to create an ABC using + inheritance. + 'u'Helper class that provides a standard way to create an ABC using + inheritance. + ' Encoding Aliases Support + + This module is used by the encodings package search function to + map encodings names to module names. + + Note that the search function normalizes the encoding names before + doing the lookup, so the mapping will have to map normalized + encoding names to module names. + + Contents: + + The following aliases dictionary contains mappings of all IANA + character set names for which the Python core library provides + codecs. In addition to these, a few Python specific codec + aliases have also been added. + +646ansi_x3.4_1968ansi_x3_4_1968ansi_x3.4_1986cp367csasciiibm367iso646_usiso_646.irv_1991iso_ir_6usus_asciibase64_codecbase_64big5big5_twcsbig5big5hkscsbig5_hkscshkscsbz2_codeccp037037csibm037ebcdic_cp_caebcdic_cp_nlebcdic_cp_usebcdic_cp_wtibm037ibm039cp10261026csibm1026ibm1026cp11251125ibm1125cp866urusciicp11401140ibm1140cp12501250windows_1250cp12511251windows_1251cp12521252windows_1252cp12531253windows_1253cp12541254windows_1254cp12551255windows_1255cp12561256windows_1256cp12571257windows_1257cp12581258windows_1258cp273273ibm273csibm273cp424csibm424ebcdic_cp_heibm424cp437437cspc8codepage437ibm437cp500csibm500ebcdic_cp_beebcdic_cp_chibm500cp775775cspc775balticibm775cp850850cspc850multilingualibm850cp852852cspcp852ibm852cp855855csibm855ibm855cp857857csibm857ibm857cp858858csibm858ibm858cp860860csibm860ibm860cp861861cp_iscsibm861ibm861cp862862cspc862latinhebrewibm862cp863863csibm863ibm863cp864864csibm864ibm864cp865865csibm865ibm865cp866866csibm866ibm866cp869869cp_grcsibm869ibm869cp932932ms932mskanjims_kanjicp949949ms949uhccp950950ms950euc_jis_2004jisx0213eucjis2004euc_jis2004euc_jisx0213eucjisx0213euc_jpeucjpujisu_jiseuc_kreuckrkoreanksc5601ks_c_5601ks_c_5601_1987ksx1001ks_x_1001gb18030gb18030_2000gb2312chinesecsiso58gb231280euc_cneuccneucgb2312_cngb2312_1980gb2312_80iso_ir_58gbk936cp936ms936hex_codechp_roman8roman8r8csHPRoman8cp1051ibm1051hzhzgbhz_gbhz_gb_2312iso2022_jpcsiso2022jpiso2022jpiso_2022_jpiso2022_jp_1iso2022jp_1iso_2022_jp_1iso2022_jp_2iso2022jp_2iso_2022_jp_2iso2022_jp_2004iso_2022_jp_2004iso2022jp_2004iso2022_jp_3iso2022jp_3iso_2022_jp_3iso2022_jp_extiso2022jp_extiso_2022_jp_extiso2022_krcsiso2022kriso2022kriso_2022_kriso8859_10csisolatin6iso_8859_10iso_8859_10_1992iso_ir_157l6latin6iso8859_11thaiiso_8859_11iso_8859_11_2001iso8859_13iso_8859_13l7latin7iso8859_14iso_8859_14iso_8859_14_1998iso_celticiso_ir_199l8latin8iso8859_15iso_8859_15l9latin9iso8859_16iso_8859_16iso_8859_16_2001iso_ir_226l10latin10iso8859_2csisolatin2iso_8859_2iso_8859_2_1987iso_ir_101l2latin2iso8859_3csisolatin3iso_8859_3iso_8859_3_1988iso_ir_109l3latin3iso8859_4csisolatin4iso_8859_4iso_8859_4_1988iso_ir_110l4latin4iso8859_5csisolatincyrilliccyrilliciso_8859_5iso_8859_5_1988iso_ir_144iso8859_6arabicasmo_708csisolatinarabicecma_114iso_8859_6iso_8859_6_1987iso_ir_127iso8859_7csisolatingreekecma_118elot_928greekgreek8iso_8859_7iso_8859_7_1987iso_ir_126iso8859_8csisolatinhebrewhebrewiso_8859_8iso_8859_8_1988iso_ir_138iso8859_9csisolatin5iso_8859_9iso_8859_9_1989iso_ir_148l5latin5johabcp1361ms1361koi8_rcskoi8rkz1048kz_1048rk1048strk1048_2002latin_18859cp819csisolatin1ibm819iso8859iso8859_1iso_8859_1iso_8859_1_1987iso_ir_100l1latinlat
in1mac_cyrillicmaccyrillicmac_greekmacgreekmac_icelandmacicelandmac_latin2maccentraleuropemaclatin2mac_romanmacintoshmacromanmac_turkishmacturkishansidbcsptcp154csptcp154pt154cp154cyrillic_asianquopri_codecquopriquoted_printablequotedprintablerot_13rot13shift_jiscsshiftjisshiftjissjiss_jisshift_jis_2004shiftjis2004sjis_2004s_jis_2004shift_jisx0213shiftjisx0213sjisx0213s_jisx0213tactistis260tis_620tis620tis_620_0tis_620_2529_0tis_620_2529_1iso_ir_166utf_16u16utf16utf_16_beunicodebigunmarkedutf_16beutf_16_leunicodelittleunmarkedutf_16leutf_32u32utf32utf_32_beutf_32beutf_32_leutf_32leutf_7u7utf7unicode_1_1_utf_7utf_8u8utfutf8utf8_ucs2utf8_ucs4uu_codecuuzlib_codecx_mac_japanesex_mac_koreanx_mac_simp_chinesex_mac_trad_chinese# Please keep this list sorted alphabetically by value !# ascii codec# some email headers use this non-standard name# base64_codec codec# big5 codec# big5hkscs codec# bz2_codec codec# cp037 codec# cp1026 codec# cp1125 codec# cp1140 codec# cp1250 codec# cp1251 codec# cp1252 codec# cp1253 codec# cp1254 codec# cp1255 codec# cp1256 codec# cp1257 codec# cp1258 codec# cp273 codec# cp424 codec# cp437 codec# cp500 codec# cp775 codec# cp850 codec# cp852 codec# cp855 codec# cp857 codec# cp858 codec# cp860 codec# cp861 codec# cp862 codec# cp863 codec# cp864 codec# cp865 codec# cp866 codec# cp869 codec# cp932 codec# cp949 codec# cp950 codec# euc_jis_2004 codec# euc_jisx0213 codec# euc_jp codec# euc_kr codec# gb18030 codec# gb2312 codec# gbk codec# hex_codec codec# hp_roman8 codec# hz codec# iso2022_jp codec# iso2022_jp_1 codec# iso2022_jp_2 codec# iso2022_jp_2004 codec# iso2022_jp_3 codec# iso2022_jp_ext codec# iso2022_kr codec# iso8859_10 codec# iso8859_11 codec# iso8859_13 codec# iso8859_14 codec# iso8859_15 codec# iso8859_16 codec# iso8859_2 codec# iso8859_3 codec# iso8859_4 codec# iso8859_5 codec# iso8859_6 codec# iso8859_7 codec# iso8859_8 codec# iso8859_9 codec# johab codec# koi8_r codec# kz1048 codec# latin_1 codec# Note that the latin_1 codec is implemented internally in C and a# lot faster than the charmap codec iso8859_1 which uses the same# encoding. This is why we discourage the use of the iso8859_1# codec and alias it to latin_1 instead.# mac_cyrillic codec# mac_greek codec# mac_iceland codec# mac_latin2 codec# mac_roman codec# mac_turkish codec# mbcs codec# ptcp154 codec# quopri_codec codec# rot_13 codec# shift_jis codec# shift_jis_2004 codec# shift_jisx0213 codec# tactis codec# tis_620 codec# utf_16 codec# utf_16_be codec# utf_16_le codec# utf_32 codec# utf_32_be codec# utf_32_le codec# utf_7 codec# utf_8 codec# uu_codec codec# zlib_codec codec# temporary mac CJK aliases, will be replaced by proper codecs in 3.1b' Encoding Aliases Support + + This module is used by the encodings package search function to + map encodings names to module names. + + Note that the search function normalizes the encoding names before + doing the lookup, so the mapping will have to map normalized + encoding names to module names. + + Contents: + + The following aliases dictionary contains mappings of all IANA + character set names for which the Python core library provides + codecs. In addition to these, a few Python specific codec + aliases have also been added. + +'u' Encoding Aliases Support + + This module is used by the encodings package search function to + map encodings names to module names. + + Note that the search function normalizes the encoding names before + doing the lookup, so the mapping will have to map normalized + encoding names to module names. 
+ + Contents: + + The following aliases dictionary contains mappings of all IANA + character set names for which the Python core library provides + codecs. In addition to these, a few Python specific codec + aliases have also been added. + +'b'646'u'646'b'ansi_x3.4_1968'u'ansi_x3.4_1968'b'ansi_x3_4_1968'u'ansi_x3_4_1968'b'ansi_x3.4_1986'u'ansi_x3.4_1986'b'cp367'u'cp367'b'csascii'u'csascii'b'ibm367'u'ibm367'b'iso646_us'u'iso646_us'b'iso_646.irv_1991'u'iso_646.irv_1991'b'iso_ir_6'u'iso_ir_6'b'us'u'us'b'us_ascii'u'us_ascii'b'base64_codec'u'base64_codec'b'base64'u'base64'b'base_64'u'base_64'b'big5'u'big5'b'big5_tw'u'big5_tw'b'csbig5'u'csbig5'b'big5hkscs'u'big5hkscs'b'big5_hkscs'u'big5_hkscs'b'hkscs'u'hkscs'b'bz2_codec'u'bz2_codec'b'cp037'u'cp037'b'037'u'037'b'csibm037'u'csibm037'b'ebcdic_cp_ca'u'ebcdic_cp_ca'b'ebcdic_cp_nl'u'ebcdic_cp_nl'b'ebcdic_cp_us'u'ebcdic_cp_us'b'ebcdic_cp_wt'u'ebcdic_cp_wt'b'ibm037'u'ibm037'b'ibm039'u'ibm039'b'cp1026'u'cp1026'b'1026'u'1026'b'csibm1026'u'csibm1026'b'ibm1026'u'ibm1026'b'cp1125'u'cp1125'b'1125'u'1125'b'ibm1125'u'ibm1125'b'cp866u'u'cp866u'b'ruscii'u'ruscii'b'cp1140'u'cp1140'b'1140'u'1140'b'ibm1140'u'ibm1140'b'cp1250'u'cp1250'b'1250'u'1250'b'windows_1250'u'windows_1250'b'cp1251'u'cp1251'b'1251'u'1251'b'windows_1251'u'windows_1251'b'cp1252'u'cp1252'b'1252'u'1252'b'windows_1252'u'windows_1252'b'cp1253'u'cp1253'b'1253'u'1253'b'windows_1253'u'windows_1253'b'cp1254'u'cp1254'b'1254'u'1254'b'windows_1254'u'windows_1254'b'cp1255'u'cp1255'b'1255'u'1255'b'windows_1255'u'windows_1255'b'cp1256'u'cp1256'b'1256'u'1256'b'windows_1256'u'windows_1256'b'cp1257'u'cp1257'b'1257'u'1257'b'windows_1257'u'windows_1257'b'cp1258'u'cp1258'b'1258'u'1258'b'windows_1258'u'windows_1258'b'cp273'u'cp273'b'273'u'273'b'ibm273'u'ibm273'b'csibm273'u'csibm273'b'cp424'u'cp424'b'424'u'424'b'csibm424'u'csibm424'b'ebcdic_cp_he'u'ebcdic_cp_he'b'ibm424'u'ibm424'b'cp437'u'cp437'b'437'u'437'b'cspc8codepage437'u'cspc8codepage437'b'ibm437'u'ibm437'b'cp500'u'cp500'b'500'u'500'b'csibm500'u'csibm500'b'ebcdic_cp_be'u'ebcdic_cp_be'b'ebcdic_cp_ch'u'ebcdic_cp_ch'b'ibm500'u'ibm500'b'cp775'u'cp775'b'775'u'775'b'cspc775baltic'u'cspc775baltic'b'ibm775'u'ibm775'b'cp850'u'cp850'b'850'u'850'b'cspc850multilingual'u'cspc850multilingual'b'ibm850'u'ibm850'b'cp852'u'cp852'b'852'u'852'b'cspcp852'u'cspcp852'b'ibm852'u'ibm852'b'cp855'u'cp855'b'855'u'855'b'csibm855'u'csibm855'b'ibm855'u'ibm855'b'cp857'u'cp857'b'857'u'857'b'csibm857'u'csibm857'b'ibm857'u'ibm857'b'cp858'u'cp858'b'858'u'858'b'csibm858'u'csibm858'b'ibm858'u'ibm858'b'cp860'u'cp860'b'860'u'860'b'csibm860'u'csibm860'b'ibm860'u'ibm860'b'cp861'u'cp861'b'861'u'861'b'cp_is'u'cp_is'b'csibm861'u'csibm861'b'ibm861'u'ibm861'b'cp862'u'cp862'b'862'u'862'b'cspc862latinhebrew'u'cspc862latinhebrew'b'ibm862'u'ibm862'b'cp863'u'cp863'b'863'u'863'b'csibm863'u'csibm863'b'ibm863'u'ibm863'b'cp864'u'cp864'b'864'u'864'b'csibm864'u'csibm864'b'ibm864'u'ibm864'b'cp865'u'cp865'b'865'u'865'b'csibm865'u'csibm865'b'ibm865'u'ibm865'b'cp866'u'cp866'b'866'u'866'b'csibm866'u'csibm866'b'ibm866'u'ibm866'b'cp869'u'cp869'b'869'u'869'b'cp_gr'u'cp_gr'b'csibm869'u'csibm869'b'ibm869'u'ibm869'b'cp932'u'cp932'b'932'u'932'b'ms932'u'ms932'b'mskanji'u'mskanji'b'ms_kanji'u'ms_kanji'b'cp949'u'cp949'b'949'u'949'b'ms949'u'ms949'b'uhc'u'uhc'b'cp950'u'cp950'b'950'u'950'b'ms950'u'ms950'b'euc_jis_2004'u'euc_jis_2004'b'jisx0213'u'jisx0213'b'eucjis2004'u'eucjis2004'b'euc_jis2004'u'euc_jis2004'b'euc_jisx0213'u'euc_jisx0213'b'eucjisx0213'u'eucjisx0213'b'euc_jp'u'euc_jp'b'eucjp'u'eucjp'b'ujis'u'ujis'b'u_jis'u'u_jis'b'euc_kr'u'
euc_kr'b'euckr'u'euckr'b'korean'u'korean'b'ksc5601'u'ksc5601'b'ks_c_5601'u'ks_c_5601'b'ks_c_5601_1987'u'ks_c_5601_1987'b'ksx1001'u'ksx1001'b'ks_x_1001'u'ks_x_1001'b'gb18030'u'gb18030'b'gb18030_2000'u'gb18030_2000'b'gb2312'u'gb2312'b'chinese'u'chinese'b'csiso58gb231280'u'csiso58gb231280'b'euc_cn'u'euc_cn'b'euccn'u'euccn'b'eucgb2312_cn'u'eucgb2312_cn'b'gb2312_1980'u'gb2312_1980'b'gb2312_80'u'gb2312_80'b'iso_ir_58'u'iso_ir_58'b'gbk'u'gbk'b'936'u'936'b'cp936'u'cp936'b'ms936'u'ms936'b'hex_codec'u'hex_codec'b'hex'u'hex'b'hp_roman8'u'hp_roman8'b'roman8'u'roman8'b'r8'u'r8'b'csHPRoman8'u'csHPRoman8'b'cp1051'u'cp1051'b'ibm1051'u'ibm1051'b'hz'u'hz'b'hzgb'u'hzgb'b'hz_gb'u'hz_gb'b'hz_gb_2312'u'hz_gb_2312'b'iso2022_jp'u'iso2022_jp'b'csiso2022jp'u'csiso2022jp'b'iso2022jp'u'iso2022jp'b'iso_2022_jp'u'iso_2022_jp'b'iso2022_jp_1'u'iso2022_jp_1'b'iso2022jp_1'u'iso2022jp_1'b'iso_2022_jp_1'u'iso_2022_jp_1'b'iso2022_jp_2'u'iso2022_jp_2'b'iso2022jp_2'u'iso2022jp_2'b'iso_2022_jp_2'u'iso_2022_jp_2'b'iso2022_jp_2004'u'iso2022_jp_2004'b'iso_2022_jp_2004'u'iso_2022_jp_2004'b'iso2022jp_2004'u'iso2022jp_2004'b'iso2022_jp_3'u'iso2022_jp_3'b'iso2022jp_3'u'iso2022jp_3'b'iso_2022_jp_3'u'iso_2022_jp_3'b'iso2022_jp_ext'u'iso2022_jp_ext'b'iso2022jp_ext'u'iso2022jp_ext'b'iso_2022_jp_ext'u'iso_2022_jp_ext'b'iso2022_kr'u'iso2022_kr'b'csiso2022kr'u'csiso2022kr'b'iso2022kr'u'iso2022kr'b'iso_2022_kr'u'iso_2022_kr'b'iso8859_10'u'iso8859_10'b'csisolatin6'u'csisolatin6'b'iso_8859_10'u'iso_8859_10'b'iso_8859_10_1992'u'iso_8859_10_1992'b'iso_ir_157'u'iso_ir_157'b'l6'u'l6'b'latin6'u'latin6'b'iso8859_11'u'iso8859_11'b'thai'u'thai'b'iso_8859_11'u'iso_8859_11'b'iso_8859_11_2001'u'iso_8859_11_2001'b'iso8859_13'u'iso8859_13'b'iso_8859_13'u'iso_8859_13'b'l7'u'l7'b'latin7'u'latin7'b'iso8859_14'u'iso8859_14'b'iso_8859_14'u'iso_8859_14'b'iso_8859_14_1998'u'iso_8859_14_1998'b'iso_celtic'u'iso_celtic'b'iso_ir_199'u'iso_ir_199'b'l8'u'l8'b'latin8'u'latin8'b'iso8859_15'u'iso8859_15'b'iso_8859_15'u'iso_8859_15'b'l9'u'l9'b'latin9'u'latin9'b'iso8859_16'u'iso8859_16'b'iso_8859_16'u'iso_8859_16'b'iso_8859_16_2001'u'iso_8859_16_2001'b'iso_ir_226'u'iso_ir_226'b'l10'u'l10'b'latin10'u'latin10'b'iso8859_2'u'iso8859_2'b'csisolatin2'u'csisolatin2'b'iso_8859_2'u'iso_8859_2'b'iso_8859_2_1987'u'iso_8859_2_1987'b'iso_ir_101'u'iso_ir_101'b'l2'u'l2'b'latin2'u'latin2'b'iso8859_3'u'iso8859_3'b'csisolatin3'u'csisolatin3'b'iso_8859_3'u'iso_8859_3'b'iso_8859_3_1988'u'iso_8859_3_1988'b'iso_ir_109'u'iso_ir_109'b'l3'u'l3'b'latin3'u'latin3'b'iso8859_4'u'iso8859_4'b'csisolatin4'u'csisolatin4'b'iso_8859_4'u'iso_8859_4'b'iso_8859_4_1988'u'iso_8859_4_1988'b'iso_ir_110'u'iso_ir_110'b'l4'u'l4'b'latin4'u'latin4'b'iso8859_5'u'iso8859_5'b'csisolatincyrillic'u'csisolatincyrillic'b'cyrillic'u'cyrillic'b'iso_8859_5'u'iso_8859_5'b'iso_8859_5_1988'u'iso_8859_5_1988'b'iso_ir_144'u'iso_ir_144'b'iso8859_6'u'iso8859_6'b'arabic'u'arabic'b'asmo_708'u'asmo_708'b'csisolatinarabic'u'csisolatinarabic'b'ecma_114'u'ecma_114'b'iso_8859_6'u'iso_8859_6'b'iso_8859_6_1987'u'iso_8859_6_1987'b'iso_ir_127'u'iso_ir_127'b'iso8859_7'u'iso8859_7'b'csisolatingreek'u'csisolatingreek'b'ecma_118'u'ecma_118'b'elot_928'u'elot_928'b'greek'u'greek'b'greek8'u'greek8'b'iso_8859_7'u'iso_8859_7'b'iso_8859_7_1987'u'iso_8859_7_1987'b'iso_ir_126'u'iso_ir_126'b'iso8859_8'u'iso8859_8'b'csisolatinhebrew'u'csisolatinhebrew'b'hebrew'u'hebrew'b'iso_8859_8'u'iso_8859_8'b'iso_8859_8_1988'u'iso_8859_8_1988'b'iso_ir_138'u'iso_ir_138'b'iso8859_9'u'iso8859_9'b'csisolatin5'u'csisolatin5'b'iso_8859_9'u'iso_8859_9'b'iso_8859_9_1989'u'iso_8859_9_1
989'b'iso_ir_148'u'iso_ir_148'b'l5'u'l5'b'latin5'u'latin5'b'johab'u'johab'b'cp1361'u'cp1361'b'ms1361'u'ms1361'b'koi8_r'u'koi8_r'b'cskoi8r'u'cskoi8r'b'kz1048'u'kz1048'b'kz_1048'u'kz_1048'b'rk1048'u'rk1048'b'strk1048_2002'u'strk1048_2002'b'latin_1'u'latin_1'b'8859'u'8859'b'cp819'u'cp819'b'csisolatin1'u'csisolatin1'b'ibm819'u'ibm819'b'iso8859'u'iso8859'b'iso8859_1'u'iso8859_1'b'iso_8859_1'u'iso_8859_1'b'iso_8859_1_1987'u'iso_8859_1_1987'b'iso_ir_100'u'iso_ir_100'b'l1'u'l1'b'latin'u'latin'b'latin1'u'latin1'b'mac_cyrillic'u'mac_cyrillic'b'maccyrillic'u'maccyrillic'b'mac_greek'u'mac_greek'b'macgreek'u'macgreek'b'mac_iceland'u'mac_iceland'b'maciceland'u'maciceland'b'mac_latin2'u'mac_latin2'b'maccentraleurope'u'maccentraleurope'b'maclatin2'u'maclatin2'b'mac_roman'u'mac_roman'b'macintosh'u'macintosh'b'macroman'u'macroman'b'mac_turkish'u'mac_turkish'b'macturkish'u'macturkish'b'mbcs'u'mbcs'b'ansi'u'ansi'b'dbcs'u'dbcs'b'ptcp154'u'ptcp154'b'csptcp154'u'csptcp154'b'pt154'u'pt154'b'cp154'u'cp154'b'cyrillic_asian'u'cyrillic_asian'b'quopri_codec'u'quopri_codec'b'quopri'u'quopri'b'quoted_printable'u'quoted_printable'b'quotedprintable'u'quotedprintable'b'rot_13'u'rot_13'b'rot13'u'rot13'b'shift_jis'u'shift_jis'b'csshiftjis'u'csshiftjis'b'shiftjis'u'shiftjis'b'sjis'u'sjis'b's_jis'u's_jis'b'shift_jis_2004'u'shift_jis_2004'b'shiftjis2004'u'shiftjis2004'b'sjis_2004'u'sjis_2004'b's_jis_2004'u's_jis_2004'b'shift_jisx0213'u'shift_jisx0213'b'shiftjisx0213'u'shiftjisx0213'b'sjisx0213'u'sjisx0213'b's_jisx0213'u's_jisx0213'b'tactis'u'tactis'b'tis260'u'tis260'b'tis_620'u'tis_620'b'tis620'u'tis620'b'tis_620_0'u'tis_620_0'b'tis_620_2529_0'u'tis_620_2529_0'b'tis_620_2529_1'u'tis_620_2529_1'b'iso_ir_166'u'iso_ir_166'b'utf_16'u'utf_16'b'u16'u'u16'b'utf16'u'utf16'b'utf_16_be'u'utf_16_be'b'unicodebigunmarked'u'unicodebigunmarked'b'utf_16be'u'utf_16be'b'utf_16_le'u'utf_16_le'b'unicodelittleunmarked'u'unicodelittleunmarked'b'utf_16le'u'utf_16le'b'utf_32'u'utf_32'b'u32'u'u32'b'utf32'u'utf32'b'utf_32_be'u'utf_32_be'b'utf_32be'u'utf_32be'b'utf_32_le'u'utf_32_le'b'utf_32le'u'utf_32le'b'utf_7'u'utf_7'b'u7'u'u7'b'utf7'u'utf7'b'unicode_1_1_utf_7'u'unicode_1_1_utf_7'b'utf_8'u'utf_8'b'u8'u'u8'b'utf'u'utf'b'utf8'u'utf8'b'utf8_ucs2'u'utf8_ucs2'b'utf8_ucs4'u'utf8_ucs4'b'uu_codec'u'uu_codec'b'uu'u'uu'b'zlib_codec'u'zlib_codec'b'zlib'u'zlib'b'x_mac_japanese'u'x_mac_japanese'b'x_mac_korean'u'x_mac_korean'b'x_mac_simp_chinese'u'x_mac_simp_chinese'b'x_mac_trad_chinese'u'x_mac_trad_chinese'u'encodings.aliases'u'aliases'Command-line parsing library + +This module is an optparse-inspired command-line parsing library that: + + - handles both optional and positional arguments + - produces highly informative usage messages + - supports parsers that dispatch to sub-parsers + +The following is a simple usage example that sums integers from the +command-line and writes the result to a file:: + + parser = argparse.ArgumentParser( + description='sum the integers at the command line') + parser.add_argument( + 'integers', metavar='int', nargs='+', type=int, + help='an integer to be summed') + parser.add_argument( + '--log', default=sys.stdout, type=argparse.FileType('w'), + help='the file where the sum should be written') + args = parser.parse_args() + args.log.write('%s' % sum(args.integers)) + args.log.close() + +The module contains the following public classes: + + - ArgumentParser -- The main entry point for command-line parsing. 
As the + example above shows, the add_argument() method is used to populate + the parser with actions for optional and positional arguments. Then + the parse_args() method is invoked to convert the args at the + command-line into an object with attributes. + + - ArgumentError -- The exception raised by ArgumentParser objects when + there are errors with the parser's actions. Errors raised while + parsing the command-line are caught by ArgumentParser and emitted + as command-line messages. + + - FileType -- A factory for defining types of files to be created. As the + example above shows, instances of FileType are typically passed as + the type= argument of add_argument() calls. + + - Action -- The base class for parser actions. Typically actions are + selected by passing strings like 'store_true' or 'append_const' to + the action= argument of add_argument(). However, for greater + customization of ArgumentParser actions, subclasses of Action may + be defined and passed as the action= argument. + + - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter, + ArgumentDefaultsHelpFormatter -- Formatter classes which + may be passed as the formatter_class= argument to the + ArgumentParser constructor. HelpFormatter is the default, + RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser + not to change the formatting for help text, and + ArgumentDefaultsHelpFormatter adds information about argument defaults + to the help. + +All other classes in this module are considered implementation details. +(Also note that HelpFormatter and RawDescriptionHelpFormatter are only +considered public as object names -- the API of the formatter objects is +still considered an implementation detail.) +1.1ArgumentParserArgumentTypeErrorFileTypeHelpFormatterArgumentDefaultsHelpFormatterRawDescriptionHelpFormatterRawTextHelpFormatterMetavarTypeHelpFormatterNamespaceActionONE_OR_MOREOPTIONALPARSERREMAINDERSUPPRESSZERO_OR_MORE_shutilngettext==SUPPRESS==A......_unrecognized_args_UNRECOGNIZED_ARGS_ATTR_AttributeHolderAbstract base class that provides __repr__. + + The __repr__ method returns a string in the format:: + ClassName(attr=name, attr=name, ...) + The attributes are determined either by a class-level attribute, + '_kwarg_names', or by inspecting the instance __dict__. + type_namearg_stringsstar_args_get_args_get_kwargs%s=%r**%s_copy_itemsFormatter for generating usage messages and argument help strings. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. 
+ progindent_incrementmax_help_positionget_terminal_sizecolumns_prog_indent_increment_max_help_position_width_current_indent_level_action_max_length_Section_root_section_current_section\s+_whitespace_matcher\n\n\n+_long_break_matcher_indent_dedentIndent decreased below 0.headingformat_help_join_partsitem_helpcurrent_indent%*s%s: +_add_itemstart_sectionsectionend_sectionadd_text_format_textadd_usageusageactions_format_usageadd_argumentactionhelp_format_action_invocationget_invocationinvocationssubaction_iter_indented_subactionsinvocation_lengthaction_length_format_actionadd_arguments + +part_stringsusage: %(prog)soptionalspositionalsoption_strings_format_actions_usageaction_usagetext_width\(.*?\)+(?=\s|$)|\[.*?\]+(?=\s|$)|\S+r'\(.*?\)+(?=\s|$)|'r'\[.*?\]+(?=\s|$)|'r'\S+'part_regexpopt_usagepos_usageopt_partspos_partsget_linesindentline_len0.75%s%s + +group_actionsinserts_group_actions [_get_default_metavar_for_positional_format_argsoption_stringnargs_get_default_metavar_for_optionalargs_string%s %s[\[(][\])](%s) \1 (%s)%s *%s\(([^|]*)\)%(prog)_fill_texthelp_positionhelp_widthaction_widthaction_headertup%*s%s +%*s%-*s indent_first_expand_helphelp_text_split_lineshelp_lines_metavar_formattermetavardefault_metavarchoiceschoicechoice_strstuple_sizeget_metavar[%s [%s ...]]%s [%s ...]%s ...formatsinvalid nargs valueparamschoices_str_get_help_string_get_subactionsget_subactionstextwrapwrapinitial_indentsubsequent_indentdestHelp message formatter which retains any formatting in descriptions. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. + Help message formatter which retains formatting of all help text. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. + Help message formatter which adds default values to argument help. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. + %(default)defaulting_nargs (default: %(default)s)Help message formatter which uses the argument 'type' as the default + metavar value (instead of the argument 'dest') + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. + _get_action_nameargumentAn error from creating or using an argument (optional or positional). + + The string value of this exception is the message, augmented with + information about the argument that caused it. + argument_nameargument %(argument_name)s: %(message)sAn error from trying to convert a command line string to a type.Information about how to convert command line strings to Python objects. + + Action objects are used by an ArgumentParser to represent the information + needed to parse a single argument from one or more strings from the + command line. The keyword arguments to the Action constructor are also + all attributes of Action instances. + + Keyword Arguments: + + - option_strings -- A list of command-line option strings which + should be associated with this action. + + - dest -- The name of the attribute to hold the created object(s) + + - nargs -- The number of command-line arguments that should be + consumed. By default, one argument will be consumed and a single + value will be produced. Other values include: + - N (an integer) consumes N arguments (and produces a list) + - '?' 
consumes zero or one arguments + - '*' consumes zero or more arguments (and produces a list) + - '+' consumes one or more arguments (and produces a list) + Note that the difference between the default and nargs=1 is that + with the default, a single value will be produced, while with + nargs=1, a list containing a single value will be produced. + + - const -- The value to be produced if the option is specified and the + option uses an action that takes no values. + + - default -- The value to be produced if the option is not specified. + + - type -- A callable that accepts a single string argument, and + returns the converted value. The standard Python types str, int, + float, and complex are useful examples of such callables. If None, + str is used. + + - choices -- A container of values that should be allowed. If not None, + after a command-line argument has been converted to the appropriate + type, an exception will be raised if it is not a member of this + collection. + + - required -- True if the action must always be specified at the + command line. This is only meaningful for optional command-line + arguments. + + - help -- The help string describing the argument. + + - metavar -- The name to be used for the option's argument with the + help string. If None, the 'dest' value will be used as the name. + const.__call__() not defined_StoreActionnargs for store actions must be != 0; if you have nothing to store, actions such as store true or store const may be more appropriate'nargs for store actions must be != 0; if you ''have nothing to store, actions such as store ''true or store const may be more appropriate'nargs must be %r to supply const_StoreConstAction_StoreTrueAction_StoreFalseAction_AppendActionnargs for append actions must be != 0; if arg strings are not supplying the value to append, the append const action may be more appropriate'nargs for append actions must be != 0; if arg ''strings are not supplying the value to append, ''the append const action may be more appropriate'_AppendConstAction_CountAction_HelpActionprint_help_VersionActionshow program's version number and exit_get_formatter_print_message_SubParsersAction_ChoicesPseudoActionsupparser_class_prog_prefix_parser_class_name_parser_map_choices_actionsadd_parserchoice_actionparser_nameunknown parser %(parser_name)r (choices: %(choices)s)parse_known_argssubnamespace_ExtendActionFactory for creating file object types + + Instances of FileType are typically passed as type= arguments to the + ArgumentParser add_argument() method. + + Keyword Arguments: + - mode -- A string indicating how the file is to be opened. Accepts the + same values as the builtin open() function. + - bufsize -- The file's desired buffer size. Accepts the same values as + the builtin open() function. + - encoding -- The file's encoding. Accepts the same values as the + builtin open() function. + - errors -- A string indicating how encoding and decoding errors are to + be handled. Accepts the same value as the builtin open() function. + bufsize_bufsize_encoding_errorsargument "-" with mode %rcan't open '%(filename)s': %(error)sargs_strSimple object for storing attributes. + + Implements equality by attribute names and values, and provides a simple + string representation. 
+ _ActionsContainerprefix_charsargument_defaultconflict_handler_registriesstorestore_conststore_truestore_falseappend_const_get_handler_actions_option_string_actions_action_groups_mutually_exclusive_groups_defaults^-\d+$|^-\d*\.\d+$_negative_number_matcher_has_negative_number_optionalsregistry_name_registry_getset_defaultsget_default + add_argument(dest, ..., name=value, ...) + add_argument(option_string, option_string, ..., name=value, ...) + dest supplied twice for positional argument_get_positional_kwargs_get_optional_kwargs_pop_action_classaction_classunknown action "%s"type_func%r is not callable%r is a FileType class object, instance of it must be passed'%r is a FileType class object, instance of it'' must be passed'length of metavar tuple does not match nargs_add_actionadd_argument_group_ArgumentGroupadd_mutually_exclusive_group_MutuallyExclusiveGroup_check_conflictcontainer_remove_action_add_container_actionstitle_group_mapcannot merge actions - two groups are named %rgroup_mapmutex_group'required' is an invalid argument for positionalslong_option_stringsinvalid option string %(option)r: must start with a character %(prefix_chars)r'invalid option string %(option)r: ''must start with a character %(prefix_chars)r'dest_option_stringdest= is required for options like %r_handle_conflict_%shandler_func_nameinvalid conflict_resolution value: %rconfl_optionalsconfl_optional_handle_conflict_errorconflicting_actionsconflicting option string: %sconflicting option strings: %sconflict_string_handle_conflict_resolvesuper_init_containermutually exclusive arguments must be optionalObject for parsing command line strings into Python objects. + + Keyword Arguments: + - prog -- The name of the program (default: sys.argv[0]) + - usage -- A usage message (default: auto-generated from arguments) + - description -- A description of what the program does + - epilog -- Text following the argument descriptions + - parents -- Parsers whose arguments should be copied into this one + - formatter_class -- HelpFormatter class for printing help messages + - prefix_chars -- Characters that prefix optional arguments + - fromfile_prefix_chars -- Characters that prefix files containing + additional arguments + - argument_default -- The default value for all arguments + - conflict_handler -- String indicating how to handle conflicts + - add_help -- Add a -h/-help option + - allow_abbrev -- Allow long options to be abbreviated unambiguously + epilogformatter_classfromfile_prefix_charsadd_helpallow_abbrevsuperinitadd_grouppositional arguments_positionalsoptional arguments_optionals_subparsersidentitydefault_prefixshow this help message and exitadd_subparserscannot have multiple subparser argumentssubcommands_get_positional_actionsparsers_class_get_optional_actionsparse_argsunrecognized arguments: %s_parse_known_args_read_args_from_filesaction_conflictsmutex_actionconflictsoption_string_indicesarg_string_pattern_partsarg_strings_iterarg_string_parse_optionaloption_tuplearg_strings_patternseen_actionsseen_non_default_actionstake_actionargument_strings_get_valuesargument_valuesconflict_actionnot allowed with argument %saction_nameconsume_optionalstart_indexexplicit_arg_match_argumentmatch_argumentaction_tuplesextrasarg_countnew_explicit_argoptionals_mapignored explicit argument %rselected_patternsconsume_positionals_match_arguments_partialmatch_partialselected_patternarg_countsmax_option_string_indexnext_option_string_indexpositionals_end_indexstringsstop_indexrequired_actionsthe following arguments are required: %sone of 
[Remainder of hunk omitted: raw string-pool payload of the CodeQL Python database. The content is an undelimited dump of interned source strings, docstrings, and comments extracted from Python standard-library modules (argparse, array, ast, unittest.async_case, atexit, base64) and has no reviewable structure.]
+ 'b'+/'b'Decode the Base64 encoded bytes-like object or ASCII string s. + + Optional altchars must be a bytes-like object or ASCII string of length 2 + which specifies the alternative alphabet used instead of the '+' and '/' + characters. + + The result is returned as a bytes object. A binascii.Error is raised if + s is incorrectly padded. + + If validate is False (the default), characters that are neither in the + normal base-64 alphabet nor the alternative alphabet are discarded prior + to the padding check. If validate is True, these non-alphabet characters + in the input result in a binascii.Error. + 'u'Decode the Base64 encoded bytes-like object or ASCII string s. + + Optional altchars must be a bytes-like object or ASCII string of length 2 + which specifies the alternative alphabet used instead of the '+' and '/' + characters. + + The result is returned as a bytes object. A binascii.Error is raised if + s is incorrectly padded. + + If validate is False (the default), characters that are neither in the + normal base-64 alphabet nor the alternative alphabet are discarded prior + to the padding check. If validate is True, these non-alphabet characters + in the input result in a binascii.Error. + 'b'[A-Za-z0-9+/]*={0,2}'b'Non-base64 digit found'u'Non-base64 digit found'b'Encode bytes-like object s using the standard Base64 alphabet. + + The result is returned as a bytes object. + 'u'Encode bytes-like object s using the standard Base64 alphabet. + + The result is returned as a bytes object. + 'b'Decode bytes encoded with the standard Base64 alphabet. + + Argument s is a bytes-like object or ASCII string to decode. The result + is returned as a bytes object. A binascii.Error is raised if the input + is incorrectly padded. Characters that are not in the standard alphabet + are discarded prior to the padding check. + 'u'Decode bytes encoded with the standard Base64 alphabet. + + Argument s is a bytes-like object or ASCII string to decode. The result + is returned as a bytes object. A binascii.Error is raised if the input + is incorrectly padded. Characters that are not in the standard alphabet + are discarded prior to the padding check. + 'b'-_'b'Encode bytes using the URL- and filesystem-safe Base64 alphabet. + + Argument s is a bytes-like object to encode. The result is returned as a + bytes object. The alphabet uses '-' instead of '+' and '_' instead of + '/'. + 'u'Encode bytes using the URL- and filesystem-safe Base64 alphabet. + + Argument s is a bytes-like object to encode. The result is returned as a + bytes object. The alphabet uses '-' instead of '+' and '_' instead of + '/'. + 'b'Decode bytes using the URL- and filesystem-safe Base64 alphabet. + + Argument s is a bytes-like object or ASCII string to decode. The result + is returned as a bytes object. A binascii.Error is raised if the input + is incorrectly padded. Characters that are not in the URL-safe base-64 + alphabet, and are not a plus '+' or slash '/', are discarded prior to the + padding check. + + The alphabet uses '-' instead of '+' and '_' instead of '/'. + 'u'Decode bytes using the URL- and filesystem-safe Base64 alphabet. + + Argument s is a bytes-like object or ASCII string to decode. The result + is returned as a bytes object. A binascii.Error is raised if the input + is incorrectly padded. Characters that are not in the URL-safe base-64 + alphabet, and are not a plus '+' or slash '/', are discarded prior to the + padding check. + + The alphabet uses '-' instead of '+' and '_' instead of '/'. 
+ 'b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'b'Encode the bytes-like object s using Base32 and return a bytes object. + 'u'Encode the bytes-like object s using Base32 and return a bytes object. + 'b''b'======'b'===='b'Decode the Base32 encoded bytes-like object or ASCII string s. + + Optional casefold is a flag specifying whether a lowercase alphabet is + acceptable as input. For security purposes, the default is False. + + RFC 3548 allows for optional mapping of the digit 0 (zero) to the + letter O (oh), and for optional mapping of the digit 1 (one) to + either the letter I (eye) or letter L (el). The optional argument + map01 when not None, specifies which letter the digit 1 should be + mapped to (when map01 is not None, the digit 0 is always mapped to + the letter O). For security purposes the default is None, so that + 0 and 1 are not allowed in the input. + + The result is returned as a bytes object. A binascii.Error is raised if + the input is incorrectly padded or if there are non-alphabet + characters present in the input. + 'u'Decode the Base32 encoded bytes-like object or ASCII string s. + + Optional casefold is a flag specifying whether a lowercase alphabet is + acceptable as input. For security purposes, the default is False. + + RFC 3548 allows for optional mapping of the digit 0 (zero) to the + letter O (oh), and for optional mapping of the digit 1 (one) to + either the letter I (eye) or letter L (el). The optional argument + map01 when not None, specifies which letter the digit 1 should be + mapped to (when map01 is not None, the digit 0 is always mapped to + the letter O). For security purposes the default is None, so that + 0 and 1 are not allowed in the input. + + The result is returned as a bytes object. A binascii.Error is raised if + the input is incorrectly padded or if there are non-alphabet + characters present in the input. + 'b'Incorrect padding'u'Incorrect padding'b'Non-base32 digit found'u'Non-base32 digit found'b'Encode the bytes-like object s using Base16 and return a bytes object. + 'u'Encode the bytes-like object s using Base16 and return a bytes object. + 'b'Decode the Base16 encoded bytes-like object or ASCII string s. + + Optional casefold is a flag specifying whether a lowercase alphabet is + acceptable as input. For security purposes, the default is False. + + The result is returned as a bytes object. A binascii.Error is raised if + s is incorrectly padded or if there are non-alphabet characters present + in the input. + 'u'Decode the Base16 encoded bytes-like object or ASCII string s. + + Optional casefold is a flag specifying whether a lowercase alphabet is + acceptable as input. For security purposes, the default is False. + + The result is returned as a bytes object. A binascii.Error is raised if + s is incorrectly padded or if there are non-alphabet characters present + in the input. + 'b'[^0-9A-F]'b'Non-base16 digit found'u'Non-base16 digit found'b'<~'b'~>'b'!%dI'u'!%dI'b'Encode bytes-like object b using Ascii85 and return a bytes object. + + foldspaces is an optional flag that uses the special short sequence 'y' + instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This + feature is not supported by the "standard" Adobe encoding. + + wrapcol controls whether the output should have newline (b'\n') characters + added to it. If this is non-zero, each output line will be at most this + many characters long. + + pad controls whether the input is padded to a multiple of 4 before + encoding. Note that the btoa implementation always pads. 
+ + adobe controls whether the encoded byte sequence is framed with <~ and ~>, + which is used by the Adobe implementation. + 'u'Encode bytes-like object b using Ascii85 and return a bytes object. + + foldspaces is an optional flag that uses the special short sequence 'y' + instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This + feature is not supported by the "standard" Adobe encoding. + + wrapcol controls whether the output should have newline (b'\n') characters + added to it. If this is non-zero, each output line will be at most this + many characters long. + + pad controls whether the input is padded to a multiple of 4 before + encoding. Note that the btoa implementation always pads. + + adobe controls whether the encoded byte sequence is framed with <~ and ~>, + which is used by the Adobe implementation. + 'b' + 'b'Decode the Ascii85 encoded bytes-like object or ASCII string b. + + foldspaces is a flag that specifies whether the 'y' short sequence should be + accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is + not supported by the "standard" Adobe encoding. + + adobe controls whether the input sequence is in Adobe Ascii85 format (i.e. + is framed with <~ and ~>). + + ignorechars should be a byte string containing characters to ignore from the + input. This should only contain whitespace characters, and by default + contains all whitespace characters in ASCII. + + The result is returned as a bytes object. + 'u'Decode the Ascii85 encoded bytes-like object or ASCII string b. + + foldspaces is a flag that specifies whether the 'y' short sequence should be + accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is + not supported by the "standard" Adobe encoding. + + adobe controls whether the input sequence is in Adobe Ascii85 format (i.e. + is framed with <~ and ~>). + + ignorechars should be a byte string containing characters to ignore from the + input. This should only contain whitespace characters, and by default + contains all whitespace characters in ASCII. + + The result is returned as a bytes object. + 'b'Ascii85 encoded byte sequences must end with {!r}'u'Ascii85 encoded byte sequences must end with {!r}'b'!I'u'!I'b'!'b'Ascii85 overflow'u'Ascii85 overflow'b'z inside Ascii85 5-tuple'u'z inside Ascii85 5-tuple'b''b'y inside Ascii85 5-tuple'u'y inside Ascii85 5-tuple'b' 'b'Non-Ascii85 digit found: %c'u'Non-Ascii85 digit found: %c'b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~'b'Encode bytes-like object b in base85 format and return a bytes object. + + If pad is true, the input is padded with b'\0' so its length is a multiple of + 4 bytes before encoding. + 'u'Encode bytes-like object b in base85 format and return a bytes object. + + If pad is true, the input is padded with b'\0' so its length is a multiple of + 4 bytes before encoding. + 'b'Decode the base85-encoded bytes-like object or ASCII string b + + The result is returned as a bytes object. + 'u'Decode the base85-encoded bytes-like object or ASCII string b + + The result is returned as a bytes object. 
+ 'b'~'b'bad base85 character at position %d'u'bad base85 character at position %d'b'base85 overflow in hunk starting at byte %d'u'base85 overflow in hunk starting at byte %d'b'Encode a file; input and output are binary files.'u'Encode a file; input and output are binary files.'b'Decode a file; input and output are binary files.'u'Decode a file; input and output are binary files.'b'expected bytes-like object, not %s'u'expected bytes-like object, not %s'b'expected single byte elements, not %r from %s'u'expected single byte elements, not %r from %s'b'expected 1-D data, not %d-D data from %s'u'expected 1-D data, not %d-D data from %s'b'Encode a bytestring into a bytes object containing multiple lines + of base-64 data.'u'Encode a bytestring into a bytes object containing multiple lines + of base-64 data.'b'Legacy alias of encodebytes().'u'Legacy alias of encodebytes().'b'encodestring() is a deprecated alias since 3.1, use encodebytes()'u'encodestring() is a deprecated alias since 3.1, use encodebytes()'b'Decode a bytestring of base-64 data into a bytes object.'u'Decode a bytestring of base-64 data into a bytes object.'b'Legacy alias of decodebytes().'u'Legacy alias of decodebytes().'b'decodestring() is a deprecated alias since Python 3.1, use decodebytes()'u'decodestring() is a deprecated alias since Python 3.1, use decodebytes()'b'Small main program'u'Small main program'b'deut'u'deut'b'usage: %s [-d|-e|-u|-t] [file|-] + -d, -u: decode + -e: encode (default) + -t: encode and decode string 'Aladdin:open sesame''u'usage: %s [-d|-e|-u|-t] [file|-] + -d, -u: decode + -e: encode (default) + -t: encode and decode string 'Aladdin:open sesame''b'-e'u'-e'b'-d'u'-d'b'-u'u'-u'b'-t'u'-t'b'Aladdin:open sesame'Base64 content transfer encoding per RFCs 2045-2047. + +This module handles the content transfer encoding method defined in RFC 2045 +to encode arbitrary 8-bit data using the three 8-bit bytes in four 7-bit +characters encoding known as Base64. + +It is used in the MIME standards for email to attach images, audio, and text +using some 8-bit character sets to messages. + +This module provides an interface to encode and decode both headers and bodies +with Base64 encoding. + +RFC 2045 defines a method for including character set information in an +`encoded-word' in a header. This method is commonly used for 8-bit real names +in To:, From:, Cc:, etc. fields, as well as Subject: lines. + +This module does not do the line wrapping or end-of-line character conversion +necessary for proper internationalized headers; it only does dumb encoding and +decoding. To deal with the various line wrapping issues, use the email.header +module. +body_decodebody_encodeheader_encodeheader_lengthCRLFNLMISC_LENReturn the length of s when it is encoded with base64.iso-8859-1header_bytesEncode a single header line with Base64 encoding in a given charset. + + charset names the character set to use to encode the header. It defaults + to iso-8859-1. Base64 encoding is defined in RFC 2045. + =?%s?b?%s?=eolEncode a string with base64. + + Each line will be wrapped at, at most, maxlinelen characters (defaults to + 76 characters). + + Each line of encoded text will end with eol, which defaults to "\n". Set + this to "\r\n" if you will be using the result of this function directly + in an email. + encvecmax_unencodedencDecode a raw base64 string, returning a bytes object. 
+ + This function does not parse a full MIME header value encoded with + base64 (like =?iso-8859-1?b?bmloISBuaWgh?=) -- please use the high + level email.header class for that functionality. + raw-unicode-escape# Author: Ben Gertzfield# See also Charset.py# Helpers# BAW: should encode() inherit b2a_base64()'s dubious behavior in# adding a newline to the encoded string?# For convenience and backwards compatibility w/ standard base64 moduleb'Base64 content transfer encoding per RFCs 2045-2047. + +This module handles the content transfer encoding method defined in RFC 2045 +to encode arbitrary 8-bit data using the three 8-bit bytes in four 7-bit +characters encoding known as Base64. + +It is used in the MIME standards for email to attach images, audio, and text +using some 8-bit character sets to messages. + +This module provides an interface to encode and decode both headers and bodies +with Base64 encoding. + +RFC 2045 defines a method for including character set information in an +`encoded-word' in a header. This method is commonly used for 8-bit real names +in To:, From:, Cc:, etc. fields, as well as Subject: lines. + +This module does not do the line wrapping or end-of-line character conversion +necessary for proper internationalized headers; it only does dumb encoding and +decoding. To deal with the various line wrapping issues, use the email.header +module. +'u'Base64 content transfer encoding per RFCs 2045-2047. + +This module handles the content transfer encoding method defined in RFC 2045 +to encode arbitrary 8-bit data using the three 8-bit bytes in four 7-bit +characters encoding known as Base64. + +It is used in the MIME standards for email to attach images, audio, and text +using some 8-bit character sets to messages. + +This module provides an interface to encode and decode both headers and bodies +with Base64 encoding. + +RFC 2045 defines a method for including character set information in an +`encoded-word' in a header. This method is commonly used for 8-bit real names +in To:, From:, Cc:, etc. fields, as well as Subject: lines. + +This module does not do the line wrapping or end-of-line character conversion +necessary for proper internationalized headers; it only does dumb encoding and +decoding. To deal with the various line wrapping issues, use the email.header +module. +'b'body_decode'u'body_decode'b'body_encode'u'body_encode'b'decodestring'u'decodestring'b'header_encode'u'header_encode'b'header_length'u'header_length'b'Return the length of s when it is encoded with base64.'u'Return the length of s when it is encoded with base64.'b'iso-8859-1'u'iso-8859-1'b'Encode a single header line with Base64 encoding in a given charset. + + charset names the character set to use to encode the header. It defaults + to iso-8859-1. Base64 encoding is defined in RFC 2045. + 'u'Encode a single header line with Base64 encoding in a given charset. + + charset names the character set to use to encode the header. It defaults + to iso-8859-1. Base64 encoding is defined in RFC 2045. + 'b'=?%s?b?%s?='u'=?%s?b?%s?='b'Encode a string with base64. + + Each line will be wrapped at, at most, maxlinelen characters (defaults to + 76 characters). + + Each line of encoded text will end with eol, which defaults to "\n". Set + this to "\r\n" if you will be using the result of this function directly + in an email. + 'u'Encode a string with base64. + + Each line will be wrapped at, at most, maxlinelen characters (defaults to + 76 characters). 
+ + Each line of encoded text will end with eol, which defaults to "\n". Set + this to "\r\n" if you will be using the result of this function directly + in an email. + 'b'Decode a raw base64 string, returning a bytes object. + + This function does not parse a full MIME header value encoded with + base64 (like =?iso-8859-1?b?bmloISBuaWgh?=) -- please use the high + level email.header class for that functionality. + 'u'Decode a raw base64 string, returning a bytes object. + + This function does not parse a full MIME header value encoded with + base64 (like =?iso-8859-1?b?bmloISBuaWgh?=) -- please use the high + level email.header class for that functionality. + 'b'raw-unicode-escape'u'raw-unicode-escape'u'email.base64mime'Base implementation of event loop. + +The event loop can be broken up into a multiplexer (the part +responsible for notifying us of I/O events) and the event loop proper, +which wraps a multiplexer with functionality for scheduling callbacks, +immediately or at a given time in the future. + +Whenever a public API takes a callback, subsequent positional +arguments will be passed to the callback if/when it is called. This +avoids the proliferation of trivial lambdas implementing closures. +Keyword arguments for the callback are not supported; this is a +conscious design decision, leaving the door open for keyword arguments +to modify the meaning of the API call itself. +concurrentsslconstantssslprotostaggeredtrsockBaseEventLoop_MIN_SCHEDULED_TIMER_HANDLES0.5_MIN_CANCELLED_TIMER_HANDLES_FRACTION_HAS_IPv6MAXIMUM_SELECT_TIMEOUT_unset_format_handle_callback_format_pipeSTDOUT_set_reuseportreuse_port not supported by socket modulereuse_port not supported by socket module, SO_REUSEPORT defined but not implemented.'reuse_port not supported by socket module, ''SO_REUSEPORT defined but not implemented.'_ipaddr_infoflowinfoscopeidafsidnaaf_interleave_addrinfosaddrinfosfirst_address_family_countInterleave list of addrinfo tuples by family.addrinfos_by_familyaddrinfos_listsreordered_run_until_complete_cb_get_loop_set_nodelay_SendfileFallbackProtocolProtocoltransp_FlowControlMixintransport should be _FlowControlMixin instance_transportget_protocol_protois_reading_should_resume_reading_protocol_paused_should_resume_writingpause_readingset_protocol_write_ready_futdrainis_closingConnection closed by peerconnection_madetransportInvalid state: connection should have been established already."Invalid state: ""connection should have been established already."connection_lostConnection is closed by peerpause_writingresume_writingdata_receivedInvalid state: reading should be pausedeof_receivedresume_readingServerAbstractServersocketsprotocol_factoryssl_contextbacklogssl_handshake_timeout_sockets_active_count_protocol_factory_backlog_ssl_context_ssl_handshake_timeout_serving_serving_forever_fut sockets=_attach_detach_wakeup_start_servingis_servingTransportSocket_stop_servingstart_servingserve_foreverserver is already being awaited on serve_forever() is closedwait_closedAbstractEventLoop_timer_cancelled_count_closed_stopping_ready_scheduled_default_executor_internal_fds_thread_idget_clock_info_clock_resolution_exception_handler_is_debug_modeslow_callback_duration_current_handle_task_factory_coroutine_origin_tracking_enabled_coroutine_origin_tracking_saved_depth_asyncgens_asyncgens_shutdown_called running=is_running closed=' ''closed='is_closed debug=get_debugCreate a Future object attached to the loop.coroSchedule a coroutine object. + + Return a task object. 
+ _check_closed_set_task_nameset_task_factorySet a task factory that will be used by loop.create_task(). + + If factory is None the default task factory will be set. + + If factory is a callable, it should have a signature matching + '(loop, coro)', where 'loop' will be a reference to the active + event loop, 'coro' will be a coroutine object. The callable + must return a Future. + task factory must be a callable or Noneget_task_factoryReturn a task factory, or None if the default one is in use._make_socket_transportCreate socket transport._make_ssl_transportrawsocksslcontextcall_connection_madeCreate SSL transport._make_datagram_transportaddressCreate datagram transport._make_read_pipe_transportpipeCreate read pipe transport._make_write_pipe_transportCreate write pipe transport._make_subprocess_transportshellCreate subprocess transport._write_to_selfWrite a byte to self-pipe, to wake up the event loop. + + This may be called from a different thread. + + The subclass is responsible for implementing the self-pipe. + _process_eventsevent_listProcess selector events.Event loop is closed_asyncgen_finalizer_hookagencall_soon_threadsafe_asyncgen_firstiter_hookasynchronous generator was scheduled after loop.shutdown_asyncgens() call" was scheduled after ""loop.shutdown_asyncgens() call"Shutdown all active asynchronous generators.closing_agensagresultsan error occurred during closing of asynchronous generator 'an error occurred during closing of ''asynchronous generator 'asyncgen_check_runningThis event loop is already runningCannot run the event loop while another loop is runningrun_foreverRun until stop() is called._set_coroutine_origin_tracking_debugold_agen_hooksfirstiterfinalizer_run_onceRun until the Future is done. + + If the argument is a coroutine, it is wrapped in a Task. + + WARNING: It would be disastrous to call run_until_complete() + with the same coroutine twice -- it would wrap it in two + different Tasks and that can't be good. + + Return the Future's result, or raise its exception. + isfuturenew_taskensure_futureEvent loop stopped before Future completed.Stop running the event loop. + + Every callback already scheduled will still run. This simply informs + run_forever to stop looping after a complete iteration. + Close the event loop. + + This clears the queues and shuts down the executor, + but does not wait for the executor to finish. + + The event loop must not be running. + Cannot close a running event loopClose %rexecutorReturns True if the event loop was closed._warnunclosed event loop Returns True if the event loop is running.Return the time according to the event loop's clock. + + This is a float expressed in seconds since an epoch, but the + epoch, precision, accuracy and drift are unspecified and may + differ per event loop. + call_laterArrange for a callback to be called at a given time. + + Return a Handle: an opaque object with a cancel() method that + can be used to cancel the call. + + The delay can be an int or float, expressed in seconds. It is + always relative to the current time. + + Each callback will be called exactly once. If two callbacks + are scheduled for exactly the same time, it undefined which + will be called first. + + Any positional arguments after the callback will be passed to + the callback when it is called. + call_attimerwhenLike call_later(), but uses an absolute time. + + Absolute time corresponds to the event loop's time() method. + _check_thread_check_callbackTimerHandlecall_soonArrange for a callback to be called as soon as possible. 
+ + This operates as a FIFO queue: callbacks are called in the + order in which they are registered. Each callback will be + called exactly once. + + Any positional arguments after the callback will be passed to + the callback when it is called. + _call_sooniscoroutineiscoroutinefunctioncoroutines cannot be used with ()a callable object was expected by (), got '(), ''got 'HandleCheck that the current thread is the thread running the event loop. + + Non-thread-safe methods of this class make this assumption and will + likely behave incorrectly when the assumption is violated. + + Should only be called when (self._debug == True). The caller is + responsible for checking this condition for performance reasons. + thread_idNon-thread-safe operation invoked on an event loop other than the current one"Non-thread-safe operation invoked on an event loop other ""than the current one"Like call_soon(), but thread-safe.run_in_executorwrap_futureset_default_executorUsing the default executor that is not an instance of ThreadPoolExecutor is deprecated and will be prohibited in Python 3.9'Using the default executor that is not an instance of ''ThreadPoolExecutor is deprecated and will be prohibited ''in Python 3.9'_getaddrinfo_debugfamily=type=proto=flags=Get address info %st0addrinfoGetting address info took 1000.01e3ms: getaddr_funcsockaddrsock_sendfilefallbackthe socket must be non-blocking_check_sendfile_params_sock_sendfile_nativeSendfileNotAvailableError_sock_sendfile_fallbacksyscall sendfile is not available for socket and file {file!r} combination"and file {file!r} combination"SENDFILE_FALLBACK_READBUFFER_SIZEblocksizetotal_sentsock_sendallfile should be opened in binary modeonly SOCK_STREAM type sockets are supportedcount must be a positive integer (got {!r})offset must be a non-negative integer (got {!r})_connect_sockaddr_infolocal_addr_infosCreate, bind and connect one socket.my_exceptionsladdrerror while attempting to bind on address 'error while attempting to bind on ''address '': 'sock_connectcreate_connectionlocal_addrhappy_eyeballs_delayinterleaveConnect to a TCP server. + + Create a streaming transport connection to a given Internet host and + port: socket family AF_INET or socket.AF_INET6 depending on host (or + family if specified), socket type SOCK_STREAM. protocol_factory must be + a callable returning a protocol instance. + + This method is a coroutine which will try to establish the connection + in the background. When successful, the coroutine returns a + (transport, protocol) pair. + server_hostname is only meaningful with sslYou must set server_hostname when using ssl without a host'You must set server_hostname ''when using ssl without a host'ssl_handshake_timeout is only meaningful with sslhost/port and sock can not be specified at the same time_ensure_resolvedinfosgetaddrinfo() returned empty listladdr_infosstaggered_raceMultiple exceptions: {}host and port was not specified and no sock specifiedA Stream Socket was expected, got _create_connection_transportget_extra_info%r connected to %s:%r: (%r, %r)sendfileSend a file to transport. + + Return the total number of bytes which were sent. + + The method uses high-performance os.sendfile if available. + + file must be a regular file object opened in binary mode. + + offset tells from where to start reading the file. If specified, + count is the total number of bytes to transmit as opposed to + sending the file until EOF is reached. 
File position is updated on + return or also in case of error in which case file.tell() + can be used to figure out the number of bytes + which were sent. + + fallback set to True makes asyncio to manually read and send + the file when the platform does not support the sendfile syscall + (e.g. Windows or SSL socket on Unix). + + Raise SendfileNotAvailableError if the system does not support + sendfile syscall and fallback is False. + Transport is closing_sendfile_compatible_SendfileModeUNSUPPORTEDsendfile is not supported for transport TRY_NATIVE_sendfile_nativefallback is disabled and native sendfile is not supported for transport "fallback is disabled and native sendfile is not ""supported for transport "_sendfile_fallbacksendfile syscall is not supportedstart_tlsUpgrade transport to TLS. + + Return a new transport that *protocol* should start using + immediately. + Python ssl module is not availableSSLContextsslcontext is expected to be an instance of ssl.SSLContext, got 'sslcontext is expected to be an instance of ssl.SSLContext, '_start_tls_compatibletransport is not supported by start_tls()SSLProtocolssl_protocolconmade_cbresume_cb_app_transportcreate_datagram_endpointremote_addrreuse_addressreuse_portallow_broadcastCreate datagram connection.A UDP Socket was expected, got problemssocket modifier keyword arguments can not be used when sock is specified. ('socket modifier keyword arguments can not be used ''when sock is specified. ('r_addrunexpected address familyaddr_pairs_infostring is expectedUnable to check or remove stale UNIX socket %r: %r'Unable to check or remove stale UNIX ''socket %r: %r'addr_infos2-tuple is expectedfamproaddr_paircan not get address informationPassing `reuse_address=True` is no longer supported, as the usage of SO_REUSEPORT in UDP poses a significant security concern."Passing `reuse_address=True` is no ""longer supported, as the usage of ""SO_REUSEPORT in UDP poses a significant ""security concern."The *reuse_address* parameter has been deprecated as of 3.5.10 and is scheduled for removal in 3.11."The *reuse_address* parameter has been ""deprecated as of 3.5.10 and is scheduled ""for removal in 3.11."local_addressremote_addressDatagram endpoint local_addr=%r remote_addr=%r created: (%r, %r)"Datagram endpoint local_addr=%r remote_addr=%r ""created: (%r, %r)"Datagram endpoint remote_addr=%r created: (%r, %r)"Datagram endpoint remote_addr=%r created: ""(%r, %r)"_create_server_getaddrinfogetaddrinfo() returned empty listcreate_serverCreate a TCP server. + + The host parameter can be a string, in that case the TCP server is + bound to host and port. + + The host parameter can also be a sequence of strings and in that case + the TCP server is bound to all hosts of the sequence. If a host + appears multiple times (possibly indirectly e.g. when hostnames + resolve to the same IP address), the server is only bound once to that + host. + + Return a Server object which can be used to stop the service. + + This method is a coroutine. + ssl argument must be an SSLContext or Nonehostscompletedcanonnamesacreate_server() failed to create socket.socket(%r, %r, %r)'create_server() failed to create ''socket.socket(%r, %r, %r)'error while attempting to bind on address %r: %s'error while attempting ''to bind on address %r: %s'Neither host/port nor sock were specified%r is servingconnect_accepted_socketHandle an accepted connection. + + This is used by servers that accept connections outside of + asyncio but that use asyncio to handle connections. + + This method is a coroutine. 
When completed, the coroutine + returns a (transport, protocol) pair. + %r handled: (%r, %r)connect_read_pipeRead pipe %r connected: (%r, %r)connect_write_pipeWrite pipe %r connected: (%r, %r)_log_subprocessstdin=stdout=stderr=stdout=stderr=subprocess_shellcmd must be a stringuniversal_newlines must be Falseshell must be Truebufsize must be 0text must be Falseencoding must be Noneerrors must be Nonedebug_logrun shell command %r%s: %rsubprocess_execprogramshell must be Falsepopen_argsexecute program get_exception_handlerReturn an exception handler, or None if the default one is in use. + set_exception_handlerSet handler as the new event loop exception handler. + + If handler is None, the default exception handler will + be set. + + If handler is a callable object, it should have a + signature matching '(loop, context)', where 'loop' + will be a reference to the active event loop, 'context' + will be a dict object (see `call_exception_handler()` + documentation for details about context). + A callable object or None is expected, got 'A callable object or None is expected, 'default_exception_handlerDefault exception handler. + + This is called when an exception occurs and no exception + handler is set, and can be called by a custom exception + handler that wants to defer to the default behavior. + + This default handler logs the error message and other + context-dependent information. In debug mode, a truncated + stack trace is also appended showing where the given object + (e.g. a handle or future or task) was created, if any. + + The context parameter has the same meaning as in + `call_exception_handler()`. + Unhandled exception in event loopsource_tracebackhandle_tracebacklog_linesformat_listObject created at (most recent call last): +Handle created at (most recent call last): +Call the current event loop's exception handler. + + The context argument is a dict containing the following keys: + + - 'message': Error message; + - 'exception' (optional): Exception object; + - 'future' (optional): Future instance; + - 'task' (optional): Task instance; + - 'handle' (optional): Handle instance; + - 'protocol' (optional): Protocol instance; + - 'transport' (optional): Transport instance; + - 'socket' (optional): Socket instance; + - 'asyncgen' (optional): Asynchronous generator that caused + the exception. + + New keys maybe introduced in the future. + + Note: do not overload this method in an event loop subclass. + For custom exception handling, use the + `set_exception_handler()` method. + Exception in default exception handlerUnhandled error in exception handlerException in default exception handler while handling an unexpected error in custom exception handler'Exception in default exception handler ''while handling an unexpected error ''in custom exception handler'_add_callbackAdd a Handle to _scheduled (TimerHandle) or _ready.A Handle is required here_cancelled_add_callback_signalsafeLike _add_callback() but called from a signal handler._timer_handle_cancelledNotification that a TimerHandle has been cancelled.Run one full iteration of the event loop. + + This calls all currently ready callbacks, polls for I/O, + schedules the resulting callbacks, and finally schedules + 'call_later' callbacks. 
+ sched_countnew_scheduled_when_selectorntodo_runExecuting %s took %.3f secondsenabledDEBUG_STACK_DEPTH# Minimum number of _scheduled timer handles before cleanup of# cancelled handles is performed.# Minimum fraction of _scheduled timer handles that are cancelled# before cleanup of cancelled handles is performed.# Maximum timeout passed to select to avoid OS limitations# Used for deprecation and removal of `loop.create_datagram_endpoint()`'s# *reuse_address* parameter# format the task# Try to skip getaddrinfo if "host" is already an IP. Users might have# handled name resolution in their own code and pass in resolved IPs.# If port's a service name like "http", don't skip getaddrinfo.# Linux's inet_pton doesn't accept an IPv6 zone index after host,# like '::1%lo0'.# The host has already been resolved.# "host" is not an IP address.# Group addresses by family# Issue #22429: run_forever() already finished, no need to# stop it.# Never happens if peer disconnects after sending the whole content# Thus disconnection is always an exception from user perspective# Cancel the future.# Basically it has no effect because protocol is switched back,# no code should wait for it anymore.# Skip one loop iteration so that all 'loop.add_reader'# go through.# Identifier of the thread running the event loop, or None if the# event loop is not running# In debug mode, if the execution of a callback or a step of a task# exceed this duration in seconds, the slow callback/task is logged.# A weak set of all asynchronous generators that are# being iterated by the loop.# Set to True when `loop.shutdown_asyncgens` is called.# If Python version is <3.6 or we don't have any asynchronous# generators alive.# An exception is raised if the future didn't complete, so there# is no need to log the "destroy pending task" message# The coroutine raised a BaseException. Consume the exception# to not log a warning, the caller doesn't have access to the# local task.# NB: sendfile syscall is not supported for SSL sockets and# non-mmap files even if sendfile is supported by OS# EOF# all bind attempts failed# Use host as default for server_hostname. It is an error# if host is empty or not set, e.g. when an# already-connected socket was passed or when only a port# is given. To avoid this error, you can pass# server_hostname='' -- this will bypass the hostname# check. 
(This also means that if host is a numeric# IP/IPv6 address, we will attempt to verify that exact# address; this will probably fail, but it is possible to# create a certificate for a specific IP address, so we# don't judge it here.)# If using happy eyeballs, default to interleave addresses by family# not using happy eyeballs# using happy eyeballs# If they all have the same str(), raise one.# Raise a combined exception so the user can see all# the various error messages.# We allow AF_INET, AF_INET6, AF_UNIX as long as they# are SOCK_STREAM.# We support passing AF_UNIX sockets even though we have# a dedicated API for that: create_unix_connection.# Disallowing AF_UNIX in this method, breaks backwards# compatibility.# Get the socket from the transport because SSL transport closes# the old socket and creates a new SSL socket# Pause early so that "ssl_protocol.data_received()" doesn't# have a chance to get called before "ssl_protocol.connection_made()".# show the problematic kwargs in exception msg# Directory may have permissions only to create socket.# join address by (family, protocol)# Using order preserving dict# each addr has to have info for each (family, proto) pair# bpo-37228# "host" is already a resolved IP.# Assume it's a bad family/type/protocol combination.# Disable IPv4/IPv6 dual stack support (enabled by# default on Linux) which makes a single socket# listen on both address families.# don't log parameters: they may contain sensitive information# (password) and may be too long# Second protection layer for unexpected errors# in the default implementation, as well as for subclassed# event loops with overloaded "default_exception_handler".# Exception in the user set custom exception handler.# Let's try default handler.# Guard 'default_exception_handler' in case it is# overloaded.# Remove delayed calls that were cancelled if their number# is too high# Remove delayed calls that were cancelled from head of queue.# Compute the desired timeout.# Handle 'later' callbacks that are ready.# This is the only place where callbacks are actually *called*.# All other places just add them to ready.# Note: We run all currently scheduled callbacks, but not any# callbacks scheduled by callbacks run this time around --# they will be run the next time (after another I/O poll).# Use an idiom that is thread-safe without using locks.# Needed to break cycles when an exception occurs.b'Base implementation of event loop. + +The event loop can be broken up into a multiplexer (the part +responsible for notifying us of I/O events) and the event loop proper, +which wraps a multiplexer with functionality for scheduling callbacks, +immediately or at a given time in the future. + +Whenever a public API takes a callback, subsequent positional +arguments will be passed to the callback if/when it is called. This +avoids the proliferation of trivial lambdas implementing closures. +Keyword arguments for the callback are not supported; this is a +conscious design decision, leaving the door open for keyword arguments +to modify the meaning of the API call itself. +'u'Base implementation of event loop. + +The event loop can be broken up into a multiplexer (the part +responsible for notifying us of I/O events) and the event loop proper, +which wraps a multiplexer with functionality for scheduling callbacks, +immediately or at a given time in the future. + +Whenever a public API takes a callback, subsequent positional +arguments will be passed to the callback if/when it is called. 
This +avoids the proliferation of trivial lambdas implementing closures. +Keyword arguments for the callback are not supported; this is a +conscious design decision, leaving the door open for keyword arguments +to modify the meaning of the API call itself. +'b'BaseEventLoop'u'BaseEventLoop'b'AF_INET6'u'AF_INET6'b'__self__'u'__self__'b''u''b''u''b'reuse_port not supported by socket module'u'reuse_port not supported by socket module'b'reuse_port not supported by socket module, SO_REUSEPORT defined but not implemented.'u'reuse_port not supported by socket module, SO_REUSEPORT defined but not implemented.'b'inet_pton'u'inet_pton'b'idna'u'idna'b'Interleave list of addrinfo tuples by family.'u'Interleave list of addrinfo tuples by family.'b'TCP_NODELAY'u'TCP_NODELAY'b'transport should be _FlowControlMixin instance'u'transport should be _FlowControlMixin instance'b'Connection closed by peer'u'Connection closed by peer'b'Invalid state: connection should have been established already.'u'Invalid state: connection should have been established already.'b'Connection is closed by peer'u'Connection is closed by peer'b'Invalid state: reading should be paused'u'Invalid state: reading should be paused'b' sockets='u' sockets='b'server 'u'server 'b' is already being awaited on serve_forever()'u' is already being awaited on serve_forever()'b' is closed'u' is closed'b'monotonic'u'monotonic'b' running='u' running='b' closed='u' closed='b' debug='u' debug='b'Create a Future object attached to the loop.'u'Create a Future object attached to the loop.'b'Schedule a coroutine object. + + Return a task object. + 'u'Schedule a coroutine object. + + Return a task object. + 'b'Set a task factory that will be used by loop.create_task(). + + If factory is None the default task factory will be set. + + If factory is a callable, it should have a signature matching + '(loop, coro)', where 'loop' will be a reference to the active + event loop, 'coro' will be a coroutine object. The callable + must return a Future. + 'u'Set a task factory that will be used by loop.create_task(). + + If factory is None the default task factory will be set. + + If factory is a callable, it should have a signature matching + '(loop, coro)', where 'loop' will be a reference to the active + event loop, 'coro' will be a coroutine object. The callable + must return a Future. + 'b'task factory must be a callable or None'u'task factory must be a callable or None'b'Return a task factory, or None if the default one is in use.'u'Return a task factory, or None if the default one is in use.'b'Create socket transport.'u'Create socket transport.'b'Create SSL transport.'u'Create SSL transport.'b'Create datagram transport.'u'Create datagram transport.'b'Create read pipe transport.'u'Create read pipe transport.'b'Create write pipe transport.'u'Create write pipe transport.'b'Create subprocess transport.'u'Create subprocess transport.'b'Write a byte to self-pipe, to wake up the event loop. + + This may be called from a different thread. + + The subclass is responsible for implementing the self-pipe. + 'u'Write a byte to self-pipe, to wake up the event loop. + + This may be called from a different thread. + + The subclass is responsible for implementing the self-pipe. 
+ 'b'Process selector events.'u'Process selector events.'b'Event loop is closed'u'Event loop is closed'b'asynchronous generator 'u'asynchronous generator 'b' was scheduled after loop.shutdown_asyncgens() call'u' was scheduled after loop.shutdown_asyncgens() call'b'Shutdown all active asynchronous generators.'u'Shutdown all active asynchronous generators.'b'an error occurred during closing of asynchronous generator 'u'an error occurred during closing of asynchronous generator 'b'asyncgen'u'asyncgen'b'This event loop is already running'u'This event loop is already running'b'Cannot run the event loop while another loop is running'u'Cannot run the event loop while another loop is running'b'Run until stop() is called.'u'Run until stop() is called.'b'Run until the Future is done. + + If the argument is a coroutine, it is wrapped in a Task. + + WARNING: It would be disastrous to call run_until_complete() + with the same coroutine twice -- it would wrap it in two + different Tasks and that can't be good. + + Return the Future's result, or raise its exception. + 'u'Run until the Future is done. + + If the argument is a coroutine, it is wrapped in a Task. + + WARNING: It would be disastrous to call run_until_complete() + with the same coroutine twice -- it would wrap it in two + different Tasks and that can't be good. + + Return the Future's result, or raise its exception. + 'b'Event loop stopped before Future completed.'u'Event loop stopped before Future completed.'b'Stop running the event loop. + + Every callback already scheduled will still run. This simply informs + run_forever to stop looping after a complete iteration. + 'u'Stop running the event loop. + + Every callback already scheduled will still run. This simply informs + run_forever to stop looping after a complete iteration. + 'b'Close the event loop. + + This clears the queues and shuts down the executor, + but does not wait for the executor to finish. + + The event loop must not be running. + 'u'Close the event loop. + + This clears the queues and shuts down the executor, + but does not wait for the executor to finish. + + The event loop must not be running. + 'b'Cannot close a running event loop'u'Cannot close a running event loop'b'Close %r'u'Close %r'b'Returns True if the event loop was closed.'u'Returns True if the event loop was closed.'b'unclosed event loop 'u'unclosed event loop 'b'Returns True if the event loop is running.'u'Returns True if the event loop is running.'b'Return the time according to the event loop's clock. + + This is a float expressed in seconds since an epoch, but the + epoch, precision, accuracy and drift are unspecified and may + differ per event loop. + 'u'Return the time according to the event loop's clock. + + This is a float expressed in seconds since an epoch, but the + epoch, precision, accuracy and drift are unspecified and may + differ per event loop. + 'b'Arrange for a callback to be called at a given time. + + Return a Handle: an opaque object with a cancel() method that + can be used to cancel the call. + + The delay can be an int or float, expressed in seconds. It is + always relative to the current time. + + Each callback will be called exactly once. If two callbacks + are scheduled for exactly the same time, it undefined which + will be called first. + + Any positional arguments after the callback will be passed to + the callback when it is called. + 'u'Arrange for a callback to be called at a given time. 
+ + Return a Handle: an opaque object with a cancel() method that + can be used to cancel the call. + + The delay can be an int or float, expressed in seconds. It is + always relative to the current time. + + Each callback will be called exactly once. If two callbacks + are scheduled for exactly the same time, it undefined which + will be called first. + + Any positional arguments after the callback will be passed to + the callback when it is called. + 'b'Like call_later(), but uses an absolute time. + + Absolute time corresponds to the event loop's time() method. + 'u'Like call_later(), but uses an absolute time. + + Absolute time corresponds to the event loop's time() method. + 'b'call_at'u'call_at'b'Arrange for a callback to be called as soon as possible. + + This operates as a FIFO queue: callbacks are called in the + order in which they are registered. Each callback will be + called exactly once. + + Any positional arguments after the callback will be passed to + the callback when it is called. + 'u'Arrange for a callback to be called as soon as possible. + + This operates as a FIFO queue: callbacks are called in the + order in which they are registered. Each callback will be + called exactly once. + + Any positional arguments after the callback will be passed to + the callback when it is called. + 'b'call_soon'u'call_soon'b'coroutines cannot be used with 'u'coroutines cannot be used with 'b'()'u'()'b'a callable object was expected by 'u'a callable object was expected by 'b'(), got 'u'(), got 'b'Check that the current thread is the thread running the event loop. + + Non-thread-safe methods of this class make this assumption and will + likely behave incorrectly when the assumption is violated. + + Should only be called when (self._debug == True). The caller is + responsible for checking this condition for performance reasons. + 'u'Check that the current thread is the thread running the event loop. + + Non-thread-safe methods of this class make this assumption and will + likely behave incorrectly when the assumption is violated. + + Should only be called when (self._debug == True). The caller is + responsible for checking this condition for performance reasons. 
+ 'b'Non-thread-safe operation invoked on an event loop other than the current one'u'Non-thread-safe operation invoked on an event loop other than the current one'b'Like call_soon(), but thread-safe.'u'Like call_soon(), but thread-safe.'b'call_soon_threadsafe'u'call_soon_threadsafe'b'run_in_executor'u'run_in_executor'b'Using the default executor that is not an instance of ThreadPoolExecutor is deprecated and will be prohibited in Python 3.9'u'Using the default executor that is not an instance of ThreadPoolExecutor is deprecated and will be prohibited in Python 3.9'b'family='u'family='b'type='u'type='b'proto='u'proto='b'flags='u'flags='b'Get address info %s'u'Get address info %s'b'Getting address info 'u'Getting address info 'b' took 'u' took 'b'ms: 'u'ms: 'b'the socket must be non-blocking'u'the socket must be non-blocking'b'syscall sendfile is not available for socket 'u'syscall sendfile is not available for socket 'b' and file {file!r} combination'u' and file {file!r} combination'b'seek'u'seek'b'mode'u'mode'b'file should be opened in binary mode'u'file should be opened in binary mode'b'only SOCK_STREAM type sockets are supported'u'only SOCK_STREAM type sockets are supported'b'count must be a positive integer (got {!r})'u'count must be a positive integer (got {!r})'b'offset must be a non-negative integer (got {!r})'u'offset must be a non-negative integer (got {!r})'b'Create, bind and connect one socket.'u'Create, bind and connect one socket.'b'error while attempting to bind on address 'u'error while attempting to bind on address 'b'Connect to a TCP server. + + Create a streaming transport connection to a given Internet host and + port: socket family AF_INET or socket.AF_INET6 depending on host (or + family if specified), socket type SOCK_STREAM. protocol_factory must be + a callable returning a protocol instance. + + This method is a coroutine which will try to establish the connection + in the background. When successful, the coroutine returns a + (transport, protocol) pair. + 'u'Connect to a TCP server. + + Create a streaming transport connection to a given Internet host and + port: socket family AF_INET or socket.AF_INET6 depending on host (or + family if specified), socket type SOCK_STREAM. protocol_factory must be + a callable returning a protocol instance. + + This method is a coroutine which will try to establish the connection + in the background. When successful, the coroutine returns a + (transport, protocol) pair. + 'b'server_hostname is only meaningful with ssl'u'server_hostname is only meaningful with ssl'b'You must set server_hostname when using ssl without a host'u'You must set server_hostname when using ssl without a host'b'ssl_handshake_timeout is only meaningful with ssl'u'ssl_handshake_timeout is only meaningful with ssl'b'host/port and sock can not be specified at the same time'u'host/port and sock can not be specified at the same time'b'getaddrinfo() returned empty list'u'getaddrinfo() returned empty list'b'Multiple exceptions: {}'u'Multiple exceptions: {}'b'host and port was not specified and no sock specified'u'host and port was not specified and no sock specified'b'A Stream Socket was expected, got 'u'A Stream Socket was expected, got 'b'%r connected to %s:%r: (%r, %r)'u'%r connected to %s:%r: (%r, %r)'b'Send a file to transport. + + Return the total number of bytes which were sent. + + The method uses high-performance os.sendfile if available. + + file must be a regular file object opened in binary mode. + + offset tells from where to start reading the file. 
If specified, + count is the total number of bytes to transmit as opposed to + sending the file until EOF is reached. File position is updated on + return or also in case of error in which case file.tell() + can be used to figure out the number of bytes + which were sent. + + fallback set to True makes asyncio to manually read and send + the file when the platform does not support the sendfile syscall + (e.g. Windows or SSL socket on Unix). + + Raise SendfileNotAvailableError if the system does not support + sendfile syscall and fallback is False. + 'u'Send a file to transport. + + Return the total number of bytes which were sent. + + The method uses high-performance os.sendfile if available. + + file must be a regular file object opened in binary mode. + + offset tells from where to start reading the file. If specified, + count is the total number of bytes to transmit as opposed to + sending the file until EOF is reached. File position is updated on + return or also in case of error in which case file.tell() + can be used to figure out the number of bytes + which were sent. + + fallback set to True makes asyncio to manually read and send + the file when the platform does not support the sendfile syscall + (e.g. Windows or SSL socket on Unix). + + Raise SendfileNotAvailableError if the system does not support + sendfile syscall and fallback is False. + 'b'Transport is closing'u'Transport is closing'b'_sendfile_compatible'u'_sendfile_compatible'b'sendfile is not supported for transport 'u'sendfile is not supported for transport 'b'fallback is disabled and native sendfile is not supported for transport 'u'fallback is disabled and native sendfile is not supported for transport 'b'sendfile syscall is not supported'u'sendfile syscall is not supported'b'Upgrade transport to TLS. + + Return a new transport that *protocol* should start using + immediately. + 'u'Upgrade transport to TLS. + + Return a new transport that *protocol* should start using + immediately. + 'b'Python ssl module is not available'u'Python ssl module is not available'b'sslcontext is expected to be an instance of ssl.SSLContext, got 'u'sslcontext is expected to be an instance of ssl.SSLContext, got 'b'_start_tls_compatible'u'_start_tls_compatible'b'transport 'u'transport 'b' is not supported by start_tls()'u' is not supported by start_tls()'b'Create datagram connection.'u'Create datagram connection.'b'A UDP Socket was expected, got 'u'A UDP Socket was expected, got 'b'socket modifier keyword arguments can not be used when sock is specified. ('u'socket modifier keyword arguments can not be used when sock is specified. 
('b'unexpected address family'u'unexpected address family'b'string is expected'u'string is expected'u''b'Unable to check or remove stale UNIX socket %r: %r'u'Unable to check or remove stale UNIX socket %r: %r'b'2-tuple is expected'u'2-tuple is expected'b'can not get address information'u'can not get address information'b'Passing `reuse_address=True` is no longer supported, as the usage of SO_REUSEPORT in UDP poses a significant security concern.'u'Passing `reuse_address=True` is no longer supported, as the usage of SO_REUSEPORT in UDP poses a significant security concern.'b'The *reuse_address* parameter has been deprecated as of 3.5.10 and is scheduled for removal in 3.11.'u'The *reuse_address* parameter has been deprecated as of 3.5.10 and is scheduled for removal in 3.11.'b'Datagram endpoint local_addr=%r remote_addr=%r created: (%r, %r)'u'Datagram endpoint local_addr=%r remote_addr=%r created: (%r, %r)'b'Datagram endpoint remote_addr=%r created: (%r, %r)'u'Datagram endpoint remote_addr=%r created: (%r, %r)'b'getaddrinfo('u'getaddrinfo('b') returned empty list'u') returned empty list'b'Create a TCP server. + + The host parameter can be a string, in that case the TCP server is + bound to host and port. + + The host parameter can also be a sequence of strings and in that case + the TCP server is bound to all hosts of the sequence. If a host + appears multiple times (possibly indirectly e.g. when hostnames + resolve to the same IP address), the server is only bound once to that + host. + + Return a Server object which can be used to stop the service. + + This method is a coroutine. + 'u'Create a TCP server. + + The host parameter can be a string, in that case the TCP server is + bound to host and port. + + The host parameter can also be a sequence of strings and in that case + the TCP server is bound to all hosts of the sequence. If a host + appears multiple times (possibly indirectly e.g. when hostnames + resolve to the same IP address), the server is only bound once to that + host. + + Return a Server object which can be used to stop the service. + + This method is a coroutine. + 'b'ssl argument must be an SSLContext or None'u'ssl argument must be an SSLContext or None'b'create_server() failed to create socket.socket(%r, %r, %r)'u'create_server() failed to create socket.socket(%r, %r, %r)'b'IPPROTO_IPV6'u'IPPROTO_IPV6'b'error while attempting to bind on address %r: %s'u'error while attempting to bind on address %r: %s'b'Neither host/port nor sock were specified'u'Neither host/port nor sock were specified'b'%r is serving'u'%r is serving'b'Handle an accepted connection. + + This is used by servers that accept connections outside of + asyncio but that use asyncio to handle connections. + + This method is a coroutine. When completed, the coroutine + returns a (transport, protocol) pair. + 'u'Handle an accepted connection. + + This is used by servers that accept connections outside of + asyncio but that use asyncio to handle connections. + + This method is a coroutine. When completed, the coroutine + returns a (transport, protocol) pair. 
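The preceding docstrings describe loop.create_connection(), create_datagram_endpoint() and create_server(). A short sketch using the stream helpers that are built on top of those loop methods; the host, port and payload here are arbitrary:

import asyncio

async def handle(reader, writer):
    writer.write(await reader.read(100))     # echo a single chunk back
    await writer.drain()
    writer.close()

async def main():
    # start_server()/open_connection() wrap loop.create_server()/create_connection().
    server = await asyncio.start_server(handle, "127.0.0.1", 8888)
    reader, writer = await asyncio.open_connection("127.0.0.1", 8888)
    writer.write(b"ping")
    await writer.drain()
    print(await reader.read(100))            # b'ping'
    writer.close()
    server.close()
    await server.wait_closed()

asyncio.run(main())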
+ 'b'%r handled: (%r, %r)'u'%r handled: (%r, %r)'b'Read pipe %r connected: (%r, %r)'u'Read pipe %r connected: (%r, %r)'b'Write pipe %r connected: (%r, %r)'u'Write pipe %r connected: (%r, %r)'b'stdin='u'stdin='b'stdout=stderr='u'stdout=stderr='b'stdout='u'stdout='b'stderr='u'stderr='b'cmd must be a string'u'cmd must be a string'b'universal_newlines must be False'u'universal_newlines must be False'b'shell must be True'u'shell must be True'b'bufsize must be 0'u'bufsize must be 0'b'text must be False'u'text must be False'b'encoding must be None'u'encoding must be None'b'errors must be None'u'errors must be None'b'run shell command %r'u'run shell command %r'b'%s: %r'u'%s: %r'b'shell must be False'u'shell must be False'b'execute program 'u'execute program 'b'Return an exception handler, or None if the default one is in use. + 'u'Return an exception handler, or None if the default one is in use. + 'b'Set handler as the new event loop exception handler. + + If handler is None, the default exception handler will + be set. + + If handler is a callable object, it should have a + signature matching '(loop, context)', where 'loop' + will be a reference to the active event loop, 'context' + will be a dict object (see `call_exception_handler()` + documentation for details about context). + 'u'Set handler as the new event loop exception handler. + + If handler is None, the default exception handler will + be set. + + If handler is a callable object, it should have a + signature matching '(loop, context)', where 'loop' + will be a reference to the active event loop, 'context' + will be a dict object (see `call_exception_handler()` + documentation for details about context). + 'b'A callable object or None is expected, got 'u'A callable object or None is expected, got 'b'Default exception handler. + + This is called when an exception occurs and no exception + handler is set, and can be called by a custom exception + handler that wants to defer to the default behavior. + + This default handler logs the error message and other + context-dependent information. In debug mode, a truncated + stack trace is also appended showing where the given object + (e.g. a handle or future or task) was created, if any. + + The context parameter has the same meaning as in + `call_exception_handler()`. + 'u'Default exception handler. + + This is called when an exception occurs and no exception + handler is set, and can be called by a custom exception + handler that wants to defer to the default behavior. + + This default handler logs the error message and other + context-dependent information. In debug mode, a truncated + stack trace is also appended showing where the given object + (e.g. a handle or future or task) was created, if any. + + The context parameter has the same meaning as in + `call_exception_handler()`. + 'b'Unhandled exception in event loop'u'Unhandled exception in event loop'b'source_traceback'u'source_traceback'b'handle_traceback'u'handle_traceback'b'Object created at (most recent call last): +'u'Object created at (most recent call last): +'b'Handle created at (most recent call last): +'u'Handle created at (most recent call last): +'b'Call the current event loop's exception handler. 
+ + The context argument is a dict containing the following keys: + + - 'message': Error message; + - 'exception' (optional): Exception object; + - 'future' (optional): Future instance; + - 'task' (optional): Task instance; + - 'handle' (optional): Handle instance; + - 'protocol' (optional): Protocol instance; + - 'transport' (optional): Transport instance; + - 'socket' (optional): Socket instance; + - 'asyncgen' (optional): Asynchronous generator that caused + the exception. + + New keys maybe introduced in the future. + + Note: do not overload this method in an event loop subclass. + For custom exception handling, use the + `set_exception_handler()` method. + 'u'Call the current event loop's exception handler. + + The context argument is a dict containing the following keys: + + - 'message': Error message; + - 'exception' (optional): Exception object; + - 'future' (optional): Future instance; + - 'task' (optional): Task instance; + - 'handle' (optional): Handle instance; + - 'protocol' (optional): Protocol instance; + - 'transport' (optional): Transport instance; + - 'socket' (optional): Socket instance; + - 'asyncgen' (optional): Asynchronous generator that caused + the exception. + + New keys maybe introduced in the future. + + Note: do not overload this method in an event loop subclass. + For custom exception handling, use the + `set_exception_handler()` method. + 'b'Exception in default exception handler'u'Exception in default exception handler'b'Unhandled error in exception handler'u'Unhandled error in exception handler'b'context'u'context'b'Exception in default exception handler while handling an unexpected error in custom exception handler'u'Exception in default exception handler while handling an unexpected error in custom exception handler'b'Add a Handle to _scheduled (TimerHandle) or _ready.'u'Add a Handle to _scheduled (TimerHandle) or _ready.'b'A Handle is required here'u'A Handle is required here'b'Like _add_callback() but called from a signal handler.'u'Like _add_callback() but called from a signal handler.'b'Notification that a TimerHandle has been cancelled.'u'Notification that a TimerHandle has been cancelled.'b'Run one full iteration of the event loop. + + This calls all currently ready callbacks, polls for I/O, + schedules the resulting callbacks, and finally schedules + 'call_later' callbacks. + 'u'Run one full iteration of the event loop. + + This calls all currently ready callbacks, polls for I/O, + schedules the resulting callbacks, and finally schedules + 'call_later' callbacks. + 'b'Executing %s took %.3f seconds'u'Executing %s took %.3f seconds'u'asyncio.base_events'u'base_events'format_helpers_PENDING_CANCELLED_FINISHEDCheck for a Future. + + This returns True when obj is a Future instance or is advertising + itself as duck-type compatible by setting _asyncio_future_blocking. + See comment in Future for more details. 
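A few lines up, the docstrings for set_exception_handler() and call_exception_handler() spell out the handler signature and the keys of the context dict. A minimal sketch of wiring up a custom handler; the handler name and message text are made up:

import asyncio

def on_error(loop, context):
    # context is a dict: 'message' is always present; keys such as 'exception',
    # 'task' or 'handle' are optional.
    print("handled:", context["message"])

async def main():
    loop = asyncio.get_running_loop()
    loop.set_exception_handler(on_error)
    # Code that cannot propagate an error to awaiting callers reports it this way:
    loop.call_exception_handler({"message": "error in a detached callback"})

asyncio.run(main())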
+ _format_callbackshelper function for Future.__repr__format_cb_format_callback_source{}, {}{}, <{} more>, {}cb=[_repr_running_future_repr_infoexception=result=created at # States for Future.# bpo-42183: _repr_running is needed for repr protection# when a Future or Task result contains itself directly or indirectly.# The logic is borrowed from @reprlib.recursive_repr decorator.# Unfortunately, the direct decorator usage is impossible because of# AttributeError: '_asyncio.Task' object has no attribute '__module__' error.# After fixing this thing we can return to the decorator based approach.# (Future) -> str# use reprlib to limit the length of the output, especially# for very long stringsb'Check for a Future. + + This returns True when obj is a Future instance or is advertising + itself as duck-type compatible by setting _asyncio_future_blocking. + See comment in Future for more details. + 'u'Check for a Future. + + This returns True when obj is a Future instance or is advertising + itself as duck-type compatible by setting _asyncio_future_blocking. + See comment in Future for more details. + 'b'_asyncio_future_blocking'u'_asyncio_future_blocking'b'helper function for Future.__repr__'u'helper function for Future.__repr__'b'{}, {}'u'{}, {}'b'{}, <{} more>, {}'u'{}, <{} more>, {}'b'cb=['u'cb=['b'exception='u'exception='b'result='u'result='b'created at 'u'created at 'u'asyncio.base_futures'u'base_futures'BaseSubprocessTransportSubprocessTransport_protocol_proc_pid_returncode_exit_waiters_pending_calls_pipes_finished_extraprocess %r created: pid %s_connect_pipespid=returncode=not started<{}>pollClose running child process: kill %rkillunclosed transport get_pidget_returncodeget_pipe_transport_check_procsend_signalWriteSubprocessPipeProtoReadSubprocessPipeProto_pipe_connection_lostpipe_connection_lost_try_finish_pipe_data_receivedpipe_data_received_process_exited%r exited with return code %rprocess_exited_waitWait until the process exit and return the process return code. + + This method is a coroutine.disconnected_call_connection_lostBaseProtocol fd= pipe=# Create the child process: set the _proc attribute# has the child process finished?# the child process has finished, but the# transport hasn't been notified yet?# Don't clear the _proc reference yet: _post_init() may still run# asyncio uses a child watcher: copy the status into the Popen# object. On Python 3.6, it is required to avoid a ResourceWarning.# wake up futures waiting for wait()b'process %r created: pid %s'u'process %r created: pid %s'b'closed'u'closed'b'pid='u'pid='b'returncode='u'returncode='b'not started'u'not started'b'<{}>'u'<{}>'b'Close running child process: kill %r'u'Close running child process: kill %r'b'unclosed transport 'u'unclosed transport 'b'%r exited with return code %r'u'%r exited with return code %r'b'Wait until the process exit and return the process return code. + + This method is a coroutine.'u'Wait until the process exit and return the process return code. + + This method is a coroutine.'b' fd='u' fd='b' pipe='u' pipe='u'asyncio.base_subprocess'u'base_subprocess'linecachebase_futures_task_repr_infocancellingname=%r_format_coroutinecoro=", generated in interactive + mode, are returned unchanged. + Set values of attributes as ready to start debugging.botframe_set_stopinfotrace_dispatchDispatch a trace function for debugged frames based on the event. + + This function is installed as the trace function for debugged + frames. Its return value is the new trace function, which is + usually itself. 
The default implementation decides how to + dispatch a frame, depending on the type of event (passed in as a + string) that is about to be executed. + + The event can be one of the following: + line: A new line of code is going to be executed. + call: A function is about to be called or another code block + is entered. + return: A function or other code block is about to return. + exception: An exception has occurred. + c_call: A C function is about to be called. + c_return: A C function has returned. + c_exception: A C function has raised an exception. + + For the Python events, specialized functions (see the dispatch_*() + methods) are called. For the C events, no action is taken. + + The arg parameter depends on the previous event. + quittingdispatch_linedispatch_callreturndispatch_returndispatch_exceptionc_callc_exceptionc_returnbdb.Bdb.dispatch: unknown debugging event:Invoke user function and return trace function for line event. + + If the debugger stops on the current line, invoke + self.user_line(). Raise BdbQuit if self.quitting is set. + Return self.trace_dispatch to continue tracing in this scope. + stop_herebreak_hereuser_lineInvoke user function and return trace function for call event. + + If the debugger stops on this function call, invoke + self.user_call(). Raise BbdQuit if self.quitting is set. + Return self.trace_dispatch to continue tracing in this scope. + break_anywherestopframeco_flagsuser_callInvoke user function and return trace function for return event. + + If the debugger stops on this function return, invoke + self.user_return(). Raise BdbQuit if self.quitting is set. + Return self.trace_dispatch to continue tracing in this scope. + returnframeuser_returnstoplinenoInvoke user function and return trace function for exception event. + + If the debugger stops on this exception, invoke + self.user_exception(). Raise BdbQuit if self.quitting is set. + Return self.trace_dispatch to continue tracing in this scope. + user_exceptionis_skipped_moduleReturn True if module_name matches any skip pattern.Return True if frame is below the starting frame in the stack.Return True if there is an effective breakpoint for this line. + + Check for line or function breakpoint and if in effect. + Delete temporary breakpoints if effective() says to. + co_firstlinenoeffectivebpcurrentbptemporarydo_clearRemove temporary breakpoint. + + Must implement in derived classes or get NotImplementedError. + subclass of bdb must implement do_clear()Return True if there is any breakpoint for frame's filename. + argument_listCalled if we might stop in a function.Called when we stop or break at a line.return_valueCalled when a return trap is set here.Called when we stop on an exception.Set the attributes for stopping. + + If stoplineno is greater than or equal to 0, then stop at line + greater than or equal to the stopline. If stoplineno is -1, then + don't stop at all. + set_untilStop when the line with the lineno greater than the current one is + reached or when returning from current frame.set_stepStop after one line of code.caller_framef_traceset_nextStop on the next line in or below the given frame.set_returnStop when returning from the given frame.set_traceStart debugging from frame. + + If frame is not specified, debugging starts from caller's frame. + set_continueStop only at breakpoints or when finished. + + If there are no breakpoints, set the system trace function to None. + set_quitSet quitting attribute to True. + + Raises BdbQuit exception in the next call to a dispatch_*() method. 
+ set_breakcondfuncnameSet a new breakpoint for filename:lineno. + + If lineno doesn't exist for the filename, return an error message. + The filename should be in canonical form. + Line %s:%d does not exist_prune_breaksPrune breakpoints for filename:lineno. + + A list of breakpoints is maintained in the Bdb instance and in + the Breakpoint class. If a breakpoint in the Bdb instance no + longer exists in the Breakpoint class, then it's removed from the + Bdb instance. + bplistclear_breakDelete breakpoints for filename:lineno. + + If no breakpoints were set, return an error message. + There are no breakpoints in %sThere is no breakpoint at %s:%ddeleteMeclear_bpbynumberDelete a breakpoint by its index in Breakpoint.bpbynumber. + + If arg is invalid, return an error message. + get_bpbynumberclear_all_file_breaksDelete all breakpoints in filename. + + If none were set, return an error message. + blistclear_all_breaksDelete all existing breakpoints. + + If none were set, return an error message. + There are no breakpointsbpbynumberReturn a breakpoint by its index in Breakpoint.bybpnumber. + + For invalid arg values or if the breakpoint doesn't exist, + raise a ValueError. + Breakpoint number expectedNon-numeric breakpoint number %sBreakpoint number %d out of rangeBreakpoint %d already deletedget_breakReturn True if there is a breakpoint for filename:lineno.get_breaksReturn all breakpoints for filename:lineno. + + If no breakpoints are set, return an empty list. + get_file_breaksReturn all lines with breakpoints for filename. + + If no breakpoints are set, return an empty list. + get_all_breaksReturn all breakpoints that are set.Return a list of (frame, lineno) in a stack trace and a size. + + List starts with original calling frame, if there is one. + Size may be number of frames above or below f. + tb_linenoformat_stack_entryframe_linenolprefixReturn a string with information about a stack entry. + + The stack entry frame_lineno is a (frame, lineno) tuple. The + return string contains the canonical filename, the function name + or '', the input arguments, the return value, and the + line of code (if it exists). + + __return__f_locals->Debug a statement executed via the exec() function. + + globals defaults to __main__.dict; locals defaults to globals. + runevalDebug an expression executed via the eval() function. + + globals defaults to __main__.dict; locals defaults to globals. + runctxFor backwards-compatibility. Defers to run().runcallDebug a single function call. + + Return the result of the function call. + descriptor 'runcall' of 'Bdb' object needs an argument"descriptor 'runcall' of 'Bdb' object "Passing 'func' as keyword argument is deprecatedruncall expected at least 1 positional argument, got %d'runcall expected at least 1 positional argument, '($self, func, /, *args, **kwds)Start debugging with a Bdb instance from the caller's frame.Breakpoint class. + + Implements temporary breakpoints, ignore counts, disabling and + (re)-enabling, and conditionals. + + Breakpoints are indexed by number through bpbynumber and by + the (file, line) tuple using bplist. The former points to a + single instance of class Breakpoint. The latter points to a + list of such instances since there may be more than one + breakpoint per line. + + When creating a breakpoint, its associated filename should be + in canonical form. If funcname is defined, a breakpoint hit will be + counted when the first line of that function is executed. A + conditional breakpoint always counts a hit. 
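The Bdb and Breakpoint docstrings above describe the intended usage pattern: derive from bdb.Bdb, implement the user_* hooks, and drive a call through runcall() or run(). A small illustrative tracer under those assumptions; the traced function target() is invented for the example:

import bdb

class Tracer(bdb.Bdb):
    # Bdb dispatches the trace events; the subclass supplies the user_* callbacks.
    def user_line(self, frame):
        print("line", self.canonic(frame.f_code.co_filename), frame.f_lineno)
        self.set_step()                    # keep stopping at every line

    def user_return(self, frame, return_value):
        print("return ->", return_value)

def target(n):
    total = 0
    for i in range(n):
        total += i
    return total

print(Tracer().runcall(target, 3))         # prints each traced line, then 3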
+ func_first_executable_linehitsDelete the breakpoint from the list associated to a file:line. + + If it is the last breakpoint in that position, it also deletes + the entry for the file:line. + Mark the breakpoint as enabled.Mark the breakpoint as disabled.bpprintPrint the output of bpformat(). + + The optional out argument directs where the output is sent + and defaults to standard output. + bpformatReturn a string with information about the breakpoint. + + The information includes the breakpoint number, temporary + status, file:line position, break condition, number of times to + ignore, and number of times hit. + + del dispkeep yes no %-4dbreakpoint %s at %s:%d + stop only if %s + ignore next %d hitsss + breakpoint already hit %d time%sReturn a condensed description of the breakpoint.breakpoint %s at %s:%scheckfuncnameReturn True if break should happen here. + + Whether a break should happen depends on the way that b (the breakpoint) + was set. If it was set via line number, check if b.line is the same as + the one in the frame. If it was set via function name, check if this is + the right function and if it is on the first executable line. + Determine which breakpoint for this file:line is to be acted upon. + + Called only if we know there is a breakpoint at this location. Return + the breakpoint that was triggered and a boolean that indicates if it is + ok to delete a temporary breakpoint. Return (None, None) if there is no + matching breakpoint. + possiblesTdb???+++ call+++retval+++ returnexc_stuff+++ exceptionfoofoo(barbar returnedbar(import bdb; bdb.foo(10)# None# XXX 'arg' is no longer used# First call of dispatch since reset()# (CT) Note that this may also be None!# No need to trace this function# Ignore call events in generator except when stepping.# Ignore return events in generator except when stepping.# The user issued a 'next' or 'until' command.# When stepping with next/until/return in a generator frame, skip# the internal StopIteration exception (with no traceback)# triggered by a subiterator run with the 'yield from' statement.# Stop at the StopIteration or GeneratorExit exception when the user# has set stopframe in a generator by issuing a return command, or a# next/until command at the last statement in the generator before the# exception.# Normally derived classes don't override the following# methods, but they may if they want to redefine the# definition of stopping and breakpoints.# some modules do not have names# (CT) stopframe may now also be None, see dispatch_call.# (CT) the former test for None is therefore removed from here.# The line itself has no breakpoint, but maybe the line is the# first line of a function with breakpoint set by function name.# flag says ok to delete temp. bp# Derived classes should override the user_* methods# to gain control.# stoplineno >= 0 means: stop at line >= the stoplineno# stoplineno -1 means: don't stop at all# Derived classes and clients can call the following methods# to affect the stepping state.# the name "until" is borrowed from gdb# Issue #13183: pdb skips frames after hitting a breakpoint and running# step commands.# Restore the trace function in the caller (that may not have been set# for performance reasons) when returning from the current frame.# Don't stop except at breakpoints or when finished# no breakpoints; run without debugger overhead# to manipulate breakpoints. 
These methods return an# error message if something went wrong, None if all is well.# Set_break prints out the breakpoint line and file:lineno.# Call self.get_*break*() to see the breakpoints or better# for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().# Import as late as possible# If there's only one bp in the list for that file,line# pair, then remove the breaks entry# Derived classes and clients can call the following method# to get a data structure representing a stack trace.# The following methods can be called by clients to use# a debugger to debug a statement or an expression.# Both can be given as a string, or a code object.# B/W compatibility# This method is more useful to debug a single function call.# XXX Keeping state in the class is a mistake -- this means# you cannot have more than one active Bdb instance.# Next bp to be assigned# indexed by (file, lineno) tuple# Each entry is None or an instance of Bpt# index 0 is unused, except for marking an# effective break .... see effective()# Needed if funcname is not None.# This better be in canonical form!# Build the two lists# No longer in list# No more bp for this f:l combo# -----------end of Breakpoint class----------# Breakpoint was set via line number.# Breakpoint was set at a line with a def statement and the function# defined is called: don't break.# Breakpoint set via function name.# It's not a function call, but rather execution of def statement.# We are in the right frame.# The function is entered for the 1st time.# But we are not at the first line number: don't break.# Determines if there is an effective (active) breakpoint at this# line of code. Returns breakpoint number or 0 if none# Count every hit when bp is enabled# If unconditional, and ignoring go on to next, else break# breakpoint and marker that it's ok to delete if temporary# Conditional bp.# Ignore count applies only to those bpt hits where the# condition evaluates to true.# continue# else:# continue# if eval fails, most conservative thing is to stop on# breakpoint regardless of ignore count. Don't delete# temporary, as another hint to user.# -------------------- testing --------------------b'Debugger basics'u'Debugger basics'b'BdbQuit'u'BdbQuit'b'Bdb'u'Bdb'b'Breakpoint'u'Breakpoint'b'Exception to give up completely.'u'Exception to give up completely.'b'Generic Python debugger base class. + + This class takes care of details of the trace facility; + a derived class should implement user interaction. + The standard debugger class (pdb.Pdb) is an example. + + The optional skip argument must be an iterable of glob-style + module name patterns. The debugger will not step into frames + that originate in a module that matches one of these patterns. + Whether a frame is considered to originate in a certain module + is determined by the __name__ in the frame globals. + 'u'Generic Python debugger base class. + + This class takes care of details of the trace facility; + a derived class should implement user interaction. + The standard debugger class (pdb.Pdb) is an example. + + The optional skip argument must be an iterable of glob-style + module name patterns. The debugger will not step into frames + that originate in a module that matches one of these patterns. + Whether a frame is considered to originate in a certain module + is determined by the __name__ in the frame globals. + 'b'Return canonical form of filename. + + For real filenames, the canonical form is a case-normalized (on + case insensitive filesystems) absolute path. 
'Filenames' with + angle brackets, such as "", generated in interactive + mode, are returned unchanged. + 'u'Return canonical form of filename. + + For real filenames, the canonical form is a case-normalized (on + case insensitive filesystems) absolute path. 'Filenames' with + angle brackets, such as "", generated in interactive + mode, are returned unchanged. + 'b'Set values of attributes as ready to start debugging.'u'Set values of attributes as ready to start debugging.'b'Dispatch a trace function for debugged frames based on the event. + + This function is installed as the trace function for debugged + frames. Its return value is the new trace function, which is + usually itself. The default implementation decides how to + dispatch a frame, depending on the type of event (passed in as a + string) that is about to be executed. + + The event can be one of the following: + line: A new line of code is going to be executed. + call: A function is about to be called or another code block + is entered. + return: A function or other code block is about to return. + exception: An exception has occurred. + c_call: A C function is about to be called. + c_return: A C function has returned. + c_exception: A C function has raised an exception. + + For the Python events, specialized functions (see the dispatch_*() + methods) are called. For the C events, no action is taken. + + The arg parameter depends on the previous event. + 'u'Dispatch a trace function for debugged frames based on the event. + + This function is installed as the trace function for debugged + frames. Its return value is the new trace function, which is + usually itself. The default implementation decides how to + dispatch a frame, depending on the type of event (passed in as a + string) that is about to be executed. + + The event can be one of the following: + line: A new line of code is going to be executed. + call: A function is about to be called or another code block + is entered. + return: A function or other code block is about to return. + exception: An exception has occurred. + c_call: A C function is about to be called. + c_return: A C function has returned. + c_exception: A C function has raised an exception. + + For the Python events, specialized functions (see the dispatch_*() + methods) are called. For the C events, no action is taken. + + The arg parameter depends on the previous event. + 'b'call'u'call'b'return'u'return'b'c_call'u'c_call'b'c_exception'u'c_exception'b'c_return'u'c_return'b'bdb.Bdb.dispatch: unknown debugging event:'u'bdb.Bdb.dispatch: unknown debugging event:'b'Invoke user function and return trace function for line event. + + If the debugger stops on the current line, invoke + self.user_line(). Raise BdbQuit if self.quitting is set. + Return self.trace_dispatch to continue tracing in this scope. + 'u'Invoke user function and return trace function for line event. + + If the debugger stops on the current line, invoke + self.user_line(). Raise BdbQuit if self.quitting is set. + Return self.trace_dispatch to continue tracing in this scope. + 'b'Invoke user function and return trace function for call event. + + If the debugger stops on this function call, invoke + self.user_call(). Raise BbdQuit if self.quitting is set. + Return self.trace_dispatch to continue tracing in this scope. + 'u'Invoke user function and return trace function for call event. + + If the debugger stops on this function call, invoke + self.user_call(). Raise BbdQuit if self.quitting is set. 
+ Return self.trace_dispatch to continue tracing in this scope. + 'b'Invoke user function and return trace function for return event. + + If the debugger stops on this function return, invoke + self.user_return(). Raise BdbQuit if self.quitting is set. + Return self.trace_dispatch to continue tracing in this scope. + 'u'Invoke user function and return trace function for return event. + + If the debugger stops on this function return, invoke + self.user_return(). Raise BdbQuit if self.quitting is set. + Return self.trace_dispatch to continue tracing in this scope. + 'b'Invoke user function and return trace function for exception event. + + If the debugger stops on this exception, invoke + self.user_exception(). Raise BdbQuit if self.quitting is set. + Return self.trace_dispatch to continue tracing in this scope. + 'u'Invoke user function and return trace function for exception event. + + If the debugger stops on this exception, invoke + self.user_exception(). Raise BdbQuit if self.quitting is set. + Return self.trace_dispatch to continue tracing in this scope. + 'b'Return True if module_name matches any skip pattern.'u'Return True if module_name matches any skip pattern.'b'Return True if frame is below the starting frame in the stack.'u'Return True if frame is below the starting frame in the stack.'b'Return True if there is an effective breakpoint for this line. + + Check for line or function breakpoint and if in effect. + Delete temporary breakpoints if effective() says to. + 'u'Return True if there is an effective breakpoint for this line. + + Check for line or function breakpoint and if in effect. + Delete temporary breakpoints if effective() says to. + 'b'Remove temporary breakpoint. + + Must implement in derived classes or get NotImplementedError. + 'u'Remove temporary breakpoint. + + Must implement in derived classes or get NotImplementedError. + 'b'subclass of bdb must implement do_clear()'u'subclass of bdb must implement do_clear()'b'Return True if there is any breakpoint for frame's filename. + 'u'Return True if there is any breakpoint for frame's filename. + 'b'Called if we might stop in a function.'u'Called if we might stop in a function.'b'Called when we stop or break at a line.'u'Called when we stop or break at a line.'b'Called when a return trap is set here.'u'Called when a return trap is set here.'b'Called when we stop on an exception.'u'Called when we stop on an exception.'b'Set the attributes for stopping. + + If stoplineno is greater than or equal to 0, then stop at line + greater than or equal to the stopline. If stoplineno is -1, then + don't stop at all. + 'u'Set the attributes for stopping. + + If stoplineno is greater than or equal to 0, then stop at line + greater than or equal to the stopline. If stoplineno is -1, then + don't stop at all. + 'b'Stop when the line with the lineno greater than the current one is + reached or when returning from current frame.'u'Stop when the line with the lineno greater than the current one is + reached or when returning from current frame.'b'Stop after one line of code.'u'Stop after one line of code.'b'Stop on the next line in or below the given frame.'u'Stop on the next line in or below the given frame.'b'Stop when returning from the given frame.'u'Stop when returning from the given frame.'b'Start debugging from frame. + + If frame is not specified, debugging starts from caller's frame. + 'u'Start debugging from frame. + + If frame is not specified, debugging starts from caller's frame. 
+ 'b'Stop only at breakpoints or when finished. + + If there are no breakpoints, set the system trace function to None. + 'u'Stop only at breakpoints or when finished. + + If there are no breakpoints, set the system trace function to None. + 'b'Set quitting attribute to True. + + Raises BdbQuit exception in the next call to a dispatch_*() method. + 'u'Set quitting attribute to True. + + Raises BdbQuit exception in the next call to a dispatch_*() method. + 'b'Set a new breakpoint for filename:lineno. + + If lineno doesn't exist for the filename, return an error message. + The filename should be in canonical form. + 'u'Set a new breakpoint for filename:lineno. + + If lineno doesn't exist for the filename, return an error message. + The filename should be in canonical form. + 'b'Line %s:%d does not exist'u'Line %s:%d does not exist'b'Prune breakpoints for filename:lineno. + + A list of breakpoints is maintained in the Bdb instance and in + the Breakpoint class. If a breakpoint in the Bdb instance no + longer exists in the Breakpoint class, then it's removed from the + Bdb instance. + 'u'Prune breakpoints for filename:lineno. + + A list of breakpoints is maintained in the Bdb instance and in + the Breakpoint class. If a breakpoint in the Bdb instance no + longer exists in the Breakpoint class, then it's removed from the + Bdb instance. + 'b'Delete breakpoints for filename:lineno. + + If no breakpoints were set, return an error message. + 'u'Delete breakpoints for filename:lineno. + + If no breakpoints were set, return an error message. + 'b'There are no breakpoints in %s'u'There are no breakpoints in %s'b'There is no breakpoint at %s:%d'u'There is no breakpoint at %s:%d'b'Delete a breakpoint by its index in Breakpoint.bpbynumber. + + If arg is invalid, return an error message. + 'u'Delete a breakpoint by its index in Breakpoint.bpbynumber. + + If arg is invalid, return an error message. + 'b'Delete all breakpoints in filename. + + If none were set, return an error message. + 'u'Delete all breakpoints in filename. + + If none were set, return an error message. + 'b'Delete all existing breakpoints. + + If none were set, return an error message. + 'u'Delete all existing breakpoints. + + If none were set, return an error message. + 'b'There are no breakpoints'u'There are no breakpoints'b'Return a breakpoint by its index in Breakpoint.bybpnumber. + + For invalid arg values or if the breakpoint doesn't exist, + raise a ValueError. + 'u'Return a breakpoint by its index in Breakpoint.bybpnumber. + + For invalid arg values or if the breakpoint doesn't exist, + raise a ValueError. + 'b'Breakpoint number expected'u'Breakpoint number expected'b'Non-numeric breakpoint number %s'u'Non-numeric breakpoint number %s'b'Breakpoint number %d out of range'u'Breakpoint number %d out of range'b'Breakpoint %d already deleted'u'Breakpoint %d already deleted'b'Return True if there is a breakpoint for filename:lineno.'u'Return True if there is a breakpoint for filename:lineno.'b'Return all breakpoints for filename:lineno. + + If no breakpoints are set, return an empty list. + 'u'Return all breakpoints for filename:lineno. + + If no breakpoints are set, return an empty list. + 'b'Return all lines with breakpoints for filename. + + If no breakpoints are set, return an empty list. + 'u'Return all lines with breakpoints for filename. + + If no breakpoints are set, return an empty list. 
+ 'b'Return all breakpoints that are set.'u'Return all breakpoints that are set.'b'Return a list of (frame, lineno) in a stack trace and a size. + + List starts with original calling frame, if there is one. + Size may be number of frames above or below f. + 'u'Return a list of (frame, lineno) in a stack trace and a size. + + List starts with original calling frame, if there is one. + Size may be number of frames above or below f. + 'b'Return a string with information about a stack entry. + + The stack entry frame_lineno is a (frame, lineno) tuple. The + return string contains the canonical filename, the function name + or '', the input arguments, the return value, and the + line of code (if it exists). + + 'u'Return a string with information about a stack entry. + + The stack entry frame_lineno is a (frame, lineno) tuple. The + return string contains the canonical filename, the function name + or '', the input arguments, the return value, and the + line of code (if it exists). + + 'b''u''b'__return__'u'__return__'b'->'u'->'b'Debug a statement executed via the exec() function. + + globals defaults to __main__.dict; locals defaults to globals. + 'u'Debug a statement executed via the exec() function. + + globals defaults to __main__.dict; locals defaults to globals. + 'b'Debug an expression executed via the eval() function. + + globals defaults to __main__.dict; locals defaults to globals. + 'u'Debug an expression executed via the eval() function. + + globals defaults to __main__.dict; locals defaults to globals. + 'b'For backwards-compatibility. Defers to run().'u'For backwards-compatibility. Defers to run().'b'Debug a single function call. + + Return the result of the function call. + 'u'Debug a single function call. + + Return the result of the function call. + 'b'descriptor 'runcall' of 'Bdb' object needs an argument'u'descriptor 'runcall' of 'Bdb' object needs an argument'b'func'b'Passing 'func' as keyword argument is deprecated'u'Passing 'func' as keyword argument is deprecated'b'runcall expected at least 1 positional argument, got %d'u'runcall expected at least 1 positional argument, got %d'b'($self, func, /, *args, **kwds)'u'($self, func, /, *args, **kwds)'b'Start debugging with a Bdb instance from the caller's frame.'u'Start debugging with a Bdb instance from the caller's frame.'b'Breakpoint class. + + Implements temporary breakpoints, ignore counts, disabling and + (re)-enabling, and conditionals. + + Breakpoints are indexed by number through bpbynumber and by + the (file, line) tuple using bplist. The former points to a + single instance of class Breakpoint. The latter points to a + list of such instances since there may be more than one + breakpoint per line. + + When creating a breakpoint, its associated filename should be + in canonical form. If funcname is defined, a breakpoint hit will be + counted when the first line of that function is executed. A + conditional breakpoint always counts a hit. + 'u'Breakpoint class. + + Implements temporary breakpoints, ignore counts, disabling and + (re)-enabling, and conditionals. + + Breakpoints are indexed by number through bpbynumber and by + the (file, line) tuple using bplist. The former points to a + single instance of class Breakpoint. The latter points to a + list of such instances since there may be more than one + breakpoint per line. + + When creating a breakpoint, its associated filename should be + in canonical form. If funcname is defined, a breakpoint hit will be + counted when the first line of that function is executed. 
A + conditional breakpoint always counts a hit. + 'b'Delete the breakpoint from the list associated to a file:line. + + If it is the last breakpoint in that position, it also deletes + the entry for the file:line. + 'u'Delete the breakpoint from the list associated to a file:line. + + If it is the last breakpoint in that position, it also deletes + the entry for the file:line. + 'b'Mark the breakpoint as enabled.'u'Mark the breakpoint as enabled.'b'Mark the breakpoint as disabled.'u'Mark the breakpoint as disabled.'b'Print the output of bpformat(). + + The optional out argument directs where the output is sent + and defaults to standard output. + 'u'Print the output of bpformat(). + + The optional out argument directs where the output is sent + and defaults to standard output. + 'b'Return a string with information about the breakpoint. + + The information includes the breakpoint number, temporary + status, file:line position, break condition, number of times to + ignore, and number of times hit. + + 'u'Return a string with information about the breakpoint. + + The information includes the breakpoint number, temporary + status, file:line position, break condition, number of times to + ignore, and number of times hit. + + 'b'del 'u'del 'b'keep 'u'keep 'b'yes 'u'yes 'b'no 'u'no 'b'%-4dbreakpoint %s at %s:%d'u'%-4dbreakpoint %s at %s:%d'b' + stop only if %s'u' + stop only if %s'b' + ignore next %d hits'u' + ignore next %d hits'b' + breakpoint already hit %d time%s'u' + breakpoint already hit %d time%s'b'Return a condensed description of the breakpoint.'u'Return a condensed description of the breakpoint.'b'breakpoint %s at %s:%s'u'breakpoint %s at %s:%s'b'Return True if break should happen here. + + Whether a break should happen depends on the way that b (the breakpoint) + was set. If it was set via line number, check if b.line is the same as + the one in the frame. If it was set via function name, check if this is + the right function and if it is on the first executable line. + 'u'Return True if break should happen here. + + Whether a break should happen depends on the way that b (the breakpoint) + was set. If it was set via line number, check if b.line is the same as + the one in the frame. If it was set via function name, check if this is + the right function and if it is on the first executable line. + 'b'Determine which breakpoint for this file:line is to be acted upon. + + Called only if we know there is a breakpoint at this location. Return + the breakpoint that was triggered and a boolean that indicates if it is + ok to delete a temporary breakpoint. Return (None, None) if there is no + matching breakpoint. + 'u'Determine which breakpoint for this file:line is to be acted upon. + + Called only if we know there is a breakpoint at this location. Return + the breakpoint that was triggered and a boolean that indicates if it is + ok to delete a temporary breakpoint. Return (None, None) if there is no + matching breakpoint. 
+ 'b'???'u'???'b'+++ call'u'+++ call'b'+++'u'+++'b'+++ return'u'+++ return'b'+++ exception'u'+++ exception'b'foo('u'foo('b'bar returned'u'bar returned'b'bar('u'bar('b'import bdb; bdb.foo(10)'u'import bdb; bdb.foo(10)'u'bdb'u'binascii'binascii.Erroru'Incomplete.__weakref__'binascii.IncompleteIncompleteu'Conversion between binary data and ASCII'u'/Users/pwntester/.pyenv/versions/3.8.13/lib/python3.8/lib-dynload/binascii.cpython-38-darwin.so'a2b_hexa2b_hqxa2b_qpa2b_uub2a_hexb2a_hqxb2a_qpb2a_uucrc32crc_hqxrlecode_hqxrledecode_hqxBisection algorithms.lohiInsert item x in list a, and keep it sorted assuming a is sorted. + + If x is already in a, insert it to the right of the rightmost x. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + Return the index where to insert item x in list a, assuming a is sorted. + + The return value i is such that all e in a[:i] have e <= x, and all e in + a[i:] have e > x. So if x already appears in the list, a.insert(x) will + insert just after the rightmost x already there. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + lo must be non-negativemidInsert item x in list a, and keep it sorted assuming a is sorted. + + If x is already in a, insert it to the left of the leftmost x. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + Return the index where to insert item x in list a, assuming a is sorted. + + The return value i is such that all e in a[:i] have e < x, and all e in + a[i:] have e >= x. So if x already appears in the list, a.insert(x) will + insert just before the leftmost x already there. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + bisectinsort# Overwrite above definitions with a fast C implementation# Create aliasesb'Bisection algorithms.'u'Bisection algorithms.'b'Insert item x in list a, and keep it sorted assuming a is sorted. + + If x is already in a, insert it to the right of the rightmost x. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + 'u'Insert item x in list a, and keep it sorted assuming a is sorted. + + If x is already in a, insert it to the right of the rightmost x. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + 'b'Return the index where to insert item x in list a, assuming a is sorted. + + The return value i is such that all e in a[:i] have e <= x, and all e in + a[i:] have e > x. So if x already appears in the list, a.insert(x) will + insert just after the rightmost x already there. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + 'u'Return the index where to insert item x in list a, assuming a is sorted. + + The return value i is such that all e in a[:i] have e <= x, and all e in + a[i:] have e > x. So if x already appears in the list, a.insert(x) will + insert just after the rightmost x already there. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + 'b'lo must be non-negative'u'lo must be non-negative'b'Insert item x in list a, and keep it sorted assuming a is sorted. + + If x is already in a, insert it to the left of the leftmost x. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + 'u'Insert item x in list a, and keep it sorted assuming a is sorted. 
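The bisect docstrings above (insort, insort_left, bisect, bisect_left) explain where equal elements end up relative to existing ones. A quick demonstration on a small sorted list:

import bisect

scores = [60, 70, 70, 85]
bisect.insort(scores, 70)                  # insort is insort_right: inserts after equal entries
print(scores)                              # [60, 70, 70, 70, 85]

lo = bisect.bisect_left(scores, 70)        # 1 -- first index of the run of 70s
hi = bisect.bisect(scores, 70)             # 4 -- one past the last 70
print(scores[lo:hi])                       # [70, 70, 70]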
+ + If x is already in a, insert it to the left of the leftmost x. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + 'b'Return the index where to insert item x in list a, assuming a is sorted. + + The return value i is such that all e in a[:i] have e < x, and all e in + a[i:] have e >= x. So if x already appears in the list, a.insert(x) will + insert just before the leftmost x already there. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + 'u'Return the index where to insert item x in list a, assuming a is sorted. + + The return value i is such that all e in a[:i] have e < x, and all e in + a[i:] have e >= x. So if x already appears in the list, a.insert(x) will + insert just before the leftmost x already there. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + 'u'bisect'A bottom-up tree matching algorithm implementation meant to speed +up 2to3's matching process. After the tree patterns are reduced to +their rarest linear path, a linear Aho-Corasick automaton is +created. The linear automaton traverses the linear paths from the +leaves to the root of the AST and returns a set of nodes for further +matching. This reduces significantly the number of candidate nodes.George Boutsioukis pytreebtm_utilsreduce_treeBMNodeClass for a node of the Aho-Corasick automaton used in matchingtransition_tablefixerscontentBottomMatcherThe main matcher class. After instantiating the patterns should + be added using the add_fixer methodnodesRefactoringTooladd_fixerfixerReduces a fixer's pattern tree to a linear path and adds it + to the matcher(a common Aho-Corasick automaton). The fixer is + appended on the matching states and called when they are + reachedpattern_treeget_linear_subpatternlinearmatch_nodesmatch_nodeRecursively adds a linear pattern to the AC automatonalternativeend_nodesnext_nodeleavesThe main interface with the bottom matcher. The tree is + traversed from the bottom using the constructed + automaton. Nodes are only checked once as the tree is + retraversed. When the automaton fails, we give it one more + shot(in case the above tree matches as a whole with the + rejected leaf), then we break for the next leaf. There is the + special case of multiple arguments(see code comments) where we + recheck the nodes + + Args: + The leaves of the AST tree to be matched + + Returns: + A dictionary of node matches with fixers as the keys + current_ac_nodeleafcurrent_ast_nodewas_checkedLeafnode_tokenprint_acPrints a graphviz diagram of the BM automaton(for debugging)digraph g{print_nodesubnode_keysubnode%d -> %d [label=%s] //%stype_repr_type_reprstype_numpygrampython_symbols#print("adding pattern", pattern, "to", start)#print("empty pattern")#alternatives#print("alternatives")#add all alternatives, and add the rest of the pattern#to each end node#single token#not last#transition did not exist, create new#transition exists already, follow# multiple statements, recheck#name#token matches#matching failed, reset automaton#the rest of the tree upwards has been checked, next leaf#recheck the rejected node once from the root# taken from pytree.py for debugging; only used by print_ac# printing tokens is possible but not as useful# from .pgen2 import token // token.__dict__.items():b'A bottom-up tree matching algorithm implementation meant to speed +up 2to3's matching process. 
[Raw string data from the checked-in CodeQL Python database, apparently its string pool: literal source text and docstrings of lib2to3.btm_matcher (the bottom-up Aho-Corasick tree matcher that speeds up 2to3 pattern matching) and lib2to3.btm_utils (helpers that reduce compiled pattern trees to linear leaf-to-root subpatterns), with each constant stored twice as a bytes/str pair, followed by the opening of the bz2 module text.]
[Raw string-pool data continued: the bz2 module — the BZ2File wrapper, the module-level open(), and the one-shot compress()/decompress() helpers for libbzip2 — with every docstring and error message duplicated as a bytes/str pair, running into the start of the calendar module text.]
[Raw string-pool data continued: the calendar module — Calendar, TextCalendar, HTMLCalendar and their locale-aware variants, the weekday/month helpers, fragments of the HTML table markup emitted by HTMLCalendar, and the module's command-line option strings — again duplicated as bytes/str pairs, running into the start of the unittest.case text.]
[Raw string-pool data continued: the unittest.case module — TestCase, the skip decorators, the assertRaises/assertWarns/assertLogs context managers, and the assert* method docstrings — duplicated as bytes/str pairs.]
+ 'u'Return a context manager that will return the enclosed block + of code in a subtest identified by the optional message and + keyword parameters. A failure in the subtest marks the test + case as failed but resumes execution at the end of the enclosed + block, allowing further test code to be executed. + 'b'TestResult has no addExpectedFailure method, reporting as passes'u'TestResult has no addExpectedFailure method, reporting as passes'b'TestResult has no addUnexpectedSuccess method, reporting as failure'u'TestResult has no addUnexpectedSuccess method, reporting as failure'b'startTestRun'u'startTestRun'b'__unittest_skip__'u'__unittest_skip__'b'__unittest_skip_why__'u'__unittest_skip_why__'b'__unittest_expecting_failure__'u'__unittest_expecting_failure__'b'stopTestRun'u'stopTestRun'b'Execute all cleanup functions. Normally called for you after + tearDown.'u'Execute all cleanup functions. Normally called for you after + tearDown.'b'Execute all class cleanup functions. Normally called for you after + tearDownClass.'u'Execute all class cleanup functions. Normally called for you after + tearDownClass.'b'Run the test without collecting errors in a TestResult'u'Run the test without collecting errors in a TestResult'b'Skip this test.'u'Skip this test.'b'Fail immediately, with the given message.'u'Fail immediately, with the given message.'b'Check that the expression is false.'u'Check that the expression is false.'b'%s is not false'u'%s is not false'b'Check that the expression is true.'u'Check that the expression is true.'b'%s is not true'u'%s is not true'b'Honour the longMessage attribute when generating failure messages. + If longMessage is False this means: + * Use only an explicit message if it is provided + * Otherwise use the standard message for the assert + + If longMessage is True: + * Use the standard message + * If an explicit message is provided, plus ' : ' and the explicit message + 'u'Honour the longMessage attribute when generating failure messages. + If longMessage is False this means: + * Use only an explicit message if it is provided + * Otherwise use the standard message for the assert + + If longMessage is True: + * Use the standard message + * If an explicit message is provided, plus ' : ' and the explicit message + 'b'%s : %s'u'%s : %s'b'Fail unless an exception of class expected_exception is raised + by the callable when invoked with specified positional and + keyword arguments. If a different type of exception is + raised, it will not be caught, and the test case will be + deemed to have suffered an error, exactly as for an + unexpected exception. + + If called with the callable and arguments omitted, will return a + context object used like this:: + + with self.assertRaises(SomeException): + do_something() + + An optional keyword argument 'msg' can be provided when assertRaises + is used as a context object. + + The context manager keeps a reference to the exception as + the 'exception' attribute. This allows you to inspect the + exception after the assertion:: + + with self.assertRaises(SomeException) as cm: + do_something() + the_exception = cm.exception + self.assertEqual(the_exception.error_code, 3) + 'u'Fail unless an exception of class expected_exception is raised + by the callable when invoked with specified positional and + keyword arguments. If a different type of exception is + raised, it will not be caught, and the test case will be + deemed to have suffered an error, exactly as for an + unexpected exception. 
+ + If called with the callable and arguments omitted, will return a + context object used like this:: + + with self.assertRaises(SomeException): + do_something() + + An optional keyword argument 'msg' can be provided when assertRaises + is used as a context object. + + The context manager keeps a reference to the exception as + the 'exception' attribute. This allows you to inspect the + exception after the assertion:: + + with self.assertRaises(SomeException) as cm: + do_something() + the_exception = cm.exception + self.assertEqual(the_exception.error_code, 3) + 'b'assertRaises'u'assertRaises'b'Fail unless a warning of class warnClass is triggered + by the callable when invoked with specified positional and + keyword arguments. If a different type of warning is + triggered, it will not be handled: depending on the other + warning filtering rules in effect, it might be silenced, printed + out, or raised as an exception. + + If called with the callable and arguments omitted, will return a + context object used like this:: + + with self.assertWarns(SomeWarning): + do_something() + + An optional keyword argument 'msg' can be provided when assertWarns + is used as a context object. + + The context manager keeps a reference to the first matching + warning as the 'warning' attribute; similarly, the 'filename' + and 'lineno' attributes give you information about the line + of Python code from which the warning was triggered. + This allows you to inspect the warning after the assertion:: + + with self.assertWarns(SomeWarning) as cm: + do_something() + the_warning = cm.warning + self.assertEqual(the_warning.some_attribute, 147) + 'u'Fail unless a warning of class warnClass is triggered + by the callable when invoked with specified positional and + keyword arguments. If a different type of warning is + triggered, it will not be handled: depending on the other + warning filtering rules in effect, it might be silenced, printed + out, or raised as an exception. + + If called with the callable and arguments omitted, will return a + context object used like this:: + + with self.assertWarns(SomeWarning): + do_something() + + An optional keyword argument 'msg' can be provided when assertWarns + is used as a context object. + + The context manager keeps a reference to the first matching + warning as the 'warning' attribute; similarly, the 'filename' + and 'lineno' attributes give you information about the line + of Python code from which the warning was triggered. + This allows you to inspect the warning after the assertion:: + + with self.assertWarns(SomeWarning) as cm: + do_something() + the_warning = cm.warning + self.assertEqual(the_warning.some_attribute, 147) + 'b'assertWarns'u'assertWarns'b'Fail unless a log message of level *level* or higher is emitted + on *logger_name* or its children. If omitted, *level* defaults to + INFO and *logger* defaults to the root logger. + + This method must be used as a context manager, and will yield + a recording object with two attributes: `output` and `records`. + At the end of the context manager, the `output` attribute will + be a list of the matching formatted log messages and the + `records` attribute will be a list of the corresponding LogRecord + objects. 
+ + Example:: + + with self.assertLogs('foo', level='INFO') as cm: + logging.getLogger('foo').info('first message') + logging.getLogger('foo.bar').error('second message') + self.assertEqual(cm.output, ['INFO:foo:first message', + 'ERROR:foo.bar:second message']) + 'u'Fail unless a log message of level *level* or higher is emitted + on *logger_name* or its children. If omitted, *level* defaults to + INFO and *logger* defaults to the root logger. + + This method must be used as a context manager, and will yield + a recording object with two attributes: `output` and `records`. + At the end of the context manager, the `output` attribute will + be a list of the matching formatted log messages and the + `records` attribute will be a list of the corresponding LogRecord + objects. + + Example:: + + with self.assertLogs('foo', level='INFO') as cm: + logging.getLogger('foo').info('first message') + logging.getLogger('foo.bar').error('second message') + self.assertEqual(cm.output, ['INFO:foo:first message', + 'ERROR:foo.bar:second message']) + 'b'Get a detailed comparison function for the types of the two args. + + Returns: A callable accepting (first, second, msg=None) that will + raise a failure exception if first != second with a useful human + readable error message for those types. + 'u'Get a detailed comparison function for the types of the two args. + + Returns: A callable accepting (first, second, msg=None) that will + raise a failure exception if first != second with a useful human + readable error message for those types. + 'b'The default assertEqual implementation, not type specific.'u'The default assertEqual implementation, not type specific.'b'%s != %s'u'%s != %s'b'Fail if the two objects are unequal as determined by the '==' + operator. + 'u'Fail if the two objects are unequal as determined by the '==' + operator. + 'b'Fail if the two objects are equal as determined by the '!=' + operator. + 'u'Fail if the two objects are equal as determined by the '!=' + operator. + 'b'%s == %s'u'%s == %s'b'Fail if the two objects are unequal as determined by their + difference rounded to the given number of decimal places + (default 7) and comparing to zero, or by comparing that the + difference between the two objects is more than the given + delta. + + Note that decimal places (from zero) are usually not the same + as significant digits (measured from the most significant digit). + + If the two objects compare equal then they will automatically + compare almost equal. + 'u'Fail if the two objects are unequal as determined by their + difference rounded to the given number of decimal places + (default 7) and comparing to zero, or by comparing that the + difference between the two objects is more than the given + delta. + + Note that decimal places (from zero) are usually not the same + as significant digits (measured from the most significant digit). + + If the two objects compare equal then they will automatically + compare almost equal. + 'b'specify delta or places not both'u'specify delta or places not both'b'%s != %s within %s delta (%s difference)'u'%s != %s within %s delta (%s difference)'b'%s != %s within %r places (%s difference)'u'%s != %s within %r places (%s difference)'b'Fail if the two objects are equal as determined by their + difference rounded to the given number of decimal places + (default 7) and comparing to zero, or by comparing that the + difference between the two objects is less than the given delta. 
+ + Note that decimal places (from zero) are usually not the same + as significant digits (measured from the most significant digit). + + Objects that are equal automatically fail. + 'u'Fail if the two objects are equal as determined by their + difference rounded to the given number of decimal places + (default 7) and comparing to zero, or by comparing that the + difference between the two objects is less than the given delta. + + Note that decimal places (from zero) are usually not the same + as significant digits (measured from the most significant digit). + + Objects that are equal automatically fail. + 'b'%s == %s within %s delta (%s difference)'u'%s == %s within %s delta (%s difference)'b'%s == %s within %r places'u'%s == %s within %r places'b'An equality assertion for ordered sequences (like lists and tuples). + + For the purposes of this function, a valid ordered sequence type is one + which can be indexed, has a length, and has an equality operator. + + Args: + seq1: The first sequence to compare. + seq2: The second sequence to compare. + seq_type: The expected datatype of the sequences, or None if no + datatype should be enforced. + msg: Optional message to use on failure instead of a list of + differences. + 'u'An equality assertion for ordered sequences (like lists and tuples). + + For the purposes of this function, a valid ordered sequence type is one + which can be indexed, has a length, and has an equality operator. + + Args: + seq1: The first sequence to compare. + seq2: The second sequence to compare. + seq_type: The expected datatype of the sequences, or None if no + datatype should be enforced. + msg: Optional message to use on failure instead of a list of + differences. + 'b'First sequence is not a %s: %s'u'First sequence is not a %s: %s'b'Second sequence is not a %s: %s'u'Second sequence is not a %s: %s'b'sequence'u'sequence'b'First %s has no length. Non-sequence?'u'First %s has no length. Non-sequence?'b'Second %s has no length. Non-sequence?'u'Second %s has no length. Non-sequence?'b'%ss differ: %s != %s +'u'%ss differ: %s != %s +'b' +Unable to index element %d of first %s +'u' +Unable to index element %d of first %s +'b' +Unable to index element %d of second %s +'u' +Unable to index element %d of second %s +'b' +First differing element %d: +%s +%s +'u' +First differing element %d: +%s +%s +'b' +First %s contains %d additional elements. +'u' +First %s contains %d additional elements. +'b'First extra element %d: +%s +'u'First extra element %d: +%s +'b'Unable to index element %d of first %s +'u'Unable to index element %d of first %s +'b' +Second %s contains %d additional elements. +'u' +Second %s contains %d additional elements. +'b'Unable to index element %d of second %s +'u'Unable to index element %d of second %s +'b'A list-specific equality assertion. + + Args: + list1: The first list to compare. + list2: The second list to compare. + msg: Optional message to use on failure instead of a list of + differences. + + 'u'A list-specific equality assertion. + + Args: + list1: The first list to compare. + list2: The second list to compare. + msg: Optional message to use on failure instead of a list of + differences. + + 'b'A tuple-specific equality assertion. + + Args: + tuple1: The first tuple to compare. + tuple2: The second tuple to compare. + msg: Optional message to use on failure instead of a list of + differences. + 'u'A tuple-specific equality assertion. + + Args: + tuple1: The first tuple to compare. + tuple2: The second tuple to compare. 
+ msg: Optional message to use on failure instead of a list of + differences. + 'b'A set-specific equality assertion. + + Args: + set1: The first set to compare. + set2: The second set to compare. + msg: Optional message to use on failure instead of a list of + differences. + + assertSetEqual uses ducktyping to support different types of sets, and + is optimized for sets specifically (parameters must support a + difference method). + 'u'A set-specific equality assertion. + + Args: + set1: The first set to compare. + set2: The second set to compare. + msg: Optional message to use on failure instead of a list of + differences. + + assertSetEqual uses ducktyping to support different types of sets, and + is optimized for sets specifically (parameters must support a + difference method). + 'b'invalid type when attempting set difference: %s'u'invalid type when attempting set difference: %s'b'first argument does not support set difference: %s'u'first argument does not support set difference: %s'b'second argument does not support set difference: %s'u'second argument does not support set difference: %s'b'Items in the first set but not the second:'u'Items in the first set but not the second:'b'Items in the second set but not the first:'u'Items in the second set but not the first:'b'Just like self.assertTrue(a in b), but with a nicer default message.'u'Just like self.assertTrue(a in b), but with a nicer default message.'b'%s not found in %s'u'%s not found in %s'b'Just like self.assertTrue(a not in b), but with a nicer default message.'u'Just like self.assertTrue(a not in b), but with a nicer default message.'b'%s unexpectedly found in %s'u'%s unexpectedly found in %s'b'Just like self.assertTrue(a is b), but with a nicer default message.'u'Just like self.assertTrue(a is b), but with a nicer default message.'b'%s is not %s'u'%s is not %s'b'Just like self.assertTrue(a is not b), but with a nicer default message.'u'Just like self.assertTrue(a is not b), but with a nicer default message.'b'unexpectedly identical: %s'u'unexpectedly identical: %s'b'First argument is not a dictionary'u'First argument is not a dictionary'b'Second argument is not a dictionary'u'Second argument is not a dictionary'b'Checks whether dictionary is a superset of subset.'u'Checks whether dictionary is a superset of subset.'b'assertDictContainsSubset is deprecated'u'assertDictContainsSubset is deprecated'b'%s, expected: %s, actual: %s'u'%s, expected: %s, actual: %s'b'Missing: %s'u'Missing: %s'b'; 'u'; 'b'Mismatched values: %s'u'Mismatched values: %s'b'Asserts that two iterables have the same elements, the same number of + times, without regard to order. + + self.assertEqual(Counter(list(first)), + Counter(list(second))) + + Example: + - [0, 1, 1] and [1, 0, 1] compare equal. + - [0, 0, 1] and [0, 1] compare unequal. + + 'u'Asserts that two iterables have the same elements, the same number of + times, without regard to order. + + self.assertEqual(Counter(list(first)), + Counter(list(second))) + + Example: + - [0, 1, 1] and [1, 0, 1] compare equal. + - [0, 0, 1] and [0, 1] compare unequal. 
+ + 'b'Element counts were not equal: +'u'Element counts were not equal: +'b'First has %d, Second has %d: %r'u'First has %d, Second has %d: %r'b'Assert that two multi-line strings are equal.'u'Assert that two multi-line strings are equal.'b'First argument is not a string'u'First argument is not a string'b'Second argument is not a string'u'Second argument is not a string'b'Just like self.assertTrue(a < b), but with a nicer default message.'u'Just like self.assertTrue(a < b), but with a nicer default message.'b'%s not less than %s'u'%s not less than %s'b'Just like self.assertTrue(a <= b), but with a nicer default message.'u'Just like self.assertTrue(a <= b), but with a nicer default message.'b'%s not less than or equal to %s'u'%s not less than or equal to %s'b'Just like self.assertTrue(a > b), but with a nicer default message.'u'Just like self.assertTrue(a > b), but with a nicer default message.'b'%s not greater than %s'u'%s not greater than %s'b'Just like self.assertTrue(a >= b), but with a nicer default message.'u'Just like self.assertTrue(a >= b), but with a nicer default message.'b'%s not greater than or equal to %s'u'%s not greater than or equal to %s'b'Same as self.assertTrue(obj is None), with a nicer default message.'u'Same as self.assertTrue(obj is None), with a nicer default message.'b'%s is not None'u'%s is not None'b'Included for symmetry with assertIsNone.'u'Included for symmetry with assertIsNone.'b'unexpectedly None'u'unexpectedly None'b'Same as self.assertTrue(isinstance(obj, cls)), with a nicer + default message.'u'Same as self.assertTrue(isinstance(obj, cls)), with a nicer + default message.'b'%s is not an instance of %r'u'%s is not an instance of %r'b'Included for symmetry with assertIsInstance.'u'Included for symmetry with assertIsInstance.'b'%s is an instance of %r'u'%s is an instance of %r'b'Asserts that the message in a raised exception matches a regex. + + Args: + expected_exception: Exception class expected to be raised. + expected_regex: Regex (re.Pattern object or string) expected + to be found in error message. + args: Function to be called and extra positional args. + kwargs: Extra kwargs. + msg: Optional message used in case of failure. Can only be used + when assertRaisesRegex is used as a context manager. + 'u'Asserts that the message in a raised exception matches a regex. + + Args: + expected_exception: Exception class expected to be raised. + expected_regex: Regex (re.Pattern object or string) expected + to be found in error message. + args: Function to be called and extra positional args. + kwargs: Extra kwargs. + msg: Optional message used in case of failure. Can only be used + when assertRaisesRegex is used as a context manager. + 'b'assertRaisesRegex'u'assertRaisesRegex'b'Asserts that the message in a triggered warning matches a regexp. + Basic functioning is similar to assertWarns() with the addition + that only warnings whose messages also match the regular expression + are considered successful matches. + + Args: + expected_warning: Warning class expected to be triggered. + expected_regex: Regex (re.Pattern object or string) expected + to be found in error message. + args: Function to be called and extra positional args. + kwargs: Extra kwargs. + msg: Optional message used in case of failure. Can only be used + when assertWarnsRegex is used as a context manager. + 'u'Asserts that the message in a triggered warning matches a regexp. 
+ Basic functioning is similar to assertWarns() with the addition + that only warnings whose messages also match the regular expression + are considered successful matches. + + Args: + expected_warning: Warning class expected to be triggered. + expected_regex: Regex (re.Pattern object or string) expected + to be found in error message. + args: Function to be called and extra positional args. + kwargs: Extra kwargs. + msg: Optional message used in case of failure. Can only be used + when assertWarnsRegex is used as a context manager. + 'b'assertWarnsRegex'u'assertWarnsRegex'b'Fail the test unless the text matches the regular expression.'u'Fail the test unless the text matches the regular expression.'b'expected_regex must not be empty.'u'expected_regex must not be empty.'b'Regex didn't match: %r not found in %r'u'Regex didn't match: %r not found in %r'b'Fail the test if the text matches the regular expression.'u'Fail the test if the text matches the regular expression.'b'Regex matched: %r matches %r in %r'u'Regex matched: %r matches %r in %r'b'Please use {0} instead.'u'Please use {0} instead.'b'A test case that wraps a test function. + + This is useful for slipping pre-existing test functions into the + unittest framework. Optionally, set-up and tidy-up functions can be + supplied. As with TestCase, the tidy-up ('tearDown') function will + always be called if the set-up ('setUp') function ran successfully. + 'u'A test case that wraps a test function. + + This is useful for slipping pre-existing test functions into the + unittest framework. Optionally, set-up and tidy-up functions can be + supplied. As with TestCase, the tidy-up ('tearDown') function will + always be called if the set-up ('setUp') function ran successfully. + 'b'<%s tec=%s>'u'<%s tec=%s>'b'subtests cannot be run directly'u'subtests cannot be run directly'b'[{}]'u'[{}]'b'({})'u'({})'b'()'u'()'b'{} {}'u'{} {}'b'Returns a one-line description of the subtest, or None if no + description has been provided. + 'u'Returns a one-line description of the subtest, or None if no + description has been provided. + 'u'unittest.case'u'case'distutils.ccompiler + +Contains CCompiler, an abstract base class that defines the interface +for the Distutils compiler abstraction model.distutils.errorsdistutils.spawndistutils.file_utilmove_filedistutils.dir_utilmkpathdistutils.dep_utilnewer_pairwisenewer_groupdistutils.utilsplit_quotedexecuteCCompilerAbstract base class to define the interface that must be implemented + by real compiler classes. Also has some utility methods used by + several compiler classes. + + The basic idea behind a compiler abstraction class is that each + instance can be used for all the compile/link steps in building a + single project. Thus, attributes common to all of those compile and + link steps -- include directories, macros to define, libraries to link + against, etc. -- are attributes of the compiler instance. To allow for + variability in how individual files are treated, most of those + attributes may be varied on a per-compilation or per-link basis. + compiler_typesrc_extensionsobj_extensionstatic_lib_extensionshared_lib_extensionstatic_lib_formatshared_lib_formatexe_extension.cc++.cc.cpp.cxxobjc.mlanguage_maplanguage_orderoutput_dirmacrosinclude_dirslibrarieslibrary_dirsruntime_library_dirsset_executableset_executablesDefine the executables (and options for them) that will be run + to perform the various stages of compilation. 
The exact set of + executables that may be specified here depends on the compiler + class (via the 'executables' class attribute), but most will have: + compiler the C/C++ compiler + linker_so linker used to create shared objects and libraries + linker_exe linker used to create binary executables + archiver static library creator + + On platforms with a command-line (Unix, DOS/Windows), each of these + is a string that will be split into executable name and (optional) + list of arguments. (Splitting the string is done similarly to how + Unix shells operate: words are delimited by spaces, but quotes and + backslashes can override this. See + 'distutils.util.split_quoted()'.) + unknown executable '%s' for class %s_find_macrodefn_check_macro_definitionsdefinitionsEnsures that every element of 'definitions' is a valid macro + definition, ie. either (name,value) 2-tuple or a (name,) tuple. Do + nothing if all definitions are OK, raise TypeError otherwise. + invalid macro definition '%s': must be tuple (string,), (string, string), or (string, None)define_macroDefine a preprocessor macro for all compilations driven by this + compiler object. The optional parameter 'value' should be a + string; if it is not supplied, then the macro will be defined + without an explicit value and the exact outcome depends on the + compiler used (XXX true? does ANSI say anything about this?) + undefine_macroUndefine a preprocessor macro for all compilations driven by + this compiler object. If the same macro is defined by + 'define_macro()' and undefined by 'undefine_macro()' the last call + takes precedence (including multiple redefinitions or + undefinitions). If the macro is redefined/undefined on a + per-compilation basis (ie. in the call to 'compile()'), then that + takes precedence. + undefnadd_include_dirAdd 'dir' to the list of directories that will be searched for + header files. The compiler is instructed to search directories in + the order in which they are supplied by successive calls to + 'add_include_dir()'. + set_include_dirsdirsSet the list of directories that will be searched to 'dirs' (a + list of strings). Overrides any preceding calls to + 'add_include_dir()'; subsequence calls to 'add_include_dir()' add + to the list passed to 'set_include_dirs()'. This does not affect + any list of standard include directories that the compiler may + search by default. + add_libraryAdd 'libname' to the list of libraries that will be included in + all links driven by this compiler object. Note that 'libname' + should *not* be the name of a file containing a library, but the + name of the library itself: the actual filename will be inferred by + the linker, the compiler, or the compiler class (depending on the + platform). + + The linker will be instructed to link against libraries in the + order they were supplied to 'add_library()' and/or + 'set_libraries()'. It is perfectly valid to duplicate library + names; the linker will be instructed to link against libraries as + many times as they are mentioned. + set_librariesSet the list of libraries to be included in all links driven by + this compiler object to 'libnames' (a list of strings). This does + not affect any standard system libraries that the linker may + include by default. + add_library_dirAdd 'dir' to the list of directories that will be searched for + libraries specified to 'add_library()' and 'set_libraries()'. The + linker will be instructed to search for libraries in the order they + are supplied to 'add_library_dir()' and/or 'set_library_dirs()'. 
+ set_library_dirsSet the list of library search directories to 'dirs' (a list of + strings). This does not affect any standard library search path + that the linker may search by default. + add_runtime_library_dirAdd 'dir' to the list of directories that will be searched for + shared libraries at runtime. + set_runtime_library_dirsSet the list of directories to search for shared libraries at + runtime to 'dirs' (a list of strings). This does not affect any + standard search path that the runtime linker may search by + default. + add_link_objectAdd 'object' to the list of object files (or analogues, such as + explicitly named library files or the output of "resource + compilers") to be included in every link driven by this compiler + object. + set_link_objectsSet the list of object files (or analogues) to be included in + every link to 'objects'. This does not affect any standard object + files that the linker may include by default (such as system + libraries). + _setup_compileoutdirincdirssourcesdependsProcess arguments and decide which source files to compile.'output_dir' must be a string or None'macros' (if supplied) must be a list of tuples'include_dirs' (if supplied) must be a list of stringsobject_filenamesstrip_dirgen_preprocess_optionspp_optsbuildsrc_get_cc_args-g_fix_compile_argsTypecheck and fix-up some of the arguments to the 'compile()' + method, and return fixed-up values. Specifically: if 'output_dir' + is None, replaces it with 'self.output_dir'; ensures that 'macros' + is a list, and augments it with 'self.macros'; ensures that + 'include_dirs' is a list, and augments it with 'self.include_dirs'. + Guarantees that the returned values are of the correct type, + i.e. for 'output_dir' either string or None, and for 'macros' and + 'include_dirs' either list or None. + _prep_compileDecide which souce files must be recompiled. + + Determine the list of object files corresponding to 'sources', + and figure out which ones really need to be recompiled. + Return a list of all object files and a dictionary telling + which source files can be skipped. + _fix_object_argsTypecheck and fix up some arguments supplied to various methods. + Specifically: ensure that 'objects' is a list; if output_dir is + None, replace with self.output_dir. Return fixed versions of + 'objects' and 'output_dir'. + 'objects' must be a list or tuple of strings_fix_lib_argsTypecheck and fix up some of the arguments supplied to the + 'link_*' methods. Specifically: ensure that all arguments are + lists, and augment them with their permanent versions + (eg. 'self.libraries' augments 'libraries'). Return a tuple with + fixed versions of all arguments. + 'libraries' (if supplied) must be a list of strings'library_dirs' (if supplied) must be a list of strings'runtime_library_dirs' (if supplied) must be a list of strings"'runtime_library_dirs' (if supplied) ""must be a list of strings"_need_linkoutput_fileReturn true if we need to relink the files listed in 'objects' + to recreate 'output_file'. + newerdetect_languageDetect the language of a given file, or list of files. Uses + language_map, and language_order to do the job. + extlangextindexpreprocessextra_preargsextra_postargsPreprocess a single C/C++ source file, named in 'source'. + Output will be written to file named 'output_file', or stdout if + 'output_file' not supplied. 'macros' is a list of macro + definitions as for 'compile()', which will augment the macros set + with 'define_macro()' and 'undefine_macro()'. 
'include_dirs' is a + list of directory names that will be added to the default list. + + Raises PreprocessError on failure. + Compile one or more source files. + + 'sources' must be a list of filenames, most likely C/C++ + files, but in reality anything that can be handled by a + particular compiler and compiler class (eg. MSVCCompiler can + handle resource files in 'sources'). Return a list of object + filenames, one per source filename in 'sources'. Depending on + the implementation, not all source files will necessarily be + compiled, but all corresponding object filenames will be + returned. + + If 'output_dir' is given, object files will be put under it, while + retaining their original path component. That is, "foo/bar.c" + normally compiles to "foo/bar.o" (for a Unix implementation); if + 'output_dir' is "build", then it would compile to + "build/foo/bar.o". + + 'macros', if given, must be a list of macro definitions. A macro + definition is either a (name, value) 2-tuple or a (name,) 1-tuple. + The former defines a macro; if the value is None, the macro is + defined without an explicit value. The 1-tuple case undefines a + macro. Later definitions/redefinitions/ undefinitions take + precedence. + + 'include_dirs', if given, must be a list of strings, the + directories to add to the default include file search path for this + compilation only. + + 'debug' is a boolean; if true, the compiler will be instructed to + output debug symbols in (or alongside) the object file(s). + + 'extra_preargs' and 'extra_postargs' are implementation- dependent. + On platforms that have the notion of a command-line (e.g. Unix, + DOS/Windows), they are most likely lists of strings: extra + command-line arguments to prepend/append to the compiler command + line. On other platforms, consult the implementation class + documentation. In any event, they are intended as an escape hatch + for those occasions when the abstract compiler framework doesn't + cut the mustard. + + 'depends', if given, is a list of filenames that all targets + depend on. If a source file is older than any file in + depends, then the source file will be recompiled. This + supports dependency tracking, but only at a coarse + granularity. + + Raises CompileError on failure. + _compileCompile 'src' to product 'obj'.create_static_liboutput_libnametarget_langLink a bunch of stuff together to create a static library file. + The "bunch of stuff" consists of the list of object files supplied + as 'objects', the extra object files supplied to + 'add_link_object()' and/or 'set_link_objects()', the libraries + supplied to 'add_library()' and/or 'set_libraries()', and the + libraries supplied as 'libraries' (if any). + + 'output_libname' should be a library name, not a filename; the + filename will be inferred from the library name. 'output_dir' is + the directory where the library file will be put. + + 'debug' is a boolean; if true, debugging information will be + included in the library (note that on most platforms, it is the + compile step where this matters: the 'debug' flag is included here + just for consistency). + + 'target_lang' is the target language for which the given objects + are being compiled. This allows specific linkage time treatment of + certain languages. + + Raises LibError on failure. + shared_objectSHARED_OBJECTshared_librarySHARED_LIBRARYEXECUTABLEtarget_descoutput_filenameexport_symbolsbuild_tempLink a bunch of stuff together to create an executable or + shared library file. 
+ + The "bunch of stuff" consists of the list of object files supplied + as 'objects'. 'output_filename' should be a filename. If + 'output_dir' is supplied, 'output_filename' is relative to it + (i.e. 'output_filename' can provide directory components if + needed). + + 'libraries' is a list of libraries to link against. These are + library names, not filenames, since they're translated into + filenames in a platform-specific way (eg. "foo" becomes "libfoo.a" + on Unix and "foo.lib" on DOS/Windows). However, they can include a + directory component, which means the linker will look in that + specific directory rather than searching all the normal locations. + + 'library_dirs', if supplied, should be a list of directories to + search for libraries that were specified as bare library names + (ie. no directory component). These are on top of the system + default and those supplied to 'add_library_dir()' and/or + 'set_library_dirs()'. 'runtime_library_dirs' is a list of + directories that will be embedded into the shared library and used + to search for other shared libraries that *it* depends on at + run-time. (This may only be relevant on Unix.) + + 'export_symbols' is a list of symbols that the shared library will + export. (This appears to be relevant only on Windows.) + + 'debug' is as for 'compile()' and 'create_static_lib()', with the + slight distinction that it actually matters on most platforms (as + opposed to 'create_static_lib()', which includes a 'debug' flag + mostly for form's sake). + + 'extra_preargs' and 'extra_postargs' are as for 'compile()' (except + of course that they supply command-line arguments for the + particular linker being used). + + 'target_lang' is the target language for which the given objects + are being compiled. This allows specific linkage time treatment of + certain languages. + + Raises LinkError on failure. + link_shared_liblibrary_filenamelib_typelink_shared_objectlink_executableoutput_prognameexecutable_filenamelibrary_dir_optionReturn the compiler option to add 'dir' to the list of + directories searched for libraries. + runtime_library_dir_optionReturn the compiler option to add 'dir' to the list of + directories searched for runtime libraries. + library_optionReturn the compiler option to add 'lib' to the list of libraries + linked into the shared library or executable. + has_functionReturn a boolean indicating whether funcname is supported on + the current platform. The optional arguments can be used to + augment the compilation environment. + fnamefdopenincl#include "%s" +int main (int argc, char **argv) { + %s(); + return 0; +} +CompileErrora.outLinkErrorfind_library_fileSearch the specified list of directories for a static or shared + library file 'lib' and return the full path to that file. If + 'debug' true, look for a debugging version (if that makes sense on + the current platform). Return None if 'lib' wasn't found in any of + the specified directories. + source_filenamesobj_namessrc_namesplitdriveUnknownFileErrorunknown file type '%s' (from '%s')shared_object_filenamestaticdylibxcode_stub'lib_type' must be "static", "shared", "dylib", or "xcode_stub"_lib_format_lib_extensionannouncedebug_printdistutils.debugwarning: %s +0o777cygwin.*unixmsvc_default_compilersget_default_compilerDetermine the default compiler to use for the given platform. + + osname should be one of the standard Python OS names (i.e. the + ones returned by os.name) and platform the common value + returned by sys.platform for the platform in question. 
+ + The default values are os.name and sys.platform in case the + parameters are not given. + unixccompilerUnixCCompilerstandard UNIX-style compiler_msvccompilerMSVCCompilerMicrosoft Visual C++cygwinccompilerCygwinCCompilerCygwin port of GNU C Compiler for Win32Mingw32CCompilerMingw32 port of GNU C Compiler for Win32mingw32bcppcompilerBCPPCompilerBorland C++ Compilerbcppcompiler_classshow_compilersPrint list of available compilers (used by the "--help-compiler" + options to "build", "build_ext", "build_clib"). + distutils.fancy_getoptFancyGetoptcompilerscompiler=pretty_printerList of available compilers:platGenerate an instance of some CCompiler subclass for the supplied + platform/compiler combination. 'plat' defaults to 'os.name' + (eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler + for that platform. Currently only 'posix' and 'nt' are supported, and + the default compilers are "traditional Unix interface" (UnixCCompiler + class) and Visual C++ (MSVCCompiler class). Note that it's perfectly + possible to ask for a Unix compiler object under Windows, and a + Microsoft compiler object under Unix -- if you supply a value for + 'compiler', 'plat' is ignored. + class_namelong_descriptiondon't know how to compile C/C++ code on platform '%s' with '%s' compilerDistutilsPlatformErrordistutils.DistutilsModuleErrorcan't compile C/C++ code: unable to load module '%s'can't compile C/C++ code: unable to find class '%s' in module '%s'"can't compile C/C++ code: unable to find class '%s' ""in module '%s'"Generate C pre-processor options (-D, -U, -I) as used by at least + two types of compilers: the typical Unix compiler and Visual C++. + 'macros' is the usual thing, a list of 1- or 2-tuples, where (name,) + means undefine (-U) macro 'name', and (name,value) means define (-D) + macro 'name' to 'value'. 'include_dirs' is just a list of directory + names to be added to the header file search path (-I). Returns a list + of command-line options suitable for either Unix compilers or Visual + C++. + macrobad macro definition '%s': each element of 'macros' list must be a 1- or 2-tuple"bad macro definition '%s': ""each element of 'macros' list must be a 1- or 2-tuple"-U%s-D%s-D%s=%s-I%sgen_lib_optionsGenerate linker options for searching library directories and + linking with specific libraries. 'libraries' and 'library_dirs' are, + respectively, lists of library names (not filenames!) and search + directories. Returns a list of command-line options suitable for use + with some compiler (depending on the two format strings passed in). + lib_optslib_dirlib_namelib_fileno library file corresponding to '%s' found (skipping)"no library file corresponding to ""'%s' found (skipping)"# 'compiler_type' is a class attribute that identifies this class. It# keeps code that wants to know what kind of compiler it's dealing with# from having to import all possible compiler classes just to do an# 'isinstance'. In concrete CCompiler subclasses, 'compiler_type'# should really, really be one of the keys of the 'compiler_class'# dictionary (see below -- used by the 'new_compiler()' factory# function) -- authors of new compiler interface classes are# responsible for updating 'compiler_class'!# XXX things not handled by this compiler abstraction model:# * client can't provide additional options for a compiler,# e.g. warning, optimization, debugging flags. Perhaps this# should be the domain of concrete compiler abstraction classes# (UnixCCompiler, MSVCCompiler, etc.) 
-- or perhaps the base# class should have methods for the common ones.# * can't completely override the include or library searchg# path, ie. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2".# I'm not sure how widely supported this is even by Unix# compilers, much less on other platforms. And I'm even less# sure how useful it is; maybe for cross-compiling, but# support for that is a ways off. (And anyways, cross# compilers probably have a dedicated binary with the# right paths compiled in. I hope.)# * can't do really freaky things with the library list/library# dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against# different versions of libfoo.a in different locations. I# think this is useless without the ability to null out the# library search path anyways.# Subclasses that rely on the standard filename generation methods# implemented below should override these; see the comment near# those methods ('object_filenames()' et. al.) for details:# list of strings# string# format string# prob. same as static_lib_format# Default language settings. language_map is used to detect a source# file or Extension target language, checking source filenames.# language_order is used to detect the language precedence, when deciding# what language to use when mixing source types. For example, if some# extension has two files with ".c" extension, and one with ".cpp", it# is still linked as c++.# 'output_dir': a common output directory for object, library,# shared object, and shared library files# 'macros': a list of macro definitions (or undefinitions). A# macro definition is a 2-tuple (name, value), where the value is# either a string or None (no explicit value). A macro# undefinition is a 1-tuple (name,).# 'include_dirs': a list of directories to search for include files# 'libraries': a list of libraries to include in any link# (library names, not filenames: eg. "foo" not "libfoo.a")# 'library_dirs': a list of directories to search for libraries# 'runtime_library_dirs': a list of directories to search for# shared libraries/objects at runtime# 'objects': a list of object files (or similar, such as explicitly# named library files) to include on any link# Note that some CCompiler implementation classes will define class# attributes 'cpp', 'cc', etc. with hard-coded executable names;# this is appropriate when a compiler class is for exactly one# compiler/OS combination (eg. MSVCCompiler). 
Other compiler# classes (UnixCCompiler, in particular) are driven by information# discovered at run-time, since there are many different ways to do# basically the same things with Unix C compilers.# -- Bookkeeping methods -------------------------------------------# Delete from the list of macro definitions/undefinitions if# already there (so that this one will take precedence).# -- Private utility methods --------------------------------------# (here for the convenience of subclasses)# Helper method to prep compiler in subclass compile() methods# Get the list of expected output (object) files# works for unixccompiler, cygwinccompiler# Return an empty dict for the "which source files can be skipped"# return value to preserve API compatibility.# -- Worker methods ------------------------------------------------# (must be implemented by subclasses)# A concrete compiler class can either override this method# entirely or implement _compile().# Return *all* object filenames, not just the ones we just built.# A concrete compiler class that does not override compile()# should implement _compile().# values for target_desc parameter in link()# Old 'link_*()' methods, rewritten to use the new 'link()' method.# -- Miscellaneous methods -----------------------------------------# These are all used by the 'gen_lib_options() function; there is# no appropriate default implementation so subclasses should# implement all of these.# this can't be included at module scope because it tries to# import math which might not be available at that point - maybe# the necessary logic should just be inlined?# -- Filename generation methods -----------------------------------# The default implementation of the filename generating methods are# prejudiced towards the Unix/DOS/Windows view of the world:# * object files are named by replacing the source file extension# (eg. .c/.cpp -> .o/.obj)# * library files (shared or static) are named by plugging the# library name and extension into a format string, eg.# "lib%s.%s" % (lib_name, ".a") for Unix static libraries# * executables are named by appending an extension (possibly# empty) to the program name: eg. progname + ".exe" for# Windows# To reduce redundant code, these methods expect to find# several attributes in the current object (presumably defined# as class attributes):# * src_extensions -# list of C/C++ source file extensions, eg. ['.c', '.cpp']# * obj_extension -# object file extension, eg. '.o' or '.obj'# * static_lib_extension -# extension for static library files, eg. '.a' or '.lib'# * shared_lib_extension -# extension for shared library/object files, eg. '.so', '.dll'# * static_lib_format -# format string for generating static library filenames,# eg. 'lib%s.%s' or '%s.%s'# * shared_lib_format# format string for generating shared library filenames# (probably same as static_lib_format, since the extension# is one of the intended parameters to the format string)# * exe_extension -# extension for executable files, eg. '' or '.exe'# Chop off the drive# If abs, chop off leading /# or 'shared'# -- Utility methods -----------------------------------------------# Map a sys.platform/os.name ('posix', 'nt') to the default compiler# type for that platform. Keys are interpreted as re match# patterns. Order is important; platform mappings are preferred over# OS names.# Platform string mappings# on a cygwin built python we can use gcc like an ordinary UNIXish# compiler# OS name mappings# Default to Unix compiler# Map compiler types to (module_name, class_name) pairs -- ie. 
where to# find the code that implements an interface to this compiler. (The module# is assumed to be in the 'distutils' package.)# XXX this "knows" that the compiler option it's describing is# "--compiler", which just happens to be the case for the three# commands that use it.# XXX The None is necessary to preserve backwards compatibility# with classes that expect verbose to be the first positional# argument.# XXX it would be nice (mainly aesthetic, and so we don't generate# stupid-looking command lines) to go over 'macros' and eliminate# redundant definitions/undefinitions (ie. ensure that only the# latest mention of a particular macro winds up on the command# line). I don't think it's essential, though, since most (all?)# Unix C compilers only pay attention to the latest -D or -U# mention of a macro on their command line. Similar situation for# 'include_dirs'. I'm punting on both for now. Anyways, weeding out# redundancies like this should probably be the province of# CCompiler, since the data structures used are inherited from it# and therefore common to all CCompiler classes.# undefine this macro# define with no explicit value# XXX *don't* need to be clever about quoting the# macro value here, because we're going to avoid the# shell at all costs when we spawn the command!# XXX it's important that we *not* remove redundant library mentions!# sometimes you really do have to say "-lfoo -lbar -lfoo" in order to# resolve all symbols. I just hope we never have to say "-lfoo obj.o# -lbar" to get things to work -- that's certainly a possibility, but a# pretty nasty way to arrange your C code.b'distutils.ccompiler + +Contains CCompiler, an abstract base class that defines the interface +for the Distutils compiler abstraction model.'u'distutils.ccompiler + +Contains CCompiler, an abstract base class that defines the interface +for the Distutils compiler abstraction model.'b'Abstract base class to define the interface that must be implemented + by real compiler classes. Also has some utility methods used by + several compiler classes. + + The basic idea behind a compiler abstraction class is that each + instance can be used for all the compile/link steps in building a + single project. Thus, attributes common to all of those compile and + link steps -- include directories, macros to define, libraries to link + against, etc. -- are attributes of the compiler instance. To allow for + variability in how individual files are treated, most of those + attributes may be varied on a per-compilation or per-link basis. + 'u'Abstract base class to define the interface that must be implemented + by real compiler classes. Also has some utility methods used by + several compiler classes. + + The basic idea behind a compiler abstraction class is that each + instance can be used for all the compile/link steps in building a + single project. Thus, attributes common to all of those compile and + link steps -- include directories, macros to define, libraries to link + against, etc. -- are attributes of the compiler instance. To allow for + variability in how individual files are treated, most of those + attributes may be varied on a per-compilation or per-link basis. + 'b'.c'u'.c'b'c++'u'c++'b'.cc'u'.cc'b'.cpp'u'.cpp'b'.cxx'u'.cxx'b'objc'u'objc'b'.m'u'.m'b'Define the executables (and options for them) that will be run + to perform the various stages of compilation. 
The exact set of + executables that may be specified here depends on the compiler + class (via the 'executables' class attribute), but most will have: + compiler the C/C++ compiler + linker_so linker used to create shared objects and libraries + linker_exe linker used to create binary executables + archiver static library creator + + On platforms with a command-line (Unix, DOS/Windows), each of these + is a string that will be split into executable name and (optional) + list of arguments. (Splitting the string is done similarly to how + Unix shells operate: words are delimited by spaces, but quotes and + backslashes can override this. See + 'distutils.util.split_quoted()'.) + 'u'Define the executables (and options for them) that will be run + to perform the various stages of compilation. The exact set of + executables that may be specified here depends on the compiler + class (via the 'executables' class attribute), but most will have: + compiler the C/C++ compiler + linker_so linker used to create shared objects and libraries + linker_exe linker used to create binary executables + archiver static library creator + + On platforms with a command-line (Unix, DOS/Windows), each of these + is a string that will be split into executable name and (optional) + list of arguments. (Splitting the string is done similarly to how + Unix shells operate: words are delimited by spaces, but quotes and + backslashes can override this. See + 'distutils.util.split_quoted()'.) + 'b'unknown executable '%s' for class %s'u'unknown executable '%s' for class %s'b'Ensures that every element of 'definitions' is a valid macro + definition, ie. either (name,value) 2-tuple or a (name,) tuple. Do + nothing if all definitions are OK, raise TypeError otherwise. + 'u'Ensures that every element of 'definitions' is a valid macro + definition, ie. either (name,value) 2-tuple or a (name,) tuple. Do + nothing if all definitions are OK, raise TypeError otherwise. + 'b'invalid macro definition '%s': 'u'invalid macro definition '%s': 'b'must be tuple (string,), (string, string), or 'u'must be tuple (string,), (string, string), or 'b'(string, None)'u'(string, None)'b'Define a preprocessor macro for all compilations driven by this + compiler object. The optional parameter 'value' should be a + string; if it is not supplied, then the macro will be defined + without an explicit value and the exact outcome depends on the + compiler used (XXX true? does ANSI say anything about this?) + 'u'Define a preprocessor macro for all compilations driven by this + compiler object. The optional parameter 'value' should be a + string; if it is not supplied, then the macro will be defined + without an explicit value and the exact outcome depends on the + compiler used (XXX true? does ANSI say anything about this?) + 'b'Undefine a preprocessor macro for all compilations driven by + this compiler object. If the same macro is defined by + 'define_macro()' and undefined by 'undefine_macro()' the last call + takes precedence (including multiple redefinitions or + undefinitions). If the macro is redefined/undefined on a + per-compilation basis (ie. in the call to 'compile()'), then that + takes precedence. + 'u'Undefine a preprocessor macro for all compilations driven by + this compiler object. If the same macro is defined by + 'define_macro()' and undefined by 'undefine_macro()' the last call + takes precedence (including multiple redefinitions or + undefinitions). If the macro is redefined/undefined on a + per-compilation basis (ie. 
in the call to 'compile()'), then that + takes precedence. + 'b'Add 'dir' to the list of directories that will be searched for + header files. The compiler is instructed to search directories in + the order in which they are supplied by successive calls to + 'add_include_dir()'. + 'u'Add 'dir' to the list of directories that will be searched for + header files. The compiler is instructed to search directories in + the order in which they are supplied by successive calls to + 'add_include_dir()'. + 'b'Set the list of directories that will be searched to 'dirs' (a + list of strings). Overrides any preceding calls to + 'add_include_dir()'; subsequence calls to 'add_include_dir()' add + to the list passed to 'set_include_dirs()'. This does not affect + any list of standard include directories that the compiler may + search by default. + 'u'Set the list of directories that will be searched to 'dirs' (a + list of strings). Overrides any preceding calls to + 'add_include_dir()'; subsequence calls to 'add_include_dir()' add + to the list passed to 'set_include_dirs()'. This does not affect + any list of standard include directories that the compiler may + search by default. + 'b'Add 'libname' to the list of libraries that will be included in + all links driven by this compiler object. Note that 'libname' + should *not* be the name of a file containing a library, but the + name of the library itself: the actual filename will be inferred by + the linker, the compiler, or the compiler class (depending on the + platform). + + The linker will be instructed to link against libraries in the + order they were supplied to 'add_library()' and/or + 'set_libraries()'. It is perfectly valid to duplicate library + names; the linker will be instructed to link against libraries as + many times as they are mentioned. + 'u'Add 'libname' to the list of libraries that will be included in + all links driven by this compiler object. Note that 'libname' + should *not* be the name of a file containing a library, but the + name of the library itself: the actual filename will be inferred by + the linker, the compiler, or the compiler class (depending on the + platform). + + The linker will be instructed to link against libraries in the + order they were supplied to 'add_library()' and/or + 'set_libraries()'. It is perfectly valid to duplicate library + names; the linker will be instructed to link against libraries as + many times as they are mentioned. + 'b'Set the list of libraries to be included in all links driven by + this compiler object to 'libnames' (a list of strings). This does + not affect any standard system libraries that the linker may + include by default. + 'u'Set the list of libraries to be included in all links driven by + this compiler object to 'libnames' (a list of strings). This does + not affect any standard system libraries that the linker may + include by default. + 'b'Add 'dir' to the list of directories that will be searched for + libraries specified to 'add_library()' and 'set_libraries()'. The + linker will be instructed to search for libraries in the order they + are supplied to 'add_library_dir()' and/or 'set_library_dirs()'. + 'u'Add 'dir' to the list of directories that will be searched for + libraries specified to 'add_library()' and 'set_libraries()'. The + linker will be instructed to search for libraries in the order they + are supplied to 'add_library_dir()' and/or 'set_library_dirs()'. + 'b'Set the list of library search directories to 'dirs' (a list of + strings). 
This does not affect any standard library search path + that the linker may search by default. + 'u'Set the list of library search directories to 'dirs' (a list of + strings). This does not affect any standard library search path + that the linker may search by default. + 'b'Add 'dir' to the list of directories that will be searched for + shared libraries at runtime. + 'u'Add 'dir' to the list of directories that will be searched for + shared libraries at runtime. + 'b'Set the list of directories to search for shared libraries at + runtime to 'dirs' (a list of strings). This does not affect any + standard search path that the runtime linker may search by + default. + 'u'Set the list of directories to search for shared libraries at + runtime to 'dirs' (a list of strings). This does not affect any + standard search path that the runtime linker may search by + default. + 'b'Add 'object' to the list of object files (or analogues, such as + explicitly named library files or the output of "resource + compilers") to be included in every link driven by this compiler + object. + 'u'Add 'object' to the list of object files (or analogues, such as + explicitly named library files or the output of "resource + compilers") to be included in every link driven by this compiler + object. + 'b'Set the list of object files (or analogues) to be included in + every link to 'objects'. This does not affect any standard object + files that the linker may include by default (such as system + libraries). + 'u'Set the list of object files (or analogues) to be included in + every link to 'objects'. This does not affect any standard object + files that the linker may include by default (such as system + libraries). + 'b'Process arguments and decide which source files to compile.'u'Process arguments and decide which source files to compile.'b''output_dir' must be a string or None'u''output_dir' must be a string or None'b''macros' (if supplied) must be a list of tuples'u''macros' (if supplied) must be a list of tuples'b''include_dirs' (if supplied) must be a list of strings'u''include_dirs' (if supplied) must be a list of strings'b'-g'u'-g'b'Typecheck and fix-up some of the arguments to the 'compile()' + method, and return fixed-up values. Specifically: if 'output_dir' + is None, replaces it with 'self.output_dir'; ensures that 'macros' + is a list, and augments it with 'self.macros'; ensures that + 'include_dirs' is a list, and augments it with 'self.include_dirs'. + Guarantees that the returned values are of the correct type, + i.e. for 'output_dir' either string or None, and for 'macros' and + 'include_dirs' either list or None. + 'u'Typecheck and fix-up some of the arguments to the 'compile()' + method, and return fixed-up values. Specifically: if 'output_dir' + is None, replaces it with 'self.output_dir'; ensures that 'macros' + is a list, and augments it with 'self.macros'; ensures that + 'include_dirs' is a list, and augments it with 'self.include_dirs'. + Guarantees that the returned values are of the correct type, + i.e. for 'output_dir' either string or None, and for 'macros' and + 'include_dirs' either list or None. + 'b'Decide which souce files must be recompiled. + + Determine the list of object files corresponding to 'sources', + and figure out which ones really need to be recompiled. + Return a list of all object files and a dictionary telling + which source files can be skipped. + 'u'Decide which souce files must be recompiled. 
+ + Determine the list of object files corresponding to 'sources', + and figure out which ones really need to be recompiled. + Return a list of all object files and a dictionary telling + which source files can be skipped. + 'b'Typecheck and fix up some arguments supplied to various methods. + Specifically: ensure that 'objects' is a list; if output_dir is + None, replace with self.output_dir. Return fixed versions of + 'objects' and 'output_dir'. + 'u'Typecheck and fix up some arguments supplied to various methods. + Specifically: ensure that 'objects' is a list; if output_dir is + None, replace with self.output_dir. Return fixed versions of + 'objects' and 'output_dir'. + 'b''objects' must be a list or tuple of strings'u''objects' must be a list or tuple of strings'b'Typecheck and fix up some of the arguments supplied to the + 'link_*' methods. Specifically: ensure that all arguments are + lists, and augment them with their permanent versions + (eg. 'self.libraries' augments 'libraries'). Return a tuple with + fixed versions of all arguments. + 'u'Typecheck and fix up some of the arguments supplied to the + 'link_*' methods. Specifically: ensure that all arguments are + lists, and augment them with their permanent versions + (eg. 'self.libraries' augments 'libraries'). Return a tuple with + fixed versions of all arguments. + 'b''libraries' (if supplied) must be a list of strings'u''libraries' (if supplied) must be a list of strings'b''library_dirs' (if supplied) must be a list of strings'u''library_dirs' (if supplied) must be a list of strings'b''runtime_library_dirs' (if supplied) must be a list of strings'u''runtime_library_dirs' (if supplied) must be a list of strings'b'Return true if we need to relink the files listed in 'objects' + to recreate 'output_file'. + 'u'Return true if we need to relink the files listed in 'objects' + to recreate 'output_file'. + 'b'newer'u'newer'b'Detect the language of a given file, or list of files. Uses + language_map, and language_order to do the job. + 'u'Detect the language of a given file, or list of files. Uses + language_map, and language_order to do the job. + 'b'Preprocess a single C/C++ source file, named in 'source'. + Output will be written to file named 'output_file', or stdout if + 'output_file' not supplied. 'macros' is a list of macro + definitions as for 'compile()', which will augment the macros set + with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a + list of directory names that will be added to the default list. + + Raises PreprocessError on failure. + 'u'Preprocess a single C/C++ source file, named in 'source'. + Output will be written to file named 'output_file', or stdout if + 'output_file' not supplied. 'macros' is a list of macro + definitions as for 'compile()', which will augment the macros set + with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a + list of directory names that will be added to the default list. + + Raises PreprocessError on failure. + 'b'Compile one or more source files. + + 'sources' must be a list of filenames, most likely C/C++ + files, but in reality anything that can be handled by a + particular compiler and compiler class (eg. MSVCCompiler can + handle resource files in 'sources'). Return a list of object + filenames, one per source filename in 'sources'. Depending on + the implementation, not all source files will necessarily be + compiled, but all corresponding object filenames will be + returned. 
+ + If 'output_dir' is given, object files will be put under it, while + retaining their original path component. That is, "foo/bar.c" + normally compiles to "foo/bar.o" (for a Unix implementation); if + 'output_dir' is "build", then it would compile to + "build/foo/bar.o". + + 'macros', if given, must be a list of macro definitions. A macro + definition is either a (name, value) 2-tuple or a (name,) 1-tuple. + The former defines a macro; if the value is None, the macro is + defined without an explicit value. The 1-tuple case undefines a + macro. Later definitions/redefinitions/ undefinitions take + precedence. + + 'include_dirs', if given, must be a list of strings, the + directories to add to the default include file search path for this + compilation only. + + 'debug' is a boolean; if true, the compiler will be instructed to + output debug symbols in (or alongside) the object file(s). + + 'extra_preargs' and 'extra_postargs' are implementation- dependent. + On platforms that have the notion of a command-line (e.g. Unix, + DOS/Windows), they are most likely lists of strings: extra + command-line arguments to prepend/append to the compiler command + line. On other platforms, consult the implementation class + documentation. In any event, they are intended as an escape hatch + for those occasions when the abstract compiler framework doesn't + cut the mustard. + + 'depends', if given, is a list of filenames that all targets + depend on. If a source file is older than any file in + depends, then the source file will be recompiled. This + supports dependency tracking, but only at a coarse + granularity. + + Raises CompileError on failure. + 'u'Compile one or more source files. + + 'sources' must be a list of filenames, most likely C/C++ + files, but in reality anything that can be handled by a + particular compiler and compiler class (eg. MSVCCompiler can + handle resource files in 'sources'). Return a list of object + filenames, one per source filename in 'sources'. Depending on + the implementation, not all source files will necessarily be + compiled, but all corresponding object filenames will be + returned. + + If 'output_dir' is given, object files will be put under it, while + retaining their original path component. That is, "foo/bar.c" + normally compiles to "foo/bar.o" (for a Unix implementation); if + 'output_dir' is "build", then it would compile to + "build/foo/bar.o". + + 'macros', if given, must be a list of macro definitions. A macro + definition is either a (name, value) 2-tuple or a (name,) 1-tuple. + The former defines a macro; if the value is None, the macro is + defined without an explicit value. The 1-tuple case undefines a + macro. Later definitions/redefinitions/ undefinitions take + precedence. + + 'include_dirs', if given, must be a list of strings, the + directories to add to the default include file search path for this + compilation only. + + 'debug' is a boolean; if true, the compiler will be instructed to + output debug symbols in (or alongside) the object file(s). + + 'extra_preargs' and 'extra_postargs' are implementation- dependent. + On platforms that have the notion of a command-line (e.g. Unix, + DOS/Windows), they are most likely lists of strings: extra + command-line arguments to prepend/append to the compiler command + line. On other platforms, consult the implementation class + documentation. In any event, they are intended as an escape hatch + for those occasions when the abstract compiler framework doesn't + cut the mustard. 
+ + 'depends', if given, is a list of filenames that all targets + depend on. If a source file is older than any file in + depends, then the source file will be recompiled. This + supports dependency tracking, but only at a coarse + granularity. + + Raises CompileError on failure. + 'b'Compile 'src' to product 'obj'.'u'Compile 'src' to product 'obj'.'b'Link a bunch of stuff together to create a static library file. + The "bunch of stuff" consists of the list of object files supplied + as 'objects', the extra object files supplied to + 'add_link_object()' and/or 'set_link_objects()', the libraries + supplied to 'add_library()' and/or 'set_libraries()', and the + libraries supplied as 'libraries' (if any). + + 'output_libname' should be a library name, not a filename; the + filename will be inferred from the library name. 'output_dir' is + the directory where the library file will be put. + + 'debug' is a boolean; if true, debugging information will be + included in the library (note that on most platforms, it is the + compile step where this matters: the 'debug' flag is included here + just for consistency). + + 'target_lang' is the target language for which the given objects + are being compiled. This allows specific linkage time treatment of + certain languages. + + Raises LibError on failure. + 'u'Link a bunch of stuff together to create a static library file. + The "bunch of stuff" consists of the list of object files supplied + as 'objects', the extra object files supplied to + 'add_link_object()' and/or 'set_link_objects()', the libraries + supplied to 'add_library()' and/or 'set_libraries()', and the + libraries supplied as 'libraries' (if any). + + 'output_libname' should be a library name, not a filename; the + filename will be inferred from the library name. 'output_dir' is + the directory where the library file will be put. + + 'debug' is a boolean; if true, debugging information will be + included in the library (note that on most platforms, it is the + compile step where this matters: the 'debug' flag is included here + just for consistency). + + 'target_lang' is the target language for which the given objects + are being compiled. This allows specific linkage time treatment of + certain languages. + + Raises LibError on failure. + 'b'shared_object'u'shared_object'b'shared_library'u'shared_library'b'executable'u'executable'b'Link a bunch of stuff together to create an executable or + shared library file. + + The "bunch of stuff" consists of the list of object files supplied + as 'objects'. 'output_filename' should be a filename. If + 'output_dir' is supplied, 'output_filename' is relative to it + (i.e. 'output_filename' can provide directory components if + needed). + + 'libraries' is a list of libraries to link against. These are + library names, not filenames, since they're translated into + filenames in a platform-specific way (eg. "foo" becomes "libfoo.a" + on Unix and "foo.lib" on DOS/Windows). However, they can include a + directory component, which means the linker will look in that + specific directory rather than searching all the normal locations. + + 'library_dirs', if supplied, should be a list of directories to + search for libraries that were specified as bare library names + (ie. no directory component). These are on top of the system + default and those supplied to 'add_library_dir()' and/or + 'set_library_dirs()'. 
'runtime_library_dirs' is a list of + directories that will be embedded into the shared library and used + to search for other shared libraries that *it* depends on at + run-time. (This may only be relevant on Unix.) + + 'export_symbols' is a list of symbols that the shared library will + export. (This appears to be relevant only on Windows.) + + 'debug' is as for 'compile()' and 'create_static_lib()', with the + slight distinction that it actually matters on most platforms (as + opposed to 'create_static_lib()', which includes a 'debug' flag + mostly for form's sake). + + 'extra_preargs' and 'extra_postargs' are as for 'compile()' (except + of course that they supply command-line arguments for the + particular linker being used). + + 'target_lang' is the target language for which the given objects + are being compiled. This allows specific linkage time treatment of + certain languages. + + Raises LinkError on failure. + 'u'Link a bunch of stuff together to create an executable or + shared library file. + + The "bunch of stuff" consists of the list of object files supplied + as 'objects'. 'output_filename' should be a filename. If + 'output_dir' is supplied, 'output_filename' is relative to it + (i.e. 'output_filename' can provide directory components if + needed). + + 'libraries' is a list of libraries to link against. These are + library names, not filenames, since they're translated into + filenames in a platform-specific way (eg. "foo" becomes "libfoo.a" + on Unix and "foo.lib" on DOS/Windows). However, they can include a + directory component, which means the linker will look in that + specific directory rather than searching all the normal locations. + + 'library_dirs', if supplied, should be a list of directories to + search for libraries that were specified as bare library names + (ie. no directory component). These are on top of the system + default and those supplied to 'add_library_dir()' and/or + 'set_library_dirs()'. 'runtime_library_dirs' is a list of + directories that will be embedded into the shared library and used + to search for other shared libraries that *it* depends on at + run-time. (This may only be relevant on Unix.) + + 'export_symbols' is a list of symbols that the shared library will + export. (This appears to be relevant only on Windows.) + + 'debug' is as for 'compile()' and 'create_static_lib()', with the + slight distinction that it actually matters on most platforms (as + opposed to 'create_static_lib()', which includes a 'debug' flag + mostly for form's sake). + + 'extra_preargs' and 'extra_postargs' are as for 'compile()' (except + of course that they supply command-line arguments for the + particular linker being used). + + 'target_lang' is the target language for which the given objects + are being compiled. This allows specific linkage time treatment of + certain languages. + + Raises LinkError on failure. + 'b'shared'u'shared'b'Return the compiler option to add 'dir' to the list of + directories searched for libraries. + 'u'Return the compiler option to add 'dir' to the list of + directories searched for libraries. + 'b'Return the compiler option to add 'dir' to the list of + directories searched for runtime libraries. + 'u'Return the compiler option to add 'dir' to the list of + directories searched for runtime libraries. + 'b'Return the compiler option to add 'lib' to the list of libraries + linked into the shared library or executable. 
+ 'u'Return the compiler option to add 'lib' to the list of libraries + linked into the shared library or executable. + 'b'Return a boolean indicating whether funcname is supported on + the current platform. The optional arguments can be used to + augment the compilation environment. + 'u'Return a boolean indicating whether funcname is supported on + the current platform. The optional arguments can be used to + augment the compilation environment. + 'b'#include "%s" +'u'#include "%s" +'b'int main (int argc, char **argv) { + %s(); + return 0; +} +'u'int main (int argc, char **argv) { + %s(); + return 0; +} +'b'a.out'u'a.out'b'Search the specified list of directories for a static or shared + library file 'lib' and return the full path to that file. If + 'debug' true, look for a debugging version (if that makes sense on + the current platform). Return None if 'lib' wasn't found in any of + the specified directories. + 'u'Search the specified list of directories for a static or shared + library file 'lib' and return the full path to that file. If + 'debug' true, look for a debugging version (if that makes sense on + the current platform). Return None if 'lib' wasn't found in any of + the specified directories. + 'b'unknown file type '%s' (from '%s')'u'unknown file type '%s' (from '%s')'b'static'u'static'b'dylib'u'dylib'b'xcode_stub'u'xcode_stub'b''lib_type' must be "static", "shared", "dylib", or "xcode_stub"'u''lib_type' must be "static", "shared", "dylib", or "xcode_stub"'b'_lib_format'u'_lib_format'b'_lib_extension'u'_lib_extension'b'warning: %s +'u'warning: %s +'b'cygwin.*'u'cygwin.*'b'unix'u'unix'b'msvc'u'msvc'b'Determine the default compiler to use for the given platform. + + osname should be one of the standard Python OS names (i.e. the + ones returned by os.name) and platform the common value + returned by sys.platform for the platform in question. + + The default values are os.name and sys.platform in case the + parameters are not given. + 'u'Determine the default compiler to use for the given platform. + + osname should be one of the standard Python OS names (i.e. the + ones returned by os.name) and platform the common value + returned by sys.platform for the platform in question. + + The default values are os.name and sys.platform in case the + parameters are not given. + 'b'unixccompiler'u'unixccompiler'b'UnixCCompiler'u'UnixCCompiler'b'standard UNIX-style compiler'u'standard UNIX-style compiler'b'_msvccompiler'u'_msvccompiler'b'MSVCCompiler'u'MSVCCompiler'b'Microsoft Visual C++'u'Microsoft Visual C++'b'cygwinccompiler'u'cygwinccompiler'b'CygwinCCompiler'u'CygwinCCompiler'b'Cygwin port of GNU C Compiler for Win32'u'Cygwin port of GNU C Compiler for Win32'b'Mingw32CCompiler'u'Mingw32CCompiler'b'Mingw32 port of GNU C Compiler for Win32'u'Mingw32 port of GNU C Compiler for Win32'b'mingw32'u'mingw32'b'bcppcompiler'u'bcppcompiler'b'BCPPCompiler'u'BCPPCompiler'b'Borland C++ Compiler'u'Borland C++ Compiler'b'bcpp'u'bcpp'b'Print list of available compilers (used by the "--help-compiler" + options to "build", "build_ext", "build_clib"). + 'u'Print list of available compilers (used by the "--help-compiler" + options to "build", "build_ext", "build_clib"). + 'b'compiler='u'compiler='b'List of available compilers:'u'List of available compilers:'b'Generate an instance of some CCompiler subclass for the supplied + platform/compiler combination. 'plat' defaults to 'os.name' + (eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler + for that platform. 
Currently only 'posix' and 'nt' are supported, and + the default compilers are "traditional Unix interface" (UnixCCompiler + class) and Visual C++ (MSVCCompiler class). Note that it's perfectly + possible to ask for a Unix compiler object under Windows, and a + Microsoft compiler object under Unix -- if you supply a value for + 'compiler', 'plat' is ignored. + 'u'Generate an instance of some CCompiler subclass for the supplied + platform/compiler combination. 'plat' defaults to 'os.name' + (eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler + for that platform. Currently only 'posix' and 'nt' are supported, and + the default compilers are "traditional Unix interface" (UnixCCompiler + class) and Visual C++ (MSVCCompiler class). Note that it's perfectly + possible to ask for a Unix compiler object under Windows, and a + Microsoft compiler object under Unix -- if you supply a value for + 'compiler', 'plat' is ignored. + 'b'don't know how to compile C/C++ code on platform '%s''u'don't know how to compile C/C++ code on platform '%s''b' with '%s' compiler'u' with '%s' compiler'b'distutils.'u'distutils.'b'can't compile C/C++ code: unable to load module '%s''u'can't compile C/C++ code: unable to load module '%s''b'can't compile C/C++ code: unable to find class '%s' in module '%s''u'can't compile C/C++ code: unable to find class '%s' in module '%s''b'Generate C pre-processor options (-D, -U, -I) as used by at least + two types of compilers: the typical Unix compiler and Visual C++. + 'macros' is the usual thing, a list of 1- or 2-tuples, where (name,) + means undefine (-U) macro 'name', and (name,value) means define (-D) + macro 'name' to 'value'. 'include_dirs' is just a list of directory + names to be added to the header file search path (-I). Returns a list + of command-line options suitable for either Unix compilers or Visual + C++. + 'u'Generate C pre-processor options (-D, -U, -I) as used by at least + two types of compilers: the typical Unix compiler and Visual C++. + 'macros' is the usual thing, a list of 1- or 2-tuples, where (name,) + means undefine (-U) macro 'name', and (name,value) means define (-D) + macro 'name' to 'value'. 'include_dirs' is just a list of directory + names to be added to the header file search path (-I). Returns a list + of command-line options suitable for either Unix compilers or Visual + C++. + 'b'bad macro definition '%s': each element of 'macros' list must be a 1- or 2-tuple'u'bad macro definition '%s': each element of 'macros' list must be a 1- or 2-tuple'b'-U%s'u'-U%s'b'-D%s'u'-D%s'b'-D%s=%s'u'-D%s=%s'b'-I%s'u'-I%s'b'Generate linker options for searching library directories and + linking with specific libraries. 'libraries' and 'library_dirs' are, + respectively, lists of library names (not filenames!) and search + directories. Returns a list of command-line options suitable for use + with some compiler (depending on the two format strings passed in). + 'u'Generate linker options for searching library directories and + linking with specific libraries. 'libraries' and 'library_dirs' are, + respectively, lists of library names (not filenames!) and search + directories. Returns a list of command-line options suitable for use + with some compiler (depending on the two format strings passed in). 
+ 'b'no library file corresponding to '%s' found (skipping)'u'no library file corresponding to '%s' found (skipping)'u'distutils.ccompiler'u'ccompiler'Charsetadd_aliasadd_charsetadd_codecemail.base64mimeemail.quoprimimeemail.encodersencode_7or8bitQPBASE64SHORTESTRFC2047_CHROME_LENDEFAULT_CHARSETiso-8859-2iso-8859-3iso-8859-4iso-8859-9iso-8859-10iso-8859-13iso-8859-14iso-8859-15iso-8859-16windows-1252visciiiso-2022-jpeuc-jpkoi8-rCHARSETSlatin-1latin_2latin-2latin_3latin-3latin_4latin-4latin_5latin-5latin_6latin-6latin_7latin-7latin_8latin-8latin_9latin-9latin_10latin-10ks_c_5601-1987euc-krALIASESCODEC_MAPheader_encbody_encoutput_charsetAdd character set properties to the global registry. + + charset is the input character set, and must be the canonical name of a + character set. + + Optional header_enc and body_enc is either Charset.QP for + quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for + the shortest of qp or base64 encoding, or None for no encoding. SHORTEST + is only valid for header_enc. It describes how message headers and + message bodies in the input charset are to be encoded. Default is no + encoding. + + Optional output_charset is the character set that the output should be + in. Conversions will proceed from input charset, to Unicode, to the + output charset when the method Charset.convert() is called. The default + is to output in the same character set as the input. + + Both input_charset and output_charset must have Unicode codec entries in + the module's charset-to-codec mapping; use add_codec(charset, codecname) + to add codecs the module does not know about. See the codecs module's + documentation for more information. + SHORTEST not allowed for body_encAdd a character set alias. + + alias is the alias name, e.g. latin-1 + canonical is the character set's canonical name, e.g. iso-8859-1 + codecnameAdd a codec that map characters in the given charset to/from Unicode. + + charset is the canonical name of a character set. codecname is the name + of a Python codec, as appropriate for the second argument to the unicode() + built-in, or to the encode() method of a Unicode string. + _encodecodecMap character sets to their email properties. + + This class provides information about the requirements imposed on email + for a specific character set. It also provides convenience routines for + converting between character sets, given the availability of the + applicable codecs. Given a character set, it will do its best to provide + information on how to use that character set in an email in an + RFC-compliant way. + + Certain character sets must be encoded with quoted-printable or base64 + when used in email headers or bodies. Certain character sets must be + converted outright, and are not allowed in email. Instances of this + module expose the following information about a character set: + + input_charset: The initial character set specified. Common aliases + are converted to their `official' email names (e.g. latin_1 + is converted to iso-8859-1). Defaults to 7-bit us-ascii. + + header_encoding: If the character set must be encoded before it can be + used in an email header, this attribute will be set to + Charset.QP (for quoted-printable), Charset.BASE64 (for + base64 encoding), or Charset.SHORTEST for the shortest of + QP or BASE64 encoding. Otherwise, it will be None. + + body_encoding: Same as header_encoding, but describes the encoding for the + mail message's body, which indeed may be different than the + header encoding. 
Charset.SHORTEST is not allowed for + body_encoding. + + output_charset: Some character sets must be converted before they can be + used in email headers or bodies. If the input_charset is + one of them, this attribute will contain the name of the + charset output will be converted to. Otherwise, it will + be None. + + input_codec: The name of the Python codec used to convert the + input_charset to Unicode. If no conversion codec is + necessary, this attribute will be None. + + output_codec: The name of the Python codec used to convert Unicode + to the output_charset. If no conversion codec is necessary, + this attribute will have the same value as the input_codec. + input_charsethencbencheader_encodingbody_encodinginput_codecoutput_codecget_body_encodingReturn the content-transfer-encoding used for body encoding. + + This is either the string `quoted-printable' or `base64' depending on + the encoding used, or it is a function in which case you should call + the function with a single argument, the Message object being + encoded. The function should then set the Content-Transfer-Encoding + header itself to whatever is appropriate. + + Returns "quoted-printable" if self.body_encoding is QP. + Returns "base64" if self.body_encoding is BASE64. + Returns conversion function otherwise. + quoted-printableget_output_charsetReturn the output character set. + + This is self.output_charset if that is not None, otherwise it is + self.input_charset. + Header-encode a string by converting it first to bytes. + + The type of encoding (base64 or quoted-printable) will be based on + this charset's `header_encoding`. + + :param string: A unicode string for the header. It must be possible + to encode this string to bytes using the character set's + output codec. + :return: The encoded string, with RFC 2047 chrome. + _get_encoderencoder_moduleheader_encode_linesmaxlengthsHeader-encode a string by converting it first to bytes. + + This is similar to `header_encode()` except that the string is fit + into maximum line lengths as given by the argument. + + :param string: A unicode string for the header. It must be possible + to encode this string to bytes using the character set's + output codec. + :param maxlengths: Maximum line length iterator. Each element + returned from this iterator will provide the next maximum line + length. This parameter is used as an argument to built-in next() + and should never be exhausted. The maximum line lengths should + not count the RFC 2047 chrome. These line lengths are only a + hint; the splitter does the best it can. + :return: Lines of encoded strings, each with RFC 2047 chrome. + encodercurrent_linethis_linejoined_linelen64lenqpBody-encode a string by converting it first to bytes. + + The type of encoding (base64 or quoted-printable) will be based on + self.body_encoding. If body_encoding is None, we assume the + output charset is a 7bit encoding, so re-encoding the decoded + string using the ascii codec produces the correct string version + of the content. 
+ # Author: Ben Gertzfield, Barry Warsaw# Flags for types of header encodings# Quoted-Printable# the shorter of QP and base64, but only for headers# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7# Defaults# input header enc body enc output conv# iso-8859-5 is Cyrillic, and not especially used# iso-8859-6 is Arabic, also not particularly used# iso-8859-7 is Greek, QP will not make it readable# iso-8859-8 is Hebrew, QP will not make it readable# iso-8859-11 is Thai, QP will not make it readable# Aliases for other commonly-used names for character sets. Map# them to the real ones used in email.# Map charsets to their Unicode codec strings.# Hack: We don't want *any* conversion for stuff marked us-ascii, as all# sorts of garbage might be sent to us in the guise of 7-bit us-ascii.# Let that stuff pass through without conversion to/from Unicode.# Convenience functions for extending the above mappings# Convenience function for encoding strings, taking into account# that they might be unknown-8bit (ie: have surrogate-escaped bytes)# RFC 2046, $4.1.2 says charsets are not case sensitive. We coerce to# unicode because its .lower() is locale insensitive. If the argument# is already a unicode, we leave it at that, but ensure that the# charset is ASCII, as the standard (RFC XXX) requires.# Set the input charset after filtering through the aliases# We can try to guess which encoding and conversion to use by the# charset_map dictionary. Try that first, but let the user override# it.# Set the attributes, allowing the arguments to override the default.# Now set the codecs. If one isn't defined for input_charset,# guess and try a Unicode codec with the same name as input_codec.# 7bit/8bit encodings return the string unchanged (modulo conversions)# See which encoding we should use.# Calculate the number of characters that the RFC 2047 chrome will# contribute to each line.# Now comes the hard part. We must encode bytes but we can't split on# bytes because some character sets are variable length and each# encoded word must stand on its own. So the problem is you have to# encode to bytes to figure out this word's length, but you must split# on characters. This causes two problems: first, we don't know how# many octets a specific substring of unicode characters will get# encoded to, and second, we don't know how many ASCII characters# those octets will get encoded to. Unless we try it. Which seems# inefficient. In the interest of being correct rather than fast (and# in the hope that there will be few encoded headers in any such# message), brute force it. :(# This last character doesn't fit so pop it off.# Does nothing fit on the first line?# quopromime.body_encode takes a string, but operates on it as if# it were a list of byte codes. For a (minimal) history on why# this is so, see changeset 0cf700464177. 
To correctly encode a# character set, then, we must turn it into pseudo bytes via the# latin1 charset, which will encode any byte as a single code point# between 0 and 255, which is what body_encode is expecting.b'Charset'u'Charset'b'add_alias'u'add_alias'b'add_charset'u'add_charset'b'add_codec'u'add_codec'b'iso-8859-2'u'iso-8859-2'b'iso-8859-3'u'iso-8859-3'b'iso-8859-4'u'iso-8859-4'b'iso-8859-9'u'iso-8859-9'b'iso-8859-10'u'iso-8859-10'b'iso-8859-13'u'iso-8859-13'b'iso-8859-14'u'iso-8859-14'b'iso-8859-15'u'iso-8859-15'b'iso-8859-16'u'iso-8859-16'b'windows-1252'u'windows-1252'b'viscii'u'viscii'b'iso-2022-jp'u'iso-2022-jp'b'euc-jp'u'euc-jp'b'koi8-r'u'koi8-r'b'latin-1'u'latin-1'b'latin_2'u'latin_2'b'latin-2'u'latin-2'b'latin_3'u'latin_3'b'latin-3'u'latin-3'b'latin_4'u'latin_4'b'latin-4'u'latin-4'b'latin_5'u'latin_5'b'latin-5'u'latin-5'b'latin_6'u'latin_6'b'latin-6'u'latin-6'b'latin_7'u'latin_7'b'latin-7'u'latin-7'b'latin_8'u'latin_8'b'latin-8'u'latin-8'b'latin_9'u'latin_9'b'latin-9'u'latin-9'b'latin_10'u'latin_10'b'latin-10'u'latin-10'b'ks_c_5601-1987'u'ks_c_5601-1987'b'euc-kr'u'euc-kr'b'Add character set properties to the global registry. + + charset is the input character set, and must be the canonical name of a + character set. + + Optional header_enc and body_enc is either Charset.QP for + quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for + the shortest of qp or base64 encoding, or None for no encoding. SHORTEST + is only valid for header_enc. It describes how message headers and + message bodies in the input charset are to be encoded. Default is no + encoding. + + Optional output_charset is the character set that the output should be + in. Conversions will proceed from input charset, to Unicode, to the + output charset when the method Charset.convert() is called. The default + is to output in the same character set as the input. + + Both input_charset and output_charset must have Unicode codec entries in + the module's charset-to-codec mapping; use add_codec(charset, codecname) + to add codecs the module does not know about. See the codecs module's + documentation for more information. + 'u'Add character set properties to the global registry. + + charset is the input character set, and must be the canonical name of a + character set. + + Optional header_enc and body_enc is either Charset.QP for + quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for + the shortest of qp or base64 encoding, or None for no encoding. SHORTEST + is only valid for header_enc. It describes how message headers and + message bodies in the input charset are to be encoded. Default is no + encoding. + + Optional output_charset is the character set that the output should be + in. Conversions will proceed from input charset, to Unicode, to the + output charset when the method Charset.convert() is called. The default + is to output in the same character set as the input. + + Both input_charset and output_charset must have Unicode codec entries in + the module's charset-to-codec mapping; use add_codec(charset, codecname) + to add codecs the module does not know about. See the codecs module's + documentation for more information. + 'b'SHORTEST not allowed for body_enc'u'SHORTEST not allowed for body_enc'b'Add a character set alias. + + alias is the alias name, e.g. latin-1 + canonical is the character set's canonical name, e.g. iso-8859-1 + 'u'Add a character set alias. + + alias is the alias name, e.g. latin-1 + canonical is the character set's canonical name, e.g. 
iso-8859-1 + 'b'Add a codec that map characters in the given charset to/from Unicode. + + charset is the canonical name of a character set. codecname is the name + of a Python codec, as appropriate for the second argument to the unicode() + built-in, or to the encode() method of a Unicode string. + 'u'Add a codec that map characters in the given charset to/from Unicode. + + charset is the canonical name of a character set. codecname is the name + of a Python codec, as appropriate for the second argument to the unicode() + built-in, or to the encode() method of a Unicode string. + 'b'Map character sets to their email properties. + + This class provides information about the requirements imposed on email + for a specific character set. It also provides convenience routines for + converting between character sets, given the availability of the + applicable codecs. Given a character set, it will do its best to provide + information on how to use that character set in an email in an + RFC-compliant way. + + Certain character sets must be encoded with quoted-printable or base64 + when used in email headers or bodies. Certain character sets must be + converted outright, and are not allowed in email. Instances of this + module expose the following information about a character set: + + input_charset: The initial character set specified. Common aliases + are converted to their `official' email names (e.g. latin_1 + is converted to iso-8859-1). Defaults to 7-bit us-ascii. + + header_encoding: If the character set must be encoded before it can be + used in an email header, this attribute will be set to + Charset.QP (for quoted-printable), Charset.BASE64 (for + base64 encoding), or Charset.SHORTEST for the shortest of + QP or BASE64 encoding. Otherwise, it will be None. + + body_encoding: Same as header_encoding, but describes the encoding for the + mail message's body, which indeed may be different than the + header encoding. Charset.SHORTEST is not allowed for + body_encoding. + + output_charset: Some character sets must be converted before they can be + used in email headers or bodies. If the input_charset is + one of them, this attribute will contain the name of the + charset output will be converted to. Otherwise, it will + be None. + + input_codec: The name of the Python codec used to convert the + input_charset to Unicode. If no conversion codec is + necessary, this attribute will be None. + + output_codec: The name of the Python codec used to convert Unicode + to the output_charset. If no conversion codec is necessary, + this attribute will have the same value as the input_codec. + 'u'Map character sets to their email properties. + + This class provides information about the requirements imposed on email + for a specific character set. It also provides convenience routines for + converting between character sets, given the availability of the + applicable codecs. Given a character set, it will do its best to provide + information on how to use that character set in an email in an + RFC-compliant way. + + Certain character sets must be encoded with quoted-printable or base64 + when used in email headers or bodies. Certain character sets must be + converted outright, and are not allowed in email. Instances of this + module expose the following information about a character set: + + input_charset: The initial character set specified. Common aliases + are converted to their `official' email names (e.g. latin_1 + is converted to iso-8859-1). Defaults to 7-bit us-ascii. 
+ + header_encoding: If the character set must be encoded before it can be + used in an email header, this attribute will be set to + Charset.QP (for quoted-printable), Charset.BASE64 (for + base64 encoding), or Charset.SHORTEST for the shortest of + QP or BASE64 encoding. Otherwise, it will be None. + + body_encoding: Same as header_encoding, but describes the encoding for the + mail message's body, which indeed may be different than the + header encoding. Charset.SHORTEST is not allowed for + body_encoding. + + output_charset: Some character sets must be converted before they can be + used in email headers or bodies. If the input_charset is + one of them, this attribute will contain the name of the + charset output will be converted to. Otherwise, it will + be None. + + input_codec: The name of the Python codec used to convert the + input_charset to Unicode. If no conversion codec is + necessary, this attribute will be None. + + output_codec: The name of the Python codec used to convert Unicode + to the output_charset. If no conversion codec is necessary, + this attribute will have the same value as the input_codec. + 'b'Return the content-transfer-encoding used for body encoding. + + This is either the string `quoted-printable' or `base64' depending on + the encoding used, or it is a function in which case you should call + the function with a single argument, the Message object being + encoded. The function should then set the Content-Transfer-Encoding + header itself to whatever is appropriate. + + Returns "quoted-printable" if self.body_encoding is QP. + Returns "base64" if self.body_encoding is BASE64. + Returns conversion function otherwise. + 'u'Return the content-transfer-encoding used for body encoding. + + This is either the string `quoted-printable' or `base64' depending on + the encoding used, or it is a function in which case you should call + the function with a single argument, the Message object being + encoded. The function should then set the Content-Transfer-Encoding + header itself to whatever is appropriate. + + Returns "quoted-printable" if self.body_encoding is QP. + Returns "base64" if self.body_encoding is BASE64. + Returns conversion function otherwise. + 'b'quoted-printable'u'quoted-printable'b'Return the output character set. + + This is self.output_charset if that is not None, otherwise it is + self.input_charset. + 'u'Return the output character set. + + This is self.output_charset if that is not None, otherwise it is + self.input_charset. + 'b'Header-encode a string by converting it first to bytes. + + The type of encoding (base64 or quoted-printable) will be based on + this charset's `header_encoding`. + + :param string: A unicode string for the header. It must be possible + to encode this string to bytes using the character set's + output codec. + :return: The encoded string, with RFC 2047 chrome. + 'u'Header-encode a string by converting it first to bytes. + + The type of encoding (base64 or quoted-printable) will be based on + this charset's `header_encoding`. + + :param string: A unicode string for the header. It must be possible + to encode this string to bytes using the character set's + output codec. + :return: The encoded string, with RFC 2047 chrome. + 'b'Header-encode a string by converting it first to bytes. + + This is similar to `header_encode()` except that the string is fit + into maximum line lengths as given by the argument. + + :param string: A unicode string for the header. 
It must be possible + to encode this string to bytes using the character set's + output codec. + :param maxlengths: Maximum line length iterator. Each element + returned from this iterator will provide the next maximum line + length. This parameter is used as an argument to built-in next() + and should never be exhausted. The maximum line lengths should + not count the RFC 2047 chrome. These line lengths are only a + hint; the splitter does the best it can. + :return: Lines of encoded strings, each with RFC 2047 chrome. + 'u'Header-encode a string by converting it first to bytes. + + This is similar to `header_encode()` except that the string is fit + into maximum line lengths as given by the argument. + + :param string: A unicode string for the header. It must be possible + to encode this string to bytes using the character set's + output codec. + :param maxlengths: Maximum line length iterator. Each element + returned from this iterator will provide the next maximum line + length. This parameter is used as an argument to built-in next() + and should never be exhausted. The maximum line lengths should + not count the RFC 2047 chrome. These line lengths are only a + hint; the splitter does the best it can. + :return: Lines of encoded strings, each with RFC 2047 chrome. + 'b'Body-encode a string by converting it first to bytes. + + The type of encoding (base64 or quoted-printable) will be based on + self.body_encoding. If body_encoding is None, we assume the + output charset is a 7bit encoding, so re-encoding the decoded + string using the ascii codec produces the correct string version + of the content. + 'u'Body-encode a string by converting it first to bytes. + + The type of encoding (base64 or quoted-printable) will be based on + self.body_encoding. If body_encoding is None, we assume the + output charset is a 7bit encoding, so re-encoding the decoded + string using the ascii codec produces the correct string version + of the content. + 'u'email.charset' +An XML-RPC client interface for Python. + +The marshalling and response parser code can also be used to +implement XML-RPC servers. + +Exported exceptions: + + Error Base class for client errors + ProtocolError Indicates an HTTP protocol error + ResponseError Indicates a broken response package + Fault Indicates an XML-RPC fault package + +Exported classes: + + ServerProxy Represents a logical connection to an XML-RPC server + + MultiCall Executor of boxcared xmlrpc requests + DateTime dateTime wrapper for an ISO 8601 string or time tuple or + localtime integer value to generate a "dateTime.iso8601" + XML-RPC value + Binary binary data wrapper + + Marshaller Generate an XML-RPC params chunk from a Python data structure + Unmarshaller Unmarshal an XML-RPC response from incoming XML event message + Transport Handles an HTTP transaction to an XML-RPC server + SafeTransport Handles an HTTPS transaction to an XML-RPC server + +Exported constants: + + (none) + +Exported functions: + + getparser Create instance of the fastest available parser & attach + to an unmarshalling object + dumps Convert an argument tuple or a Fault instance to an XML-RPC + request (or response, if the methodresponse option is used). + loads Convert an XML-RPC packet to unmarshalled data plus a method + name (None if not present). 
+httpMAXINTMININT32700PARSE_ERROR32600SERVER_ERROR32500APPLICATION_ERROR32400SYSTEM_ERROR32300TRANSPORT_ERRORNOT_WELLFORMED_ERROR32701UNSUPPORTED_ENCODING32702INVALID_ENCODING_CHARINVALID_XMLRPC32601METHOD_NOT_FOUND32602INVALID_METHOD_PARAMS32603INTERNAL_ERRORBase class for client errors.ProtocolErrorIndicates an HTTP protocol error.errcodeerrmsg<%s for %s: %s %s>ResponseErrorIndicates a broken response package.FaultIndicates an XML-RPC fault package.faultCodefaultString<%s %s: %r>Boolean_day00001_iso8601_format%Y%m%dT%H:%M:%S%4Y%4Y%m%dT%H:%M:%S_strftimestruct_time%04d%02d%02dT%02d:%02d:%02dDateTimeDateTime wrapper for an ISO 8601 string or time tuple or + localtime integer value to generate 'dateTime.iso8601' XML-RPC + value. + make_comparableotypeCan't compare %s and %s +_datetime_typeBinaryWrapper for binary data.expected bytes or bytearray, not %s + +_binaryWRAPPERSExpatParserMarshallerGenerate an XML-RPC params chunk from a Python data structure. + + Create a Marshaller instance for each set of parameters, and use + the "dumps" method to convert your data (represented as a tuple) + to an XML-RPC params chunk. To write a fault response, pass a + Fault instance instead. You may prefer to use the "dumps" module + function for this purpose. + allow_nonedispatch__dump + + + + + +cannot marshal %s objects_arbitrary_instancedump_nilcannot marshal None unless allow_none is enableddump_bool +dump_longint exceeds XML-RPC limits +dump_intdump_double +dump_unicode +dump_bytesdump_arraycannot marshal recursive sequences + +dump_structcannot marshal recursive dictionaries + +dictionary key must be string%s + + +dump_datetimedump_instanceUnmarshallerUnmarshal an XML-RPC response, based on incoming XML event + messages (start, data, end). Call close() to get the resulting + data structure. + + Note that this reader is fairly tolerant, and gladly accepts bogus + XML-RPC data without complaining (but not bogus XML). + use_datetimeuse_builtin_types_type_stack_marks_value_methodname_use_datetime_use_bytesfaultgetmethodnamestandaloneunknown tag %rend_dispatchend_nilnilend_booleanbad boolean valueend_inti1i2i4i8bigintegerend_doubleend_bigdecimalbigdecimalend_stringend_arrayend_structend_base64end_dateTimedateTime.iso8601end_valueend_paramsend_faultend_methodName_MultiCallMethodcall_list__call_list__nameMultiCallIteratorIterates over the results of a multicall. Exceptions are + raised in response to xmlrpc faults.unexpected type in multicall resultMultiCallserver -> an object used to boxcar method calls + + server should be a ServerProxy object. + + Methods can be added to the MultiCall using normal + method call syntax e.g.: + + multicall = MultiCall(server_proxy) + multicall.add(2,3) + multicall.get_address("Guido") + + To execute the multicall, call the MultiCall object e.g.: + + add_result, address = multicall() + __server<%s at %#x>marshalled_listmulticallFastMarshallerFastParserFastUnmarshallergetparsergetparser() -> parser, unmarshaller + + Create an instance of the fastest available parser, and attach it + to an unmarshalling object. Return both objects. + mkdatetimemkbytesmethodnamemethodresponsedata [,options] -> marshalled data + + Convert an argument tuple or a Fault instance to an XML-RPC + request (or response, if the methodresponse option is used). + + In addition to the data object, the following options can be given + as keyword arguments: + + methodname: the method name for a methodCall packet + + methodresponse: true to create a methodResponse packet. 
+ If this option is used with a tuple, the tuple must be + a singleton (i.e. it can contain only one element). + + encoding: the packet encoding (default is UTF-8) + + All byte strings in the data structure are assumed to use the + packet encoding. Unicode strings are automatically converted, + where necessary. + argument must be tuple or Fault instanceresponse tuple must be a singletonxmlheader + +"\n""" + + + +data -> unmarshalled data, method name + + Convert an XML-RPC packet to unmarshalled data plus a method + name (None if not present). + + If the XML-RPC packet represents a fault condition, this function + raises a Fault exception. + gzip_encodedata -> gzip encoded data + + Encode data using the gzip content encoding as described in RFC 1952 + gzfgzip_decode20971520max_decodegzip encoded data -> unencoded data + + Decode data using the gzip content encoding as described in RFC 1952 + invalid datamax gzipped payload length exceededGzipDecodedResponsea file-like object to decode a response encoded with the gzip + method, as described in RFC 1952. + response_Method__sendTransportHandles an HTTP transaction to an XML-RPC server.Python-xmlrpc/%suser_agentaccept_gzip_encodingencode_threshold_use_builtin_types_connection_headers_extra_headersrequest_bodysingle_requestRemoteDisconnectedECONNABORTEDEPIPEsend_requesthttp_conngetresponserespparse_responsegetheadercontent-lengthgetheadersget_host_infox509_splituserauthunquote_to_bytesAuthorizationBasic extra_headersmake_connectionchostHTTPConnectionconnectionset_debuglevelputrequestPOSTskip_accept_encodingContent-Typetext/xmlUser-Agentsend_headerssend_contentputheaderContent-Lengthendheadersbody:SafeTransportHandles an HTTPS transaction to an XML-RPC server.HTTPSConnectionyour version of http.client doesn't support HTTPSServerProxyuri [,options] -> a logical connection to an XML-RPC server + + uri is the connection point on the server, given as + scheme://host/target. + + The standard implementation always supports the "http" scheme. If + SSL socket support is available (Python 2.0), it also supports + "https". + + If the target part and the slash preceding it are both omitted, + "/RPC2" is assumed. + + The following options can be given as keyword arguments: + + transport: a transport factory + encoding: the request encoding (default is UTF-8) + + All 8-bit strings passed to the server proxy are assumed to use + the given encoding. 
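The dumps()/loads() docstrings above describe the request/response marshalling; a self-contained round-trip sketch, including the Fault path, looks roughly like this:

    import xmlrpc.client

    # Marshal a methodCall packet and unmarshal it again.
    packet = xmlrpc.client.dumps((2, 3), methodname="add", encoding="utf-8")
    params, methodname = xmlrpc.client.loads(packet)
    assert params == (2, 3) and methodname == "add"

    # A Fault marshalled as a methodResponse raises Fault when loaded back.
    fault_packet = xmlrpc.client.dumps(
        xmlrpc.client.Fault(xmlrpc.client.METHOD_NOT_FOUND, "method not found"),
        methodresponse=True,
    )
    try:
        xmlrpc.client.loads(fault_packet)
    except xmlrpc.client.Fault as fault:
        print(fault.faultCode, fault.faultString)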
+ _splittypehttpsunsupported XML-RPC protocol_splithost__host__handler/RPC2extra_kwargs__transport__encoding__verbose__allow_none__close__request<%s for %s%s>A workaround to get special attributes on the ServerProxy + without interfering with the magic __getattr__ + Attribute %r not foundhttp://localhost:8000currentTimegetCurrentTimemultigetData# XML-RPC CLIENT LIBRARY# $Id$# an XML-RPC client interface for Python.# the marshalling and response parser code can also be used to# implement XML-RPC servers.# Notes:# this version is designed to work with Python 2.1 or newer.# History:# 1999-01-14 fl Created# 1999-01-15 fl Changed dateTime to use localtime# 1999-01-16 fl Added Binary/base64 element, default to RPC2 service# 1999-01-19 fl Fixed array data element (from Skip Montanaro)# 1999-01-21 fl Fixed dateTime constructor, etc.# 1999-02-02 fl Added fault handling, handle empty sequences, etc.# 1999-02-10 fl Fixed problem with empty responses (from Skip Montanaro)# 1999-06-20 fl Speed improvements, pluggable parsers/transports (0.9.8)# 2000-11-28 fl Changed boolean to check the truth value of its argument# 2001-02-24 fl Added encoding/Unicode/SafeTransport patches# 2001-02-26 fl Added compare support to wrappers (0.9.9/1.0b1)# 2001-03-28 fl Make sure response tuple is a singleton# 2001-03-29 fl Don't require empty params element (from Nicholas Riley)# 2001-06-10 fl Folded in _xmlrpclib accelerator support (1.0b2)# 2001-08-20 fl Base xmlrpclib.Error on built-in Exception (from Paul Prescod)# 2001-09-03 fl Allow Transport subclass to override getparser# 2001-09-10 fl Lazy import of urllib, cgi, xmllib (20x import speedup)# 2001-10-01 fl Remove containers from memo cache when done with them# 2001-10-01 fl Use faster escape method (80% dumps speedup)# 2001-10-02 fl More dumps microtuning# 2001-10-04 fl Make sure import expat gets a parser (from Guido van Rossum)# 2001-10-10 sm Allow long ints to be passed as ints if they don't overflow# 2001-10-17 sm Test for int and long overflow (allows use on 64-bit systems)# 2001-11-12 fl Use repr() to marshal doubles (from Paul Felix)# 2002-03-17 fl Avoid buffered read when possible (from James Rucker)# 2002-04-07 fl Added pythondoc comments# 2002-04-16 fl Added __str__ methods to datetime/binary wrappers# 2002-05-15 fl Added error constants (from Andrew Kuchling)# 2002-06-27 fl Merged with Python CVS version# 2002-10-22 fl Added basic authentication (based on code from Phillip Eby)# 2003-01-22 sm Add support for the bool type# 2003-02-27 gvr Remove apply calls# 2003-04-24 sm Use cStringIO if available# 2003-04-25 ak Add support for nil# 2003-06-15 gn Add support for time.struct_time# 2003-07-12 gp Correct marshalling of Faults# 2003-10-31 mvl Add multicall support# 2004-08-20 mvl Bump minimum supported Python version to 2.1# 2014-12-02 ch/doko Add workaround for gzip bomb vulnerability# Copyright (c) 1999-2002 by Secret Labs AB.# Copyright (c) 1999-2002 by Fredrik Lundh.# info@pythonware.com# The XML-RPC client interface is# Copyright (c) 1999-2002 by Secret Labs AB# Copyright (c) 1999-2002 by Fredrik Lundh#python can be built without zlib/gzip support# Internal stuff# used in User-Agent header sent# xmlrpc integer limits# Error constants (from Dan Libby's specification at# http://xmlrpc-epi.sourceforge.net/specs/rfc.fault_codes.php)# Ranges of errors# Specific errors# Base class for all kinds of client-side errors.# Indicates an HTTP-level protocol error. 
This is raised by the HTTP# transport layer, if the server returns an error code other than 200# (OK).# @param url The target URL.# @param errcode The HTTP error code.# @param errmsg The HTTP error message.# @param headers The HTTP header dictionary.# Indicates a broken XML-RPC response package. This exception is# raised by the unmarshalling layer, if the XML-RPC response is# malformed.# Indicates an XML-RPC fault response package. This exception is# raised by the unmarshalling layer, if the XML-RPC response contains# a fault string. This exception can also be used as a class, to# generate a fault XML-RPC message.# @param faultCode The XML-RPC fault code.# @param faultString The XML-RPC fault string.# Special values# Backwards compatibility# Wrapper for XML-RPC DateTime values. This converts a time value to# the format used by XML-RPC.#

# The value can be given as a datetime object, as a string in the# format "yyyymmddThh:mm:ss", as a 9-item time tuple (as returned by# time.localtime()), or an integer value (as returned by time.time()).# The wrapper uses time.localtime() to convert an integer to a time# tuple.# @param value The time, given as a datetime object, an ISO 8601 string,# a time tuple, or an integer time value.# Issue #13305: different format codes across platforms# Mac OS X# Linux# Get date/time value.# @return Date/time value, as an ISO 8601 string.# decode xml element contents into a DateTime structure.# Wrapper for binary data. This can be used to transport any kind# of binary data over XML-RPC, using BASE64 encoding.# @param data An 8-bit string containing arbitrary data.# Make a copy of the bytes!# Get buffer contents.# @return Buffer contents, as an 8-bit string.# XXX encoding?!# decode xml element contents into a Binary structure# XML parsers# fast expat parser for Python 2.0 and later.# XML-RPC marshalling and unmarshalling code# XML-RPC marshaller.# @param encoding Default encoding for 8-bit strings. The default# value is None (interpreted as UTF-8).# @see dumps# by the way, if you don't understand what's going on in here,# that's perfectly ok.# fault instance# parameter block# FIXME: the xml-rpc specification allows us to leave out# the entire block if there are no parameters.# however, changing this may break older code (including# old versions of xmlrpclib.py), so this is better left as# is for now. See @XMLRPC3 for more information. /F# check if this object can be marshalled as a structure# check if this class is a sub-class of a basic type,# because we don't know how to marshal these types# (e.g. a string sub-class)# XXX(twouters): using "_arbitrary_instance" as key as a quick-fix# for the p3yk merge, this should probably be fixed more neatly.# backward compatible# check for special wrappers# store instance attributes as a struct (really?)# XML-RPC unmarshaller.# @see loads# and again, if you don't understand what's going on in here,# return response tuple and target method# event handlers# FIXME: assert standalone == 1 ???# prepare to handle this element# call the appropriate end tag handler# unknown tag ?# accelerator support# dispatch data# element decoders# struct keys are always strings# map arrays to Python lists# map structs to Python dictionaries# if we stumble upon a value element with no internal# elements, treat it as a string element# no params## Multicall support# some lesser magic to store calls made to a MultiCall object# for batch execution# convenience functions# Create a parser object, and connect it to an unmarshalling instance.# This function picks the fastest available XML parser.# return A (parser, unmarshaller) tuple.# Convert a Python tuple or a Fault instance to an XML-RPC packet.# @def dumps(params, **options)# @param params A tuple or Fault instance.# @keyparam methodname If given, create a methodCall request for# this method name.# @keyparam methodresponse If given, create a methodResponse packet.# If used with a tuple, the tuple must be a singleton (that is,# it must contain exactly one element).# @keyparam encoding The packet encoding.# @return A string containing marshalled data.# utf-8 is default# standard XML-RPC wrappings# a method call# a method response, or a fault structure# return as is# Convert an XML-RPC packet to a Python object. 
If the XML-RPC packet# represents a fault condition, this function raises a Fault exception.# @param data An XML-RPC packet, given as an 8-bit string.# @return A tuple containing the unpacked data, and the method name# (None if not present).# @see Fault# Encode a string using the gzip content encoding such as specified by the# Content-Encoding: gzip# in the HTTP header, as described in RFC 1952# @param data the unencoded data# @return the encoded data# Decode a string using the gzip content encoding such as specified by the# @param data The encoded data# @keyparam max_decode Maximum bytes to decode (20 MiB default), use negative# values for unlimited decoding# @return the unencoded data# @raises ValueError if data is not correctly coded.# @raises ValueError if max gzipped payload length exceeded# no limit# Return a decoded file-like object for the gzip encoding# as described in RFC 1952.# @param response A stream supporting a read() method# @return a file-like object that the decoded data can be read() from#response doesn't support tell() and read(), required by#GzipFile# request dispatcher# some magic to bind an XML-RPC method to an RPC server.# supports "nested" methods (e.g. examples.getStateName)# Standard transport class for XML-RPC over HTTP.# You can create custom transports by subclassing this method, and# overriding selected methods.# client identifier (may be overridden)#if true, we'll request gzip encoding# if positive, encode request using gzip if it exceeds this threshold# note that many servers will get confused, so only use it if you know# that they can decode such a request#None = don't encode# Send a complete request, and parse the response.# Retry request if a cached connection has disconnected.# @param host Target host.# @param handler Target PRC handler.# @param request_body XML-RPC request body.# @param verbose Debugging flag.# @return Parsed response.#retry request once if cached connection has gone cold# issue XML-RPC request#All unexpected errors leave connection in# a strange state, so we clear it.#We got an error response.#Discard any response data and raise exception# Create parser.# @return A 2-tuple containing a parser and an unmarshaller.# get parser and unmarshaller# Get authorization info from host parameter# Host may be a string, or a (host, x509-dict) tuple; if a string,# it is checked for a "user:pw@host" format, and a "Basic# Authentication" header is added if appropriate.# @param host Host descriptor (URL or (URL, x509 info) tuple).# @return A 3-tuple containing (actual host, extra headers,# x509 info). The header and x509 fields may be None.# get rid of whitespace# Connect to server.# @return An HTTPConnection object#return an existing connection if possible. 
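The comments above cover the gzip content coding used for request and response bodies; gzip_encode() and gzip_decode() are module-level helpers, and the decoder's size cap (the gzip-bomb workaround noted in the history) can be adjusted. A small round-trip sketch:

    import xmlrpc.client

    raw = b"<methodCall><methodName>add</methodName></methodCall>"
    encoded = xmlrpc.client.gzip_encode(raw)           # RFC 1952 gzip coding
    assert xmlrpc.client.gzip_decode(encoded) == raw

    # max_decode limits the decoded size (default ~20 MiB); a negative value disables the cap.
    xmlrpc.client.gzip_decode(encoded, max_decode=-1)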
This allows#HTTP/1.1 keep-alive.# create a HTTP connection object from a host descriptor# Clear any cached connection object.# Used in the event of socket errors.# Send HTTP request.# @param handler Target RPC handler (a path relative to host)# @param request_body The XML-RPC request body# @param debug Enable debugging if debug is true.# @return An HTTPConnection.# Send request headers.# This function provides a useful hook for subclassing# @param connection httpConnection.# @param headers list of key,value pairs for HTTP headers# Send request body.#optionally encode the request# Parse response.# @param file Stream.# @return Response tuple and target method.# read response data from httpresponse, and parse it# Check for new http response object, otherwise it is a file object.# Standard transport class for XML-RPC over HTTPS.# FIXME: mostly untested# create a HTTPS connection object from a host descriptor# host may be a string, or a (host, x509-dict) tuple# Standard server proxy. This class establishes a virtual connection# to an XML-RPC server.# This class is available as ServerProxy and Server. New code should# use ServerProxy, to avoid confusion.# @def ServerProxy(uri, **options)# @param uri The connection point on the server.# @keyparam transport A transport factory, compatible with the# standard transport class.# @keyparam encoding The default encoding used for 8-bit strings# (default is UTF-8).# @keyparam verbose Use a true value to enable debugging output.# (printed to standard output).# @see Transport# establish a "logical" server connection# get the url# call a method on the remote server# magic method dispatcher# note: to call a remote object with a non-standard name, use# result getattr(server, "strange-python-name")(args)# test code# simple test program (from the XML-RPC specification)# local server, available from Lib/xmlrpc/server.pyb' +An XML-RPC client interface for Python. + +The marshalling and response parser code can also be used to +implement XML-RPC servers. + +Exported exceptions: + + Error Base class for client errors + ProtocolError Indicates an HTTP protocol error + ResponseError Indicates a broken response package + Fault Indicates an XML-RPC fault package + +Exported classes: + + ServerProxy Represents a logical connection to an XML-RPC server + + MultiCall Executor of boxcared xmlrpc requests + DateTime dateTime wrapper for an ISO 8601 string or time tuple or + localtime integer value to generate a "dateTime.iso8601" + XML-RPC value + Binary binary data wrapper + + Marshaller Generate an XML-RPC params chunk from a Python data structure + Unmarshaller Unmarshal an XML-RPC response from incoming XML event message + Transport Handles an HTTP transaction to an XML-RPC server + SafeTransport Handles an HTTPS transaction to an XML-RPC server + +Exported constants: + + (none) + +Exported functions: + + getparser Create instance of the fastest available parser & attach + to an unmarshalling object + dumps Convert an argument tuple or a Fault instance to an XML-RPC + request (or response, if the methodresponse option is used). + loads Convert an XML-RPC packet to unmarshalled data plus a method + name (None if not present). +'u' +An XML-RPC client interface for Python. + +The marshalling and response parser code can also be used to +implement XML-RPC servers. 
+ +Exported exceptions: + + Error Base class for client errors + ProtocolError Indicates an HTTP protocol error + ResponseError Indicates a broken response package + Fault Indicates an XML-RPC fault package + +Exported classes: + + ServerProxy Represents a logical connection to an XML-RPC server + + MultiCall Executor of boxcared xmlrpc requests + DateTime dateTime wrapper for an ISO 8601 string or time tuple or + localtime integer value to generate a "dateTime.iso8601" + XML-RPC value + Binary binary data wrapper + + Marshaller Generate an XML-RPC params chunk from a Python data structure + Unmarshaller Unmarshal an XML-RPC response from incoming XML event message + Transport Handles an HTTP transaction to an XML-RPC server + SafeTransport Handles an HTTPS transaction to an XML-RPC server + +Exported constants: + + (none) + +Exported functions: + + getparser Create instance of the fastest available parser & attach + to an unmarshalling object + dumps Convert an argument tuple or a Fault instance to an XML-RPC + request (or response, if the methodresponse option is used). + loads Convert an XML-RPC packet to unmarshalled data plus a method + name (None if not present). +'b'Base class for client errors.'u'Base class for client errors.'b'Indicates an HTTP protocol error.'u'Indicates an HTTP protocol error.'b'<%s for %s: %s %s>'u'<%s for %s: %s %s>'b'Indicates a broken response package.'u'Indicates a broken response package.'b'Indicates an XML-RPC fault package.'u'Indicates an XML-RPC fault package.'b'<%s %s: %r>'u'<%s %s: %r>'b'0001'u'0001'b'%Y%m%dT%H:%M:%S'u'%Y%m%dT%H:%M:%S'b'%4Y'u'%4Y'b'%4Y%m%dT%H:%M:%S'u'%4Y%m%dT%H:%M:%S'b'%04d%02d%02dT%02d:%02d:%02d'u'%04d%02d%02dT%02d:%02d:%02d'b'DateTime wrapper for an ISO 8601 string or time tuple or + localtime integer value to generate 'dateTime.iso8601' XML-RPC + value. + 'u'DateTime wrapper for an ISO 8601 string or time tuple or + localtime integer value to generate 'dateTime.iso8601' XML-RPC + value. + 'b'timetuple'u'timetuple'b'__class__'u'__class__'b'Can't compare %s and %s'u'Can't compare %s and %s'b''u''b' +'u' +'b'Wrapper for binary data.'u'Wrapper for binary data.'b'expected bytes or bytearray, not %s'u'expected bytes or bytearray, not %s'b' +'u' +'b' +'u' +'b'Generate an XML-RPC params chunk from a Python data structure. + + Create a Marshaller instance for each set of parameters, and use + the "dumps" method to convert your data (represented as a tuple) + to an XML-RPC params chunk. To write a fault response, pass a + Fault instance instead. You may prefer to use the "dumps" module + function for this purpose. + 'u'Generate an XML-RPC params chunk from a Python data structure. + + Create a Marshaller instance for each set of parameters, and use + the "dumps" method to convert your data (represented as a tuple) + to an XML-RPC params chunk. To write a fault response, pass a + Fault instance instead. You may prefer to use the "dumps" module + function for this purpose. 
+ 'b' +'u' +'b'faultCode'u'faultCode'b'faultString'u'faultString'b' +'u' +'b' +'u' +'b' +'u' +'b' +'u' +'b' +'u' +'b'cannot marshal %s objects'u'cannot marshal %s objects'b'_arbitrary_instance'u'_arbitrary_instance'b'cannot marshal None unless allow_none is enabled'u'cannot marshal None unless allow_none is enabled'b''u''b''u''b' +'u' +'b'int exceeds XML-RPC limits'u'int exceeds XML-RPC limits'b''u''b' +'u' +'b''u''b' +'u' +'b''u''b' +'u' +'b'cannot marshal recursive sequences'u'cannot marshal recursive sequences'b' +'u' +'b' +'u' +'b'cannot marshal recursive dictionaries'u'cannot marshal recursive dictionaries'b' +'u' +'b' +'u' +'b'dictionary key must be string'u'dictionary key must be string'b'%s +'u'%s +'b' +'u' +'b' +'u' +'b'Unmarshal an XML-RPC response, based on incoming XML event + messages (start, data, end). Call close() to get the resulting + data structure. + + Note that this reader is fairly tolerant, and gladly accepts bogus + XML-RPC data without complaining (but not bogus XML). + 'u'Unmarshal an XML-RPC response, based on incoming XML event + messages (start, data, end). Call close() to get the resulting + data structure. + + Note that this reader is fairly tolerant, and gladly accepts bogus + XML-RPC data without complaining (but not bogus XML). + 'b'fault'u'fault'b'array'b'struct'b'unknown tag %r'u'unknown tag %r'b'nil'u'nil'b'bad boolean value'u'bad boolean value'b'boolean'u'boolean'b'i1'u'i1'b'i2'u'i2'b'i4'u'i4'b'i8'u'i8'b'biginteger'u'biginteger'b'float'u'float'b'bigdecimal'u'bigdecimal'b'string'u'string'b'dateTime.iso8601'u'dateTime.iso8601'b'params'u'params'b'methodName'u'methodName'b'Iterates over the results of a multicall. Exceptions are + raised in response to xmlrpc faults.'u'Iterates over the results of a multicall. Exceptions are + raised in response to xmlrpc faults.'b'unexpected type in multicall result'u'unexpected type in multicall result'b'server -> an object used to boxcar method calls + + server should be a ServerProxy object. + + Methods can be added to the MultiCall using normal + method call syntax e.g.: + + multicall = MultiCall(server_proxy) + multicall.add(2,3) + multicall.get_address("Guido") + + To execute the multicall, call the MultiCall object e.g.: + + add_result, address = multicall() + 'u'server -> an object used to boxcar method calls + + server should be a ServerProxy object. + + Methods can be added to the MultiCall using normal + method call syntax e.g.: + + multicall = MultiCall(server_proxy) + multicall.add(2,3) + multicall.get_address("Guido") + + To execute the multicall, call the MultiCall object e.g.: + + add_result, address = multicall() + 'b'<%s at %#x>'u'<%s at %#x>'b'getparser() -> parser, unmarshaller + + Create an instance of the fastest available parser, and attach it + to an unmarshalling object. Return both objects. + 'u'getparser() -> parser, unmarshaller + + Create an instance of the fastest available parser, and attach it + to an unmarshalling object. Return both objects. + 'b'data [,options] -> marshalled data + + Convert an argument tuple or a Fault instance to an XML-RPC + request (or response, if the methodresponse option is used). + + In addition to the data object, the following options can be given + as keyword arguments: + + methodname: the method name for a methodCall packet + + methodresponse: true to create a methodResponse packet. + If this option is used with a tuple, the tuple must be + a singleton (i.e. it can contain only one element). 
+ + encoding: the packet encoding (default is UTF-8) + + All byte strings in the data structure are assumed to use the + packet encoding. Unicode strings are automatically converted, + where necessary. + 'u'data [,options] -> marshalled data + + Convert an argument tuple or a Fault instance to an XML-RPC + request (or response, if the methodresponse option is used). + + In addition to the data object, the following options can be given + as keyword arguments: + + methodname: the method name for a methodCall packet + + methodresponse: true to create a methodResponse packet. + If this option is used with a tuple, the tuple must be + a singleton (i.e. it can contain only one element). + + encoding: the packet encoding (default is UTF-8) + + All byte strings in the data structure are assumed to use the + packet encoding. Unicode strings are automatically converted, + where necessary. + 'b'argument must be tuple or Fault instance'u'argument must be tuple or Fault instance'b'response tuple must be a singleton'u'response tuple must be a singleton'b' +'u' +'b' +'u' +'b' +'u' +'b' +'u' +'b' +'u' +'b' +'u' +'b'data -> unmarshalled data, method name + + Convert an XML-RPC packet to unmarshalled data plus a method + name (None if not present). + + If the XML-RPC packet represents a fault condition, this function + raises a Fault exception. + 'u'data -> unmarshalled data, method name + + Convert an XML-RPC packet to unmarshalled data plus a method + name (None if not present). + + If the XML-RPC packet represents a fault condition, this function + raises a Fault exception. + 'b'data -> gzip encoded data + + Encode data using the gzip content encoding as described in RFC 1952 + 'u'data -> gzip encoded data + + Encode data using the gzip content encoding as described in RFC 1952 + 'b'gzip encoded data -> unencoded data + + Decode data using the gzip content encoding as described in RFC 1952 + 'u'gzip encoded data -> unencoded data + + Decode data using the gzip content encoding as described in RFC 1952 + 'b'invalid data'u'invalid data'b'max gzipped payload length exceeded'u'max gzipped payload length exceeded'b'a file-like object to decode a response encoded with the gzip + method, as described in RFC 1952. + 'u'a file-like object to decode a response encoded with the gzip + method, as described in RFC 1952. + 'b'Handles an HTTP transaction to an XML-RPC server.'u'Handles an HTTP transaction to an XML-RPC server.'b'Python-xmlrpc/%s'u'Python-xmlrpc/%s'b'content-length'u'content-length'b'Authorization'u'Authorization'b'Basic 'u'Basic 'b'POST'u'POST'b'Content-Type'u'Content-Type'b'text/xml'u'text/xml'b'User-Agent'u'User-Agent'b'Content-Length'u'Content-Length'b'getheader'u'getheader'b'body:'u'body:'b'Handles an HTTPS transaction to an XML-RPC server.'u'Handles an HTTPS transaction to an XML-RPC server.'b'HTTPSConnection'u'HTTPSConnection'b'your version of http.client doesn't support HTTPS'u'your version of http.client doesn't support HTTPS'b'uri [,options] -> a logical connection to an XML-RPC server + + uri is the connection point on the server, given as + scheme://host/target. + + The standard implementation always supports the "http" scheme. If + SSL socket support is available (Python 2.0), it also supports + "https". + + If the target part and the slash preceding it are both omitted, + "/RPC2" is assumed. 
+ + The following options can be given as keyword arguments: + + transport: a transport factory + encoding: the request encoding (default is UTF-8) + + All 8-bit strings passed to the server proxy are assumed to use + the given encoding. + 'u'uri [,options] -> a logical connection to an XML-RPC server + + uri is the connection point on the server, given as + scheme://host/target. + + The standard implementation always supports the "http" scheme. If + SSL socket support is available (Python 2.0), it also supports + "https". + + If the target part and the slash preceding it are both omitted, + "/RPC2" is assumed. + + The following options can be given as keyword arguments: + + transport: a transport factory + encoding: the request encoding (default is UTF-8) + + All 8-bit strings passed to the server proxy are assumed to use + the given encoding. + 'b'http'b'https'u'https'b'unsupported XML-RPC protocol'u'unsupported XML-RPC protocol'b'/RPC2'u'/RPC2'b'<%s for %s%s>'u'<%s for %s%s>'b'A workaround to get special attributes on the ServerProxy + without interfering with the magic __getattr__ + 'u'A workaround to get special attributes on the ServerProxy + without interfering with the magic __getattr__ + 'b'transport'u'transport'b'Attribute %r not found'u'Attribute %r not found'b'http://localhost:8000'u'http://localhost:8000'HTTP/1.1 client library + + + + +HTTPConnection goes through a number of "states", which define when a client +may legally make another request or fetch the response for a particular +request. This diagram details these state transitions: + + (null) + | + | HTTPConnection() + v + Idle + | + | putrequest() + v + Request-started + | + | ( putheader() )* endheaders() + v + Request-sent + |\_____________________________ + | | getresponse() raises + | response = getresponse() | ConnectionError + v v + Unread-response Idle + [Response-headers-read] + |\____________________ + | | + | response.read() | putrequest() + v v + Idle Req-started-unread-response + ______/| + / | + response.read() | | ( putheader() )* endheaders() + v v + Request-started Req-sent-unread-response + | + | response.read() + v + Request-sent + +This diagram presents the following rules: + -- a second request may not be started until {response-headers-read} + -- a response [object] cannot be retrieved until {request-sent} + -- there is no differentiation between an unread response body and a + partially read response body + +Note: this enforcement is applied by the HTTPConnection class. The + HTTPResponse class does not enforce this state machine, which + implies sophisticated clients may accelerate the request/response + pipeline. Caution should be taken, though: accelerating the states + beyond the above pattern may imply knowledge of the server's + connection-close behavior for certain requests. For example, it + is impossible to tell whether the server will close the connection + UNTIL the response headers have been read; this means that further + requests cannot be placed into the pipeline until it is known that + the server will NOT be closing the connection. 
+ +Logical State __state __response +------------- ------- ---------- +Idle _CS_IDLE None +Request-started _CS_REQ_STARTED None +Request-sent _CS_REQ_SENT None +Unread-response _CS_IDLE +Req-started-unread-response _CS_REQ_STARTED +Req-sent-unread-response _CS_REQ_SENT +email.messageurlsplitHTTPResponseHTTPExceptionNotConnectedUnknownProtocolUnknownTransferEncodingUnimplementedFileModeIncompleteReadInvalidURLImproperConnectionStateCannotSendRequestCannotSendHeaderResponseNotReadyBadStatusLineLineTooLongresponsesHTTP_PORT443HTTPS_PORTUNKNOWN_UNKNOWNIdle_CS_IDLERequest-started_CS_REQ_STARTEDRequest-sent_CS_REQ_SENT__members___MAXLINE_MAXHEADERS[^:\s][^:\r\n]*rb'_is_legal_header_name\n(?![ \t])|\r(?![ \t\n])_is_illegal_header_value[- ]_contains_disallowed_url_pchar_re[-]_contains_disallowed_method_pchar_rePATCHPUT_METHODS_EXPECTING_BODYCall data.encode("latin-1") but show a better error message.%s (%.20r) is not valid Latin-1. Use %s.encode('utf-8') if you want to send it encoded in UTF-8."%s (%.20r) is not valid Latin-1. Use %s.encode('utf-8') ""if you want to send it encoded in UTF-8."HTTPMessagegetallmatchingheadersFind all header lines matching a given header name. + + Look through the list of headers and find all lines matching a given + header name (and their continuation lines). A list of the lines is + returned, without interpretation. If the header does not occur, an + empty list is returned. If the header occurs multiple times, all + occurrences are returned. Case is not important in the header name. + + hit_read_headersReads potential header lines into a list from a file pointer. + + Length of line is limited by _MAXLINE, and number of + headers is limited by _MAXHEADERS. + header linegot more than %d headersparse_headers_classParses only RFC2822 headers from a file pointer. + + email Parser wants to see strings rather than bytes. + But a TextIOWrapper around self.rfile would buffer too many bytes + from the stream, bytes which we later need to read as bytes. + So we read the correct bytes here, as bytes, for email Parser + to parse. + + hstringdebuglevelmakefile_methodchunkedchunk_leftwill_close_read_statusstatus linereply:Remote end closed connection without response"Remote end closed connection without"" response"HTTP/_close_conn999beginskipped_headersheaders:HTTP/1.0HTTP/0.9HTTP/1.hdrheader:transfer-encodingtr_enc_check_closeHEADconnkeep-aliveproxy-connectionpconnAlways returns TrueisclosedTrue if the connection is closed.amt_readall_chunked_safe_readRead up to len(b) bytes into bytearray b and return the number + of bytes read. + _readinto_chunked_read_next_chunk_sizechunk size_read_and_discard_trailertrailer line_get_chunk_lefttotal_bytesmvb_safe_readintotemp_mvbRead the number of bytes requested. + + This function should be used when bytes "should" be present for + reading. If the bytes are truly not available (due to EOF), then the + IncompleteRead exception can be used to detect the problem. + Same as _safe_read, but for reading into a buffer.Read with at most one underlying system call. If at least one + byte is buffered, return that instead. + _read1_chunked_peek_chunkedReturns the value of the header matching *name*. + + If there are multiple matching headers, the values are + combined into a single string separated by commas and spaces. + + If no matching header is found, returns *default* or None if + the *default* is not specified. + + If the headers are unknown, raises http.client.ResponseNotReady. 
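The state diagram above maps directly onto the low-level HTTPConnection calls; a sketch of one full request/response cycle (the host is a placeholder and a reachable server is assumed):

    import http.client

    conn = http.client.HTTPConnection("example.org", 80, timeout=10)
    conn.putrequest("GET", "/index.html")   # Idle -> Request-started
    conn.putheader("Accept", "text/html")   # headers may only be sent while request-started
    conn.endheaders()                       # Request-started -> Request-sent
    resp = conn.getresponse()               # response headers read; body still unread
    body = resp.read()                      # body must be consumed before the next request
    print(resp.status, resp.reason, len(body))
    conn.close()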
+ + get_allReturn list of (header, value) tuples.Returns an instance of the class mimetools.Message containing + meta-information associated with the URL. + + When the method is HTTP, these headers are those returned by + the server at the head of the retrieved HTML page (including + Content-Length and Content-Type). + + When the method is FTP, a Content-Length header will be + present if (as is now usual) the server passed back a file + length in response to the FTP retrieval request. A + Content-Type header will be present if the MIME type can be + guessed. + + When the method is local-file, returned headers will include + a Date representing the file's last-modified time, a + Content-Length giving file size, and a Content-Type + containing a guess at the file's type. See also the + description of the mimetools module. + + geturlReturn the real URL of the page. + + In some cases, the HTTP server redirects a client to another + URL. The urlopen() function handles this transparently, but in + some cases the caller needs to know which URL the client was + redirected to. The geturl() method can be used to get at this + redirected URL. + + getcodeReturn the HTTP status code that was sent with the response, + or None if the URL is not an HTTP URL. + + _http_vsnHTTP/1.1_http_vsn_strresponse_classdefault_portauto_open_is_textIOTest whether a file-like object is a text or a binary stream. + TextIOBase_get_content_lengthGet the content-length based on the body. + + If the body is None, we set Content-Length: 0 for methods that expect + a body (RFC 7230, Section 3.3.2). We also set the Content-Length for + any method if the body is a str or bytes-like object and not a file. + mv_GLOBAL_DEFAULT_TIMEOUTsource_address__response__state_tunnel_host_tunnel_port_tunnel_headers_get_hostport_validate_host_create_connectionset_tunnelSet up host and port for HTTP CONNECT tunnelling. + + In a connection that uses HTTP CONNECT tunneling, the host passed to the + constructor is used as a proxy server that relays all communication to + the endpoint passed to `set_tunnel`. This done by sending an HTTP + CONNECT request to the proxy server when the connection is established. + + This method must be called before the HTTP connection has been + established. + + The headers argument should be a mapping of extra HTTP headers to send + with the CONNECT request. + Can't set up tunnel for established connectionnonnumeric port: '%s'_tunnelCONNECT %s:%d HTTP/1.0 +connect_strconnect_bytes%s: %s +header_strTunnel connection failed: %d %sConnect to the host and port specified in __init__.Close the connection to the HTTP server.Send `data' to the server. + ``data`` can be a string object, a bytes object, an array object, a + file-like object that supports a .read() method, or an iterable object. + send:sendIng a read()ableencoding file using iso-8859-1datablockdata should be a bytes-like object or an iterable, got %r"data should be a bytes-like object ""or an iterable, got %r"_outputAdd a line of output to the current request buffer. + + Assumes that the line does *not* end with \r\n. + _read_readable_send_outputmessage_bodyencode_chunkedSend the currently buffered request and clear the buffer. + + Appends an extra \r\n to the buffer. + A message_body may be specified, to be appended to the request. + message_body should be a bytes-like object or an iterable, got %r"message_body should be a bytes-like ""object or an iterable, got %r"Zero length chunk ignored0 + +skip_hostSend a request to the server. 
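The set_tunnel() docstring above describes HTTP CONNECT tunnelling through a proxy; a hedged sketch (proxy.example:3128, example.org, and the Proxy-Authorization value are all placeholders):

    import http.client

    conn = http.client.HTTPSConnection("proxy.example", 3128)
    # Must be called before the connection is established.
    conn.set_tunnel("example.org", 443,
                    headers={"Proxy-Authorization": "Basic <credentials>"})
    conn.request("GET", "/")
    print(conn.getresponse().status)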
+ + `method' specifies an HTTP request method, e.g. 'GET'. + `url' specifies the object being requested, e.g. '/index.html'. + `skip_host' if True does not add automatically a 'Host:' header + `skip_accept_encoding' if True does not add automatically an + 'Accept-Encoding:' header + _validate_method_validate_path%s %s %s_encode_requestnetlocnetloc_encHosthost_encValidate a method name for putrequest.method can't contain control characters. (found at least "(found at least "Validate a url for putrequest.URL can't contain control characters. Validate a host so it doesn't contain control characters.Send a request header line to the server. + + For example: h.putheader('Accept', 'text/html') + Invalid header name %rone_valueInvalid header value %r + Indicate that the last header line has been sent to the server. + + This method sends the request to the server. The optional message_body + argument can be used to pass a message body associated with the + request. + Send a complete request to the server._send_requestheader_namesskipsaccept-encodingcontent_lengthUnable to determine size of %rTransfer-EncodingGet the response from the server. + + If the HTTPConnection is in the correct state, returns an + instance of HTTPResponse or of whatever object is returned by + the response_class variable. + + If a request has not been sent or if a previous response has + not be handled, ResponseNotReady is raised. If the HTTP + response indicates that the connection should be closed, then + it will be closed before the response is returned. When the + connection is closed, the underlying socket is closed. + This class allows communication via SSL.key_filecert_filekey_file, cert_file and check_hostname are deprecated, use a custom context instead."key_file, cert_file and check_hostname are ""deprecated, use a custom context instead."_create_default_https_contextwill_verifycheck_hostname needs a SSL context with either CERT_OPTIONAL or CERT_REQUIRED"check_hostname needs a SSL context with ""either CERT_OPTIONAL or CERT_REQUIRED"_contextConnect to a host on a given (SSL) port.wrap_socket, %i more expected%s(%i bytes read%s)line_typegot more than %d bytes when reading %s# HTTPMessage, parse_headers(), and the HTTP status code constants are# intentionally omitted for simplicity# connection states# hack to maintain backwards compatibility# another hack to maintain backwards compatibility# Mapping status codes to official W3C names# maximal line length when calling readline().# Header name/value ABNF (http://tools.ietf.org/html/rfc7230#section-3.2)# VCHAR = %x21-7E# obs-text = %x80-FF# header-field = field-name ":" OWS field-value OWS# field-name = token# field-value = *( field-content / obs-fold )# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]# field-vchar = VCHAR / obs-text# obs-fold = CRLF 1*( SP / HTAB )# ; obsolete line folding# ; see Section 3.2.4# token = 1*tchar# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"# / DIGIT / ALPHA# ; any VCHAR, except delimiters# VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1# the patterns for both name and value are more lenient than RFC# definitions to allow for backwards compatibility# These characters are not allowed within HTTP URL paths.# See https://tools.ietf.org/html/rfc3986#section-3.3 and the# https://tools.ietf.org/html/rfc3986#appendix-A pchar definition.# Prevents CVE-2019-9740. 
Includes control characters such as \r\n.# We don't restrict chars above \x7f as putrequest() limits us to ASCII.# Arguably only these _should_ allowed:# _is_allowed_url_pchars_re = re.compile(r"^[/!$&'()*+,;=:@%a-zA-Z0-9._~-]+$")# We are more lenient for assumed real world compatibility purposes.# These characters are not allowed within HTTP method names# to prevent http header injection.# We always set the Content-Length header for these methods because some# servers will otherwise respond with a 411# XXX The only usage of this method is in# http.server.CGIHTTPRequestHandler. Maybe move the code there so# that it doesn't need to be part of the public API. The API has# never been defined so this could cause backwards compatibility# issues.# See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.# The bytes from the socket object are iso-8859-1 strings.# See RFC 2616 sec 2.2 which notes an exception for MIME-encoded# text following RFC 2047. The basic status line parsing only# accepts iso-8859-1.# If the response includes a content-length header, we need to# make sure that the client doesn't read more than the# specified number of bytes. If it does, it will block until# the server times out and closes the connection. This will# happen if a self.fp.read() is done (without a size) whether# self.fp is buffered or not. So, no self.fp.read() by# clients unless they know what they are doing.# The HTTPResponse object is returned via urllib. The clients# of http and urllib expect different attributes for the# headers. headers is used here and supports urllib. msg is# provided as a backwards compatibility layer for http# clients.# from the Status-Line of the response# HTTP-Version# Status-Code# Reason-Phrase# is "chunked" being used?# bytes left to read in current chunk# number of bytes left in response# conn will close at end of response# Presumably, the server closed the connection before# sending a valid response.# empty version will cause next test to fail.# The status code is a three-digit number# we've already started reading the response# read until we get a non-100 response# skip the header from the 100 response# Some servers might still return "0.9", treat it as 1.0 anyway# use HTTP/1.1 code for HTTP/1.x where x>=1# are we using the chunked-style of transfer encoding?# will the connection close at the end of the response?# do we have a Content-Length?# NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"# ignore nonsensical negative lengths# does the body have a fixed length? (of zero)# 1xx codes# if the connection remains open, and we aren't using chunked, and# a content-length was not provided, then assume that the connection# WILL close.# An HTTP/1.1 proxy is assumed to stay open unless# explicitly closed.# Some HTTP/1.0 implementations have support for persistent# connections, using rules different than HTTP/1.1.# For older HTTP, Keep-Alive indicates persistent connection.# At least Akamai returns a "Connection: Keep-Alive" header,# which was supposed to be sent by the client.# Proxy-Connection is a netscape hack.# otherwise, assume it will close# set "closed" flag# These implementations are for the benefit of io.BufferedReader.# XXX This class should probably be revised to act more like# the "raw stream" that BufferedReader expects.# End of "raw stream" methods# NOTE: it is possible that we will not ever call self.close(). 
This# case occurs when will_close is TRUE, length is None, and we# read up to the last byte, but NOT past it.# IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be# called, meaning self.isclosed() is meaningful.# Amount is given, implement using readinto# Amount is not given (unbounded read) so we must check self.length# and self.chunked# we read everything# clip the read to the "end of response"# we do not use _safe_read() here because this may be a .will_close# connection, and the user is reading more bytes than will be provided# (for example, reading in 1k chunks)# Ideally, we would raise IncompleteRead if the content-length# wasn't satisfied, but it might break compatibility.# Read the next chunk size from the file# strip chunk-extensions# close the connection as protocol synchronisation is# probably lost# read and discard trailer up to the CRLF terminator### note: we shouldn't have any trailers!# a vanishingly small number of sites EOF without# sending the trailer# return self.chunk_left, reading a new chunk if necessary.# chunk_left == 0: at the end of the current chunk, need to close it# chunk_left == None: No current chunk, should read next.# This function returns non-zero or None if the last chunk has# been read.# Can be 0 or None# We are at the end of chunk, discard chunk end# toss the CRLF at the end of the chunk# last chunk: 1*("0") [ chunk-extension ] CRLF# we read everything; close the "file"# Having this enables IOBase.readline() to read more than one# byte at a time# Fallback to IOBase readline which uses peek() and read()# Strictly speaking, _get_chunk_left() may cause more than one read,# but that is ok, since that is to satisfy the chunked protocol.# if n is negative or larger than chunk_left# peek doesn't worry about protocol# eof# peek is allowed to return more than requested. Just request the# entire chunk, and truncate what we get.# We override IOBase.__iter__ so that it doesn't check for closed-ness# For compatibility with old-style urllib responses.# do an explicit check for not None here to distinguish# between unset and set but empty# file-like object.# does it implement the buffer protocol (bytes, bytearray, array)?# This is stored as an instance variable to allow unit# tests to replace it with a suitable mockup# ipv6 addresses have [...]# http://foo.com:/ == http://foo.com/# for sites which EOF without sending a trailer# close it manually... there may be other refs# create a consistent interface to message_body# Let file-like take precedence over byte-like. This# is needed to allow the current position of mmap'ed# files to be taken into account.# this is solely to check to see if message_body# implements the buffer API. it /would/ be easier# to capture if PyObject_CheckBuffer was exposed# to Python.# the object implements the buffer interface and# can be passed directly into socket methods# chunked encoding# end chunked transfer# if a prior response has been completed, then forget about it.# in certain cases, we cannot issue another request on this connection.# this occurs when:# 1) we are in the process of sending a request. (_CS_REQ_STARTED)# 2) a response to a previous request has signalled that it is going# to close the connection upon completion.# 3) the headers for the previous response have not been read, thus# we cannot determine whether point (2) is true. 
(_CS_REQ_SENT)# if there is no prior response, then we can request at will.# if point (2) is true, then we will have passed the socket to the# response (effectively meaning, "there is no prior response"), and# will open a new one when a new request is made.# Note: if a prior response exists, then we *can* start a new request.# We are not allowed to begin fetching the response to this new# request, however, until that prior response is complete.# Save the method for use later in the response phase# Issue some standard headers for better HTTP/1.1 compliance# this header is issued *only* for HTTP/1.1# connections. more specifically, this means it is# only issued when the client uses the new# HTTPConnection() class. backwards-compat clients# will be using HTTP/1.0 and those clients may be# issuing this header themselves. we should NOT issue# it twice; some web servers (such as Apache) barf# when they see two Host: headers# If we need a non-standard port,include it in the# header. If the request is going through a proxy,# but the host of the actual URL, not the host of the# proxy.# As per RFC 273, IPv6 address should be wrapped with []# when used as Host header# note: we are assuming that clients will not attempt to set these# headers since *this* library must deal with the# consequences. this also means that when the supporting# libraries are updated to recognize other forms, then this# code should be changed (removed or updated).# we only want a Content-Encoding of "identity" since we don't# support encodings such as x-gzip or x-deflate.# we can accept "chunked" Transfer-Encodings, but no others# NOTE: no TE header implies *only* "chunked"#self.putheader('TE', 'chunked')# if TE is supplied in the header, then it must appear in a# Connection header.#self.putheader('Connection', 'TE')# For HTTP/1.0, the server will assume "not chunked"# ASCII also helps prevent CVE-2019-9740.# prevent http header injection# Prevent CVE-2019-9740.# Prevent CVE-2019-18348.# Honor explicitly requested Host: and Accept-Encoding: headers.# chunked encoding will happen if HTTP/1.1 is used and either# the caller passes encode_chunked=True or the following# conditions hold:# 1. content-length has not been explicitly set# 2. the body is a file or iterable, but not a str or bytes-like# 3. 
Transfer-Encoding has NOT been explicitly set by the caller# only chunk body if not explicitly set for backwards# compatibility, assuming the client code is already handling the# chunking# if content-length cannot be automatically determined, fall# back to chunked encoding# RFC 2616 Section 3.7.1 says that text default has a# default charset of iso-8859-1.# if a prior response exists, then it must be completed (otherwise, we# cannot read this response's header to determine the connection-close# behavior)# note: if a prior response existed, but was connection-close, then the# socket and response were made independent of this HTTPConnection# object since a new request requires that we open a whole new# connection# this means the prior response had one of two states:# 1) will_close: this connection was reset and the prior socket and# response operate independently# 2) persistent: the response was retained and we await its# isclosed() status to become true.# this effectively passes the connection to the response# remember this, so we can tell when it is complete# XXX Should key_file and cert_file be deprecated in favour of context?# enable PHA for TLS 1.3 connections if available# cert and key file means the user wants to authenticate.# enable TLS 1.3 PHA implicitly even for custom contexts.# Subclasses that define an __init__ must call Exception.__init__# or define self.args. Otherwise, str() will fail.# for backwards compatibilityb'HTTP/1.1 client library + + + + +HTTPConnection goes through a number of "states", which define when a client +may legally make another request or fetch the response for a particular +request. This diagram details these state transitions: + + (null) + | + | HTTPConnection() + v + Idle + | + | putrequest() + v + Request-started + | + | ( putheader() )* endheaders() + v + Request-sent + |\_____________________________ + | | getresponse() raises + | response = getresponse() | ConnectionError + v v + Unread-response Idle + [Response-headers-read] + |\____________________ + | | + | response.read() | putrequest() + v v + Idle Req-started-unread-response + ______/| + / | + response.read() | | ( putheader() )* endheaders() + v v + Request-started Req-sent-unread-response + | + | response.read() + v + Request-sent + +This diagram presents the following rules: + -- a second request may not be started until {response-headers-read} + -- a response [object] cannot be retrieved until {request-sent} + -- there is no differentiation between an unread response body and a + partially read response body + +Note: this enforcement is applied by the HTTPConnection class. The + HTTPResponse class does not enforce this state machine, which + implies sophisticated clients may accelerate the request/response + pipeline. Caution should be taken, though: accelerating the states + beyond the above pattern may imply knowledge of the server's + connection-close behavior for certain requests. For example, it + is impossible to tell whether the server will close the connection + UNTIL the response headers have been read; this means that further + requests cannot be placed into the pipeline until it is known that + the server will NOT be closing the connection. 
+ +Logical State __state __response +------------- ------- ---------- +Idle _CS_IDLE None +Request-started _CS_REQ_STARTED None +Request-sent _CS_REQ_SENT None +Unread-response _CS_IDLE +Req-started-unread-response _CS_REQ_STARTED +Req-sent-unread-response _CS_REQ_SENT +'u'HTTP/1.1 client library + + + + +HTTPConnection goes through a number of "states", which define when a client +may legally make another request or fetch the response for a particular +request. This diagram details these state transitions: + + (null) + | + | HTTPConnection() + v + Idle + | + | putrequest() + v + Request-started + | + | ( putheader() )* endheaders() + v + Request-sent + |\_____________________________ + | | getresponse() raises + | response = getresponse() | ConnectionError + v v + Unread-response Idle + [Response-headers-read] + |\____________________ + | | + | response.read() | putrequest() + v v + Idle Req-started-unread-response + ______/| + / | + response.read() | | ( putheader() )* endheaders() + v v + Request-started Req-sent-unread-response + | + | response.read() + v + Request-sent + +This diagram presents the following rules: + -- a second request may not be started until {response-headers-read} + -- a response [object] cannot be retrieved until {request-sent} + -- there is no differentiation between an unread response body and a + partially read response body + +Note: this enforcement is applied by the HTTPConnection class. The + HTTPResponse class does not enforce this state machine, which + implies sophisticated clients may accelerate the request/response + pipeline. Caution should be taken, though: accelerating the states + beyond the above pattern may imply knowledge of the server's + connection-close behavior for certain requests. For example, it + is impossible to tell whether the server will close the connection + UNTIL the response headers have been read; this means that further + requests cannot be placed into the pipeline until it is known that + the server will NOT be closing the connection. + +Logical State __state __response +------------- ------- ---------- +Idle _CS_IDLE None +Request-started _CS_REQ_STARTED None +Request-sent _CS_REQ_SENT None +Unread-response _CS_IDLE +Req-started-unread-response _CS_REQ_STARTED +Req-sent-unread-response _CS_REQ_SENT +'b'HTTPResponse'u'HTTPResponse'b'HTTPConnection'u'HTTPConnection'b'HTTPException'u'HTTPException'b'NotConnected'u'NotConnected'b'UnknownProtocol'u'UnknownProtocol'b'UnknownTransferEncoding'u'UnknownTransferEncoding'b'UnimplementedFileMode'u'UnimplementedFileMode'b'IncompleteRead'u'IncompleteRead'b'InvalidURL'u'InvalidURL'b'ImproperConnectionState'u'ImproperConnectionState'b'CannotSendRequest'u'CannotSendRequest'b'CannotSendHeader'u'CannotSendHeader'b'ResponseNotReady'u'ResponseNotReady'b'BadStatusLine'u'BadStatusLine'b'LineTooLong'u'LineTooLong'b'RemoteDisconnected'u'RemoteDisconnected'b'responses'u'responses'b'UNKNOWN'u'UNKNOWN'b'Idle'u'Idle'b'Request-started'u'Request-started'b'Request-sent'u'Request-sent'b'[^:\s][^:\r\n]*'b'\n(?![ \t])|\r(?![ \t\n])'b'[- ]'u'[- ]'b'[-]'u'[-]'b'PATCH'u'PATCH'b'PUT'u'PUT'b'Call data.encode("latin-1") but show a better error message.'u'Call data.encode("latin-1") but show a better error message.'b'%s (%.20r) is not valid Latin-1. Use %s.encode('utf-8') if you want to send it encoded in UTF-8.'u'%s (%.20r) is not valid Latin-1. Use %s.encode('utf-8') if you want to send it encoded in UTF-8.'b'Find all header lines matching a given header name. 
+ + Look through the list of headers and find all lines matching a given + header name (and their continuation lines). A list of the lines is + returned, without interpretation. If the header does not occur, an + empty list is returned. If the header occurs multiple times, all + occurrences are returned. Case is not important in the header name. + + 'u'Find all header lines matching a given header name. + + Look through the list of headers and find all lines matching a given + header name (and their continuation lines). A list of the lines is + returned, without interpretation. If the header does not occur, an + empty list is returned. If the header occurs multiple times, all + occurrences are returned. Case is not important in the header name. + + 'b'Reads potential header lines into a list from a file pointer. + + Length of line is limited by _MAXLINE, and number of + headers is limited by _MAXHEADERS. + 'u'Reads potential header lines into a list from a file pointer. + + Length of line is limited by _MAXLINE, and number of + headers is limited by _MAXHEADERS. + 'b'header line'u'header line'b'got more than %d headers'u'got more than %d headers'b'Parses only RFC2822 headers from a file pointer. + + email Parser wants to see strings rather than bytes. + But a TextIOWrapper around self.rfile would buffer too many bytes + from the stream, bytes which we later need to read as bytes. + So we read the correct bytes here, as bytes, for email Parser + to parse. + + 'u'Parses only RFC2822 headers from a file pointer. + + email Parser wants to see strings rather than bytes. + But a TextIOWrapper around self.rfile would buffer too many bytes + from the stream, bytes which we later need to read as bytes. + So we read the correct bytes here, as bytes, for email Parser + to parse. + + 'b'status line'u'status line'b'reply:'u'reply:'b'Remote end closed connection without response'u'Remote end closed connection without response'b'HTTP/'u'HTTP/'b'headers:'u'headers:'b'HTTP/1.0'u'HTTP/1.0'b'HTTP/0.9'u'HTTP/0.9'b'HTTP/1.'u'HTTP/1.'b'header:'u'header:'b'transfer-encoding'u'transfer-encoding'b'chunked'u'chunked'b'HEAD'u'HEAD'b'connection'u'connection'b'keep-alive'u'keep-alive'b'proxy-connection'u'proxy-connection'b'Always returns True'u'Always returns True'b'True if the connection is closed.'u'True if the connection is closed.'b'Read up to len(b) bytes into bytearray b and return the number + of bytes read. + 'u'Read up to len(b) bytes into bytearray b and return the number + of bytes read. + 'b'chunk size'u'chunk size'b'trailer line'u'trailer line'b'Read the number of bytes requested. + + This function should be used when bytes "should" be present for + reading. If the bytes are truly not available (due to EOF), then the + IncompleteRead exception can be used to detect the problem. + 'u'Read the number of bytes requested. + + This function should be used when bytes "should" be present for + reading. If the bytes are truly not available (due to EOF), then the + IncompleteRead exception can be used to detect the problem. + 'b'Same as _safe_read, but for reading into a buffer.'u'Same as _safe_read, but for reading into a buffer.'b'Read with at most one underlying system call. If at least one + byte is buffered, return that instead. + 'u'Read with at most one underlying system call. If at least one + byte is buffered, return that instead. + 'b'Returns the value of the header matching *name*. + + If there are multiple matching headers, the values are + combined into a single string separated by commas and spaces. 
+ + If no matching header is found, returns *default* or None if + the *default* is not specified. + + If the headers are unknown, raises http.client.ResponseNotReady. + + 'u'Returns the value of the header matching *name*. + + If there are multiple matching headers, the values are + combined into a single string separated by commas and spaces. + + If no matching header is found, returns *default* or None if + the *default* is not specified. + + If the headers are unknown, raises http.client.ResponseNotReady. + + 'b'Return list of (header, value) tuples.'u'Return list of (header, value) tuples.'b'Returns an instance of the class mimetools.Message containing + meta-information associated with the URL. + + When the method is HTTP, these headers are those returned by + the server at the head of the retrieved HTML page (including + Content-Length and Content-Type). + + When the method is FTP, a Content-Length header will be + present if (as is now usual) the server passed back a file + length in response to the FTP retrieval request. A + Content-Type header will be present if the MIME type can be + guessed. + + When the method is local-file, returned headers will include + a Date representing the file's last-modified time, a + Content-Length giving file size, and a Content-Type + containing a guess at the file's type. See also the + description of the mimetools module. + + 'u'Returns an instance of the class mimetools.Message containing + meta-information associated with the URL. + + When the method is HTTP, these headers are those returned by + the server at the head of the retrieved HTML page (including + Content-Length and Content-Type). + + When the method is FTP, a Content-Length header will be + present if (as is now usual) the server passed back a file + length in response to the FTP retrieval request. A + Content-Type header will be present if the MIME type can be + guessed. + + When the method is local-file, returned headers will include + a Date representing the file's last-modified time, a + Content-Length giving file size, and a Content-Type + containing a guess at the file's type. See also the + description of the mimetools module. + + 'b'Return the real URL of the page. + + In some cases, the HTTP server redirects a client to another + URL. The urlopen() function handles this transparently, but in + some cases the caller needs to know which URL the client was + redirected to. The geturl() method can be used to get at this + redirected URL. + + 'u'Return the real URL of the page. + + In some cases, the HTTP server redirects a client to another + URL. The urlopen() function handles this transparently, but in + some cases the caller needs to know which URL the client was + redirected to. The geturl() method can be used to get at this + redirected URL. + + 'b'Return the HTTP status code that was sent with the response, + or None if the URL is not an HTTP URL. + + 'u'Return the HTTP status code that was sent with the response, + or None if the URL is not an HTTP URL. + + 'b'HTTP/1.1'u'HTTP/1.1'b'Test whether a file-like object is a text or a binary stream. + 'u'Test whether a file-like object is a text or a binary stream. + 'b'Get the content-length based on the body. + + If the body is None, we set Content-Length: 0 for methods that expect + a body (RFC 7230, Section 3.3.2). We also set the Content-Length for + any method if the body is a str or bytes-like object and not a file. + 'u'Get the content-length based on the body. 
+ + If the body is None, we set Content-Length: 0 for methods that expect + a body (RFC 7230, Section 3.3.2). We also set the Content-Length for + any method if the body is a str or bytes-like object and not a file. + 'b'Set up host and port for HTTP CONNECT tunnelling. + + In a connection that uses HTTP CONNECT tunneling, the host passed to the + constructor is used as a proxy server that relays all communication to + the endpoint passed to `set_tunnel`. This done by sending an HTTP + CONNECT request to the proxy server when the connection is established. + + This method must be called before the HTTP connection has been + established. + + The headers argument should be a mapping of extra HTTP headers to send + with the CONNECT request. + 'u'Set up host and port for HTTP CONNECT tunnelling. + + In a connection that uses HTTP CONNECT tunneling, the host passed to the + constructor is used as a proxy server that relays all communication to + the endpoint passed to `set_tunnel`. This done by sending an HTTP + CONNECT request to the proxy server when the connection is established. + + This method must be called before the HTTP connection has been + established. + + The headers argument should be a mapping of extra HTTP headers to send + with the CONNECT request. + 'b'Can't set up tunnel for established connection'u'Can't set up tunnel for established connection'b'nonnumeric port: '%s''u'nonnumeric port: '%s''b'CONNECT %s:%d HTTP/1.0 +'u'CONNECT %s:%d HTTP/1.0 +'b'%s: %s +'u'%s: %s +'b'Tunnel connection failed: %d %s'u'Tunnel connection failed: %d %s'b'Connect to the host and port specified in __init__.'u'Connect to the host and port specified in __init__.'b'Close the connection to the HTTP server.'u'Close the connection to the HTTP server.'b'Send `data' to the server. + ``data`` can be a string object, a bytes object, an array object, a + file-like object that supports a .read() method, or an iterable object. + 'u'Send `data' to the server. + ``data`` can be a string object, a bytes object, an array object, a + file-like object that supports a .read() method, or an iterable object. + 'b'send:'u'send:'b'sendIng a read()able'u'sendIng a read()able'b'encoding file using iso-8859-1'u'encoding file using iso-8859-1'b'data should be a bytes-like object or an iterable, got %r'u'data should be a bytes-like object or an iterable, got %r'b'Add a line of output to the current request buffer. + + Assumes that the line does *not* end with \r\n. + 'u'Add a line of output to the current request buffer. + + Assumes that the line does *not* end with \r\n. + 'b'Send the currently buffered request and clear the buffer. + + Appends an extra \r\n to the buffer. + A message_body may be specified, to be appended to the request. + 'u'Send the currently buffered request and clear the buffer. + + Appends an extra \r\n to the buffer. + A message_body may be specified, to be appended to the request. + 'b'message_body should be a bytes-like object or an iterable, got %r'u'message_body should be a bytes-like object or an iterable, got %r'b'Zero length chunk ignored'u'Zero length chunk ignored'b'0 + +'b'Send a request to the server. + + `method' specifies an HTTP request method, e.g. 'GET'. + `url' specifies the object being requested, e.g. '/index.html'. + `skip_host' if True does not add automatically a 'Host:' header + `skip_accept_encoding' if True does not add automatically an + 'Accept-Encoding:' header + 'u'Send a request to the server. + + `method' specifies an HTTP request method, e.g. 'GET'. 
+ `url' specifies the object being requested, e.g. '/index.html'. + `skip_host' if True does not add automatically a 'Host:' header + `skip_accept_encoding' if True does not add automatically an + 'Accept-Encoding:' header + 'b'%s %s %s'u'%s %s %s'b'Host'u'Host'b'identity'u'identity'b'Validate a method name for putrequest.'u'Validate a method name for putrequest.'b'method can't contain control characters. 'u'method can't contain control characters. 'b' (found at least 'u' (found at least 'b'Validate a url for putrequest.'u'Validate a url for putrequest.'b'URL can't contain control characters. 'u'URL can't contain control characters. 'b'Validate a host so it doesn't contain control characters.'u'Validate a host so it doesn't contain control characters.'b'Send a request header line to the server. + + For example: h.putheader('Accept', 'text/html') + 'u'Send a request header line to the server. + + For example: h.putheader('Accept', 'text/html') + 'b'Invalid header name %r'u'Invalid header name %r'b'Invalid header value %r'u'Invalid header value %r'b' + 'b'Indicate that the last header line has been sent to the server. + + This method sends the request to the server. The optional message_body + argument can be used to pass a message body associated with the + request. + 'u'Indicate that the last header line has been sent to the server. + + This method sends the request to the server. The optional message_body + argument can be used to pass a message body associated with the + request. + 'b'Send a complete request to the server.'u'Send a complete request to the server.'b'host'u'host'b'skip_host'u'skip_host'b'accept-encoding'u'accept-encoding'b'skip_accept_encoding'u'skip_accept_encoding'b'Unable to determine size of %r'u'Unable to determine size of %r'b'Transfer-Encoding'u'Transfer-Encoding'b'body'b'Get the response from the server. + + If the HTTPConnection is in the correct state, returns an + instance of HTTPResponse or of whatever object is returned by + the response_class variable. + + If a request has not been sent or if a previous response has + not be handled, ResponseNotReady is raised. If the HTTP + response indicates that the connection should be closed, then + it will be closed before the response is returned. When the + connection is closed, the underlying socket is closed. + 'u'Get the response from the server. + + If the HTTPConnection is in the correct state, returns an + instance of HTTPResponse or of whatever object is returned by + the response_class variable. + + If a request has not been sent or if a previous response has + not be handled, ResponseNotReady is raised. If the HTTP + response indicates that the connection should be closed, then + it will be closed before the response is returned. When the + connection is closed, the underlying socket is closed. 
+ 'b'This class allows communication via SSL.'u'This class allows communication via SSL.'b'key_file, cert_file and check_hostname are deprecated, use a custom context instead.'u'key_file, cert_file and check_hostname are deprecated, use a custom context instead.'b'check_hostname needs a SSL context with either CERT_OPTIONAL or CERT_REQUIRED'u'check_hostname needs a SSL context with either CERT_OPTIONAL or CERT_REQUIRED'b'Connect to a host on a given (SSL) port.'u'Connect to a host on a given (SSL) port.'b', %i more expected'u', %i more expected'b'%s(%i bytes read%s)'u'%s(%i bytes read%s)'b'got more than %d bytes when reading %s'u'got more than %d bytes when reading %s'A generic class to build line-oriented command interpreters. + +Interpreters constructed with this class obey the following conventions: + +1. End of file on input is processed as the command 'EOF'. +2. A command is parsed out of each line by collecting the prefix composed + of characters in the identchars member. +3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method + is passed a single argument consisting of the remainder of the line. +4. Typing an empty line repeats the last command. (Actually, it calls the + method `emptyline', which may be overridden in a subclass.) +5. There is a predefined `help' method. Given an argument `topic', it + calls the command `help_topic'. With no arguments, it lists all topics + with defined help_ functions, broken into up to three topics; documented + commands, miscellaneous help topics, and undocumented commands. +6. The command '?' is a synonym for `help'. The command '!' is a synonym + for `shell', if a do_shell method exists. +7. If completion is enabled, completing commands will be done automatically, + and completing of commands args is done by calling complete_foo() with + arguments text, line, begidx, endidx. text is string we are matching + against, all returned matches must begin with it. line is the current + input line (lstripped), begidx and endidx are the beginning and end + indexes of the text being matched, which could be used to provide + different completion depending upon which position the argument is in. + +The `default' method may be overridden to intercept commands for which there +is no do_ method. + +The `completedefault' method may be overridden to intercept completions for +commands that have no complete_ method. + +The data member `self.ruler' sets the character used to draw separator lines +in the help messages. If empty, no ruler line is drawn. It defaults to "=". + +If the value of `self.intro' is nonempty when the cmdloop method is called, +it is printed out on interpreter startup. This value may be overridden +via an optional argument to the cmdloop() method. + +The data members `self.doc_header', `self.misc_header', and +`self.undoc_header' set the headers used for the help function's +listings of documented functions, miscellaneous topics, and undocumented +functions respectively. +Cmd(Cmd) PROMPTIDENTCHARSA simple framework for writing line-oriented command interpreters. + + These are often useful for test harnesses, administrative tools, and + prototypes that will later be wrapped in a more sophisticated interface. + + A Cmd instance or subclass instance is a line-oriented interpreter + framework. There is no good reason to instantiate Cmd itself; rather, + it's useful as a superclass of an interpreter class you define yourself + in order to inherit Cmd's methods and encapsulate action methods. 
+ + promptidentcharsrulerlastcmdintrodoc_leaderDocumented commands (type help ):doc_headerMiscellaneous help topics:misc_headerUndocumented commands:undoc_header*** No help on %snohelpuse_rawinputtabcompletekeyInstantiate a line-oriented interpreter framework. + + The optional argument 'completekey' is the readline name of a + completion key; it defaults to the Tab key. If completekey is + not None and the readline module is available, command completion + is done automatically. The optional arguments stdin and stdout + specify alternate input and output file objects; if not specified, + sys.stdin and sys.stdout are used. + + cmdqueuecmdloopRepeatedly issue a prompt, accept input, parse an initial prefix + off the received input, and dispatch to action methods, passing them + the remainder of the line as argument. + + preloopget_completerold_completerset_completercompleteparse_and_bind: completeEOFprecmdonecmdpostcmdpostloopHook method executed just before the command line is + interpreted, but after the input prompt is generated and issued. + + Hook method executed just after a command dispatch is finished.Hook method executed once when the cmdloop() method is called.Hook method executed once when the cmdloop() method is about to + return. + + parselineParse the line into a command name and a string containing + the arguments. Returns a tuple containing (command, args, line). + 'command' and 'args' may be None if the line couldn't be parsed. + help do_shellshell Interpret the argument as though it had been typed in response + to the prompt. + + This may be overridden, but should not normally need to be; + see the precmd() and postcmd() methods for useful execution hooks. + The return value is a flag indicating whether interpretation of + commands by the interpreter should stop. + + emptylinedo_Called when an empty line is entered in response to the prompt. + + If this method is not overridden, it repeats the last nonempty + command entered. + + Called on an input line when the command prefix is not recognized. + + If this method is not overridden, it prints an error message and + returns. + + *** Unknown syntax: %s +completedefaultignoredMethod called to complete an input line when no command-specific + complete_*() method is available. + + By default, it returns an empty list. + + completenamesdotextget_namesReturn the next possible completion for 'text'. + + If a command has not been entered, then complete against command list. + Otherwise try to call complete_ to get list of completions. + get_line_bufferoriglinestrippedget_begidxbegidxget_endidxendidxcompfunccomplete_completion_matchescomplete_helphelp_topicsdo_helpList available commands with "help" or detailed help with "help cmd".%s +cmds_doccmds_undocprevnameprint_topicscmdscmdlenmaxcolcolumnizedisplaywidthDisplay a list of strings as a compact set of columns. + + Each column is only as wide as necessary. + Columns are separated by two spaces (one was not legible enough). + +nonstringslist[i] not a string for i in %snrowsncolscolwidthstotwidthtexts # This method used to pull in base class attributes# at a time dir() didn't do it yet.# XXX check arg syntax# There can be duplicates if routines overridden# Try every row count from 1 upwardsb'A generic class to build line-oriented command interpreters. + +Interpreters constructed with this class obey the following conventions: + +1. End of file on input is processed as the command 'EOF'. +2. 
A command is parsed out of each line by collecting the prefix composed + of characters in the identchars member. +3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method + is passed a single argument consisting of the remainder of the line. +4. Typing an empty line repeats the last command. (Actually, it calls the + method `emptyline', which may be overridden in a subclass.) +5. There is a predefined `help' method. Given an argument `topic', it + calls the command `help_topic'. With no arguments, it lists all topics + with defined help_ functions, broken into up to three topics; documented + commands, miscellaneous help topics, and undocumented commands. +6. The command '?' is a synonym for `help'. The command '!' is a synonym + for `shell', if a do_shell method exists. +7. If completion is enabled, completing commands will be done automatically, + and completing of commands args is done by calling complete_foo() with + arguments text, line, begidx, endidx. text is string we are matching + against, all returned matches must begin with it. line is the current + input line (lstripped), begidx and endidx are the beginning and end + indexes of the text being matched, which could be used to provide + different completion depending upon which position the argument is in. + +The `default' method may be overridden to intercept commands for which there +is no do_ method. + +The `completedefault' method may be overridden to intercept completions for +commands that have no complete_ method. + +The data member `self.ruler' sets the character used to draw separator lines +in the help messages. If empty, no ruler line is drawn. It defaults to "=". + +If the value of `self.intro' is nonempty when the cmdloop method is called, +it is printed out on interpreter startup. This value may be overridden +via an optional argument to the cmdloop() method. + +The data members `self.doc_header', `self.misc_header', and +`self.undoc_header' set the headers used for the help function's +listings of documented functions, miscellaneous topics, and undocumented +functions respectively. +'u'A generic class to build line-oriented command interpreters. + +Interpreters constructed with this class obey the following conventions: + +1. End of file on input is processed as the command 'EOF'. +2. A command is parsed out of each line by collecting the prefix composed + of characters in the identchars member. +3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method + is passed a single argument consisting of the remainder of the line. +4. Typing an empty line repeats the last command. (Actually, it calls the + method `emptyline', which may be overridden in a subclass.) +5. There is a predefined `help' method. Given an argument `topic', it + calls the command `help_topic'. With no arguments, it lists all topics + with defined help_ functions, broken into up to three topics; documented + commands, miscellaneous help topics, and undocumented commands. +6. The command '?' is a synonym for `help'. The command '!' is a synonym + for `shell', if a do_shell method exists. +7. If completion is enabled, completing commands will be done automatically, + and completing of commands args is done by calling complete_foo() with + arguments text, line, begidx, endidx. text is string we are matching + against, all returned matches must begin with it. 
line is the current + input line (lstripped), begidx and endidx are the beginning and end + indexes of the text being matched, which could be used to provide + different completion depending upon which position the argument is in. + +The `default' method may be overridden to intercept commands for which there +is no do_ method. + +The `completedefault' method may be overridden to intercept completions for +commands that have no complete_ method. + +The data member `self.ruler' sets the character used to draw separator lines +in the help messages. If empty, no ruler line is drawn. It defaults to "=". + +If the value of `self.intro' is nonempty when the cmdloop method is called, +it is printed out on interpreter startup. This value may be overridden +via an optional argument to the cmdloop() method. + +The data members `self.doc_header', `self.misc_header', and +`self.undoc_header' set the headers used for the help function's +listings of documented functions, miscellaneous topics, and undocumented +functions respectively. +'b'Cmd'u'Cmd'b'(Cmd) 'u'(Cmd) 'b'A simple framework for writing line-oriented command interpreters. + + These are often useful for test harnesses, administrative tools, and + prototypes that will later be wrapped in a more sophisticated interface. + + A Cmd instance or subclass instance is a line-oriented interpreter + framework. There is no good reason to instantiate Cmd itself; rather, + it's useful as a superclass of an interpreter class you define yourself + in order to inherit Cmd's methods and encapsulate action methods. + + 'u'A simple framework for writing line-oriented command interpreters. + + These are often useful for test harnesses, administrative tools, and + prototypes that will later be wrapped in a more sophisticated interface. + + A Cmd instance or subclass instance is a line-oriented interpreter + framework. There is no good reason to instantiate Cmd itself; rather, + it's useful as a superclass of an interpreter class you define yourself + in order to inherit Cmd's methods and encapsulate action methods. + + 'b'Documented commands (type help ):'u'Documented commands (type help ):'b'Miscellaneous help topics:'u'Miscellaneous help topics:'b'Undocumented commands:'u'Undocumented commands:'b'*** No help on %s'u'*** No help on %s'b'tab'u'tab'b'Instantiate a line-oriented interpreter framework. + + The optional argument 'completekey' is the readline name of a + completion key; it defaults to the Tab key. If completekey is + not None and the readline module is available, command completion + is done automatically. The optional arguments stdin and stdout + specify alternate input and output file objects; if not specified, + sys.stdin and sys.stdout are used. + + 'u'Instantiate a line-oriented interpreter framework. + + The optional argument 'completekey' is the readline name of a + completion key; it defaults to the Tab key. If completekey is + not None and the readline module is available, command completion + is done automatically. The optional arguments stdin and stdout + specify alternate input and output file objects; if not specified, + sys.stdin and sys.stdout are used. + + 'b'Repeatedly issue a prompt, accept input, parse an initial prefix + off the received input, and dispatch to action methods, passing them + the remainder of the line as argument. + + 'u'Repeatedly issue a prompt, accept input, parse an initial prefix + off the received input, and dispatch to action methods, passing them + the remainder of the line as argument. 
+ + 'b': complete'u': complete'b'EOF'u'EOF'b'Hook method executed just before the command line is + interpreted, but after the input prompt is generated and issued. + + 'u'Hook method executed just before the command line is + interpreted, but after the input prompt is generated and issued. + + 'b'Hook method executed just after a command dispatch is finished.'u'Hook method executed just after a command dispatch is finished.'b'Hook method executed once when the cmdloop() method is called.'u'Hook method executed once when the cmdloop() method is called.'b'Hook method executed once when the cmdloop() method is about to + return. + + 'u'Hook method executed once when the cmdloop() method is about to + return. + + 'b'Parse the line into a command name and a string containing + the arguments. Returns a tuple containing (command, args, line). + 'command' and 'args' may be None if the line couldn't be parsed. + 'u'Parse the line into a command name and a string containing + the arguments. Returns a tuple containing (command, args, line). + 'command' and 'args' may be None if the line couldn't be parsed. + 'b'help 'u'help 'u'!'b'do_shell'u'do_shell'b'shell 'u'shell 'b'Interpret the argument as though it had been typed in response + to the prompt. + + This may be overridden, but should not normally need to be; + see the precmd() and postcmd() methods for useful execution hooks. + The return value is a flag indicating whether interpretation of + commands by the interpreter should stop. + + 'u'Interpret the argument as though it had been typed in response + to the prompt. + + This may be overridden, but should not normally need to be; + see the precmd() and postcmd() methods for useful execution hooks. + The return value is a flag indicating whether interpretation of + commands by the interpreter should stop. + + 'b'do_'u'do_'b'Called when an empty line is entered in response to the prompt. + + If this method is not overridden, it repeats the last nonempty + command entered. + + 'u'Called when an empty line is entered in response to the prompt. + + If this method is not overridden, it repeats the last nonempty + command entered. + + 'b'Called on an input line when the command prefix is not recognized. + + If this method is not overridden, it prints an error message and + returns. + + 'u'Called on an input line when the command prefix is not recognized. + + If this method is not overridden, it prints an error message and + returns. + + 'b'*** Unknown syntax: %s +'u'*** Unknown syntax: %s +'b'Method called to complete an input line when no command-specific + complete_*() method is available. + + By default, it returns an empty list. + + 'u'Method called to complete an input line when no command-specific + complete_*() method is available. + + By default, it returns an empty list. + + 'b'Return the next possible completion for 'text'. + + If a command has not been entered, then complete against command list. + Otherwise try to call complete_ to get list of completions. + 'u'Return the next possible completion for 'text'. + + If a command has not been entered, then complete against command list. + Otherwise try to call complete_ to get list of completions. + 'b'complete_'u'complete_'b'help_'u'help_'b'List available commands with "help" or detailed help with "help cmd".'u'List available commands with "help" or detailed help with "help cmd".'b'%s +'u'%s +'b'Display a list of strings as a compact set of columns. + + Each column is only as wide as necessary. 
+ Columns are separated by two spaces (one was not legible enough). + 'u'Display a list of strings as a compact set of columns. + + Each column is only as wide as necessary. + Columns are separated by two spaces (one was not legible enough). + 'b' +'u' +'b'list[i] not a string for i in %s'u'list[i] not a string for i in %s'b' 'u' 'u'cmd'Utilities needed to emulate Python's interactive interpreter. + +codeopCommandCompilercompile_commandInteractiveInterpreterInteractiveConsoleinteractBase class for InteractiveConsole. + + This class deals with parsing and interpreter state (the user's + namespace); it doesn't deal with input buffering or prompting or + input file naming (the filename is always passed in explicitly). + + Constructor. + + The optional 'locals' argument specifies the dictionary in + which code will be executed; it defaults to a newly created + dictionary with key "__name__" set to "__console__" and key + "__doc__" set to None. + + __console__runsourcesinglesymbolCompile and run some source in the interpreter. + + Arguments are as for compile_command(). + + One of several things can happen: + + 1) The input is incorrect; compile_command() raised an + exception (SyntaxError or OverflowError). A syntax traceback + will be printed by calling the showsyntaxerror() method. + + 2) The input is incomplete, and more input is required; + compile_command() returned None. Nothing happens. + + 3) The input is complete; compile_command() returned a code + object. The code is executed by calling self.runcode() (which + also handles run-time exceptions, except for SystemExit). + + The return value is True in case 2, False in the other cases (unless + an exception is raised). The return value can be used to + decide whether to use sys.ps1 or sys.ps2 to prompt the next + line. + + showsyntaxerrorruncodeExecute a code object. + + When an exception occurs, self.showtraceback() is called to + display a traceback. All exceptions are caught except + SystemExit, which is reraised. + + A note about KeyboardInterrupt: this exception may occur + elsewhere in this code, and may not always be caught. The + caller should be prepared to deal with it. + + showtracebackDisplay the syntax error that just occurred. + + This doesn't display a stack trace because there isn't one. + + If a filename is given, it is stuffed in the exception instead + of what was there before (because Python's parser always uses + "" when reading from a string). + + The output is written by self.write(), below. + + dummy_filenameDisplay the exception that just occurred. + + We remove the first stack item because it is our own code. + + The output is written by self.write(), below. + + last_tbformat_exceptionWrite a string. + + The base implementation writes to sys.stderr; a subclass may + replace this with a different implementation. + + Closely emulate the behavior of the interactive Python interpreter. + + This class builds on InteractiveInterpreter and adds prompting + using the familiar sys.ps1 and sys.ps2, and input buffering. + + Constructor. + + The optional locals argument will be passed to the + InteractiveInterpreter base class. + + The optional filename argument should specify the (file)name + of the input stream; it will show up in tracebacks. + + resetbufferReset the input buffer.bannerexitmsgClosely emulate the interactive Python console. 
+ + The optional banner argument specifies the banner to print + before the first interaction; by default it prints a banner + similar to the one printed by the real Python interpreter, + followed by the current class name in parentheses (so as not + to confuse this with the real interpreter -- since it's so + close!). + + The optional exitmsg argument specifies the exit message + printed when exiting. Pass the empty string to suppress + printing an exit message. If exitmsg is not given or None, + a default message is printed. + + ps1>>> ps2... Type "help", "copyright", "credits" or "license" for more information.cprtPython %s on %s +%s +(%s) +moreraw_inputpush +KeyboardInterrupt +now exiting %s... +Push a line to the interpreter. + + The line should not have a trailing newline; it may have + internal newlines. The line is appended to a buffer and the + interpreter's runsource() method is called with the + concatenated contents of the buffer as source. If this + indicates that the command was executed or invalid, the buffer + is reset; otherwise, the command is incomplete, and the buffer + is left as it was after the line was appended. The return + value is 1 if more input is required, 0 if the line was dealt + with in some way (this is the same as runsource()). + + Write a prompt and read a line. + + The returned line does not include the trailing newline. + When the user enters the EOF key sequence, EOFError is raised. + + The base implementation uses the built-in function + input(); a subclass may replace this with a different + implementation. + + readfuncClosely emulate the interactive Python interpreter. + + This is a backwards compatible interface to the InteractiveConsole + class. When readfunc is not specified, it attempts to import the + readline module to enable GNU readline if it is available. + + Arguments (all optional, all default to None): + + banner -- passed to InteractiveConsole.interact() + readfunc -- if not None, replaces InteractiveConsole.raw_input() + local -- passed to InteractiveInterpreter.__init__() + exitmsg -- passed to InteractiveConsole.interact() + + console-qdon't print version and copyright messages# Inspired by similar code by Jeff Epler and Fredrik Lundh.# Case 1# Case 2# Case 3# Work hard to stuff the correct filename in the exception# Not the format we expect; leave it alone# Stuff in the right filename# If someone has set sys.excepthook, we let that take precedence# over self.writeb'Utilities needed to emulate Python's interactive interpreter. + +'u'Utilities needed to emulate Python's interactive interpreter. + +'b'InteractiveInterpreter'u'InteractiveInterpreter'b'InteractiveConsole'u'InteractiveConsole'b'interact'u'interact'b'compile_command'u'compile_command'b'Base class for InteractiveConsole. + + This class deals with parsing and interpreter state (the user's + namespace); it doesn't deal with input buffering or prompting or + input file naming (the filename is always passed in explicitly). + + 'u'Base class for InteractiveConsole. + + This class deals with parsing and interpreter state (the user's + namespace); it doesn't deal with input buffering or prompting or + input file naming (the filename is always passed in explicitly). + + 'b'Constructor. + + The optional 'locals' argument specifies the dictionary in + which code will be executed; it defaults to a newly created + dictionary with key "__name__" set to "__console__" and key + "__doc__" set to None. + + 'u'Constructor. 
+ + The optional 'locals' argument specifies the dictionary in + which code will be executed; it defaults to a newly created + dictionary with key "__name__" set to "__console__" and key + "__doc__" set to None. + + 'b'__console__'u'__console__'b''u''b'single'u'single'b'Compile and run some source in the interpreter. + + Arguments are as for compile_command(). + + One of several things can happen: + + 1) The input is incorrect; compile_command() raised an + exception (SyntaxError or OverflowError). A syntax traceback + will be printed by calling the showsyntaxerror() method. + + 2) The input is incomplete, and more input is required; + compile_command() returned None. Nothing happens. + + 3) The input is complete; compile_command() returned a code + object. The code is executed by calling self.runcode() (which + also handles run-time exceptions, except for SystemExit). + + The return value is True in case 2, False in the other cases (unless + an exception is raised). The return value can be used to + decide whether to use sys.ps1 or sys.ps2 to prompt the next + line. + + 'u'Compile and run some source in the interpreter. + + Arguments are as for compile_command(). + + One of several things can happen: + + 1) The input is incorrect; compile_command() raised an + exception (SyntaxError or OverflowError). A syntax traceback + will be printed by calling the showsyntaxerror() method. + + 2) The input is incomplete, and more input is required; + compile_command() returned None. Nothing happens. + + 3) The input is complete; compile_command() returned a code + object. The code is executed by calling self.runcode() (which + also handles run-time exceptions, except for SystemExit). + + The return value is True in case 2, False in the other cases (unless + an exception is raised). The return value can be used to + decide whether to use sys.ps1 or sys.ps2 to prompt the next + line. + + 'b'Execute a code object. + + When an exception occurs, self.showtraceback() is called to + display a traceback. All exceptions are caught except + SystemExit, which is reraised. + + A note about KeyboardInterrupt: this exception may occur + elsewhere in this code, and may not always be caught. The + caller should be prepared to deal with it. + + 'u'Execute a code object. + + When an exception occurs, self.showtraceback() is called to + display a traceback. All exceptions are caught except + SystemExit, which is reraised. + + A note about KeyboardInterrupt: this exception may occur + elsewhere in this code, and may not always be caught. The + caller should be prepared to deal with it. + + 'b'Display the syntax error that just occurred. + + This doesn't display a stack trace because there isn't one. + + If a filename is given, it is stuffed in the exception instead + of what was there before (because Python's parser always uses + "" when reading from a string). + + The output is written by self.write(), below. + + 'u'Display the syntax error that just occurred. + + This doesn't display a stack trace because there isn't one. + + If a filename is given, it is stuffed in the exception instead + of what was there before (because Python's parser always uses + "" when reading from a string). + + The output is written by self.write(), below. + + 'b'Display the exception that just occurred. + + We remove the first stack item because it is our own code. + + The output is written by self.write(), below. + + 'u'Display the exception that just occurred. + + We remove the first stack item because it is our own code. 
+ + The output is written by self.write(), below. + + 'b'Write a string. + + The base implementation writes to sys.stderr; a subclass may + replace this with a different implementation. + + 'u'Write a string. + + The base implementation writes to sys.stderr; a subclass may + replace this with a different implementation. + + 'b'Closely emulate the behavior of the interactive Python interpreter. + + This class builds on InteractiveInterpreter and adds prompting + using the familiar sys.ps1 and sys.ps2, and input buffering. + + 'u'Closely emulate the behavior of the interactive Python interpreter. + + This class builds on InteractiveInterpreter and adds prompting + using the familiar sys.ps1 and sys.ps2, and input buffering. + + 'b''u''b'Constructor. + + The optional locals argument will be passed to the + InteractiveInterpreter base class. + + The optional filename argument should specify the (file)name + of the input stream; it will show up in tracebacks. + + 'u'Constructor. + + The optional locals argument will be passed to the + InteractiveInterpreter base class. + + The optional filename argument should specify the (file)name + of the input stream; it will show up in tracebacks. + + 'b'Reset the input buffer.'u'Reset the input buffer.'b'Closely emulate the interactive Python console. + + The optional banner argument specifies the banner to print + before the first interaction; by default it prints a banner + similar to the one printed by the real Python interpreter, + followed by the current class name in parentheses (so as not + to confuse this with the real interpreter -- since it's so + close!). + + The optional exitmsg argument specifies the exit message + printed when exiting. Pass the empty string to suppress + printing an exit message. If exitmsg is not given or None, + a default message is printed. + + 'u'Closely emulate the interactive Python console. + + The optional banner argument specifies the banner to print + before the first interaction; by default it prints a banner + similar to the one printed by the real Python interpreter, + followed by the current class name in parentheses (so as not + to confuse this with the real interpreter -- since it's so + close!). + + The optional exitmsg argument specifies the exit message + printed when exiting. Pass the empty string to suppress + printing an exit message. If exitmsg is not given or None, + a default message is printed. + + 'b'>>> 'u'>>> 'b'... 'u'... 'b'Type "help", "copyright", "credits" or "license" for more information.'u'Type "help", "copyright", "credits" or "license" for more information.'b'Python %s on %s +%s +(%s) +'u'Python %s on %s +%s +(%s) +'b' +KeyboardInterrupt +'u' +KeyboardInterrupt +'b'now exiting %s... +'u'now exiting %s... +'b'Push a line to the interpreter. + + The line should not have a trailing newline; it may have + internal newlines. The line is appended to a buffer and the + interpreter's runsource() method is called with the + concatenated contents of the buffer as source. If this + indicates that the command was executed or invalid, the buffer + is reset; otherwise, the command is incomplete, and the buffer + is left as it was after the line was appended. The return + value is 1 if more input is required, 0 if the line was dealt + with in some way (this is the same as runsource()). + + 'u'Push a line to the interpreter. + + The line should not have a trailing newline; it may have + internal newlines. 
The line is appended to a buffer and the + interpreter's runsource() method is called with the + concatenated contents of the buffer as source. If this + indicates that the command was executed or invalid, the buffer + is reset; otherwise, the command is incomplete, and the buffer + is left as it was after the line was appended. The return + value is 1 if more input is required, 0 if the line was dealt + with in some way (this is the same as runsource()). + + 'b'Write a prompt and read a line. + + The returned line does not include the trailing newline. + When the user enters the EOF key sequence, EOFError is raised. + + The base implementation uses the built-in function + input(); a subclass may replace this with a different + implementation. + + 'u'Write a prompt and read a line. + + The returned line does not include the trailing newline. + When the user enters the EOF key sequence, EOFError is raised. + + The base implementation uses the built-in function + input(); a subclass may replace this with a different + implementation. + + 'b'Closely emulate the interactive Python interpreter. + + This is a backwards compatible interface to the InteractiveConsole + class. When readfunc is not specified, it attempts to import the + readline module to enable GNU readline if it is available. + + Arguments (all optional, all default to None): + + banner -- passed to InteractiveConsole.interact() + readfunc -- if not None, replaces InteractiveConsole.raw_input() + local -- passed to InteractiveInterpreter.__init__() + exitmsg -- passed to InteractiveConsole.interact() + + 'u'Closely emulate the interactive Python interpreter. + + This is a backwards compatible interface to the InteractiveConsole + class. When readfunc is not specified, it attempts to import the + readline module to enable GNU readline if it is available. + + Arguments (all optional, all default to None): + + banner -- passed to InteractiveConsole.interact() + readfunc -- if not None, replaces InteractiveConsole.raw_input() + local -- passed to InteractiveInterpreter.__init__() + exitmsg -- passed to InteractiveConsole.interact() + + 'b'-q'u'-q'b'don't print version and copyright messages'u'don't print version and copyright messages'u'code' codecs -- Python Codec Registry, API and helpers. + + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +whyFailed to load the builtin codecs: %sEncodedFileBOMBOM_BEBOM_LEBOM32_BEBOM32_LEBOM64_BEBOM64_LEBOM_UTF8BOM_UTF16BOM_UTF16_LEBOM_UTF16_BEBOM_UTF32BOM_UTF32_LEBOM_UTF32_BECodecIncrementalEncoderIncrementalDecoderStreamReaderStreamWriterStreamReaderWriterStreamRecodergetencodergetdecodergetincrementalencodergetincrementaldecodergetreadergetwriteriterencodeiterdecodestrict_errorsignore_errorsreplace_errorsxmlcharrefreplace_errorsbackslashreplace_errorsnamereplace_errorsÿþþÿÿþþÿCodec details when looking up the codec registry_is_text_encodingstreamreaderstreamwriterincrementalencoderincrementaldecoder<%s.%s object for encoding %s at %#x> Defines the interface for stateless encoders/decoders. + + The .encode()/.decode() methods may use different error + handling schemes by providing the errors argument. These + string values are predefined: + + 'strict' - raise a ValueError error (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace' - replace with a suitable replacement character; + Python will use the official U+FFFD REPLACEMENT + CHARACTER for the builtin Unicode codecs on + decoding and '?' 
on encoding. + 'surrogateescape' - replace with private code points U+DCnn. + 'xmlcharrefreplace' - Replace with the appropriate XML + character reference (only for encoding). + 'backslashreplace' - Replace with backslashed escape sequences. + 'namereplace' - Replace with \N{...} escape sequences + (only for encoding). + + The set of allowed values can be extended via register_error. + + Encodes the object input and returns a tuple (output + object, length consumed). + + errors defines the error handling to apply. It defaults to + 'strict' handling. + + The method may not store state in the Codec instance. Use + StreamWriter for codecs which have to keep state in order to + make encoding efficient. + + The encoder must be able to handle zero length input and + return an empty object of the output object type in this + situation. + + Decodes the object input and returns a tuple (output + object, length consumed). + + input must be an object which provides the bf_getreadbuf + buffer slot. Python strings, buffer objects and memory + mapped files are examples of objects providing this slot. + + errors defines the error handling to apply. It defaults to + 'strict' handling. + + The method may not store state in the Codec instance. Use + StreamReader for codecs which have to keep state in order to + make decoding efficient. + + The decoder must be able to handle zero length input and + return an empty object of the output object type in this + situation. + + + An IncrementalEncoder encodes an input in multiple steps. The input can + be passed piece by piece to the encode() method. The IncrementalEncoder + remembers the state of the encoding process between calls to encode(). + + Creates an IncrementalEncoder instance. + + The IncrementalEncoder may use different error handling schemes by + providing the errors keyword argument. See the module docstring + for a list of possible values. + + Encodes input and returns the resulting object. + + Resets the encoder to the initial state. + + Return the current state of the encoder. + + Set the current state of the encoder. state must have been + returned by getstate(). + BufferedIncrementalEncoder + This subclass of IncrementalEncoder can be used as the baseclass for an + incremental encoder if the encoder must keep some of the output in a + buffer between calls to encode(). + _buffer_encodeconsumed + An IncrementalDecoder decodes an input in multiple steps. The input can + be passed piece by piece to the decode() method. The IncrementalDecoder + remembers the state of the decoding process between calls to decode(). + + Create an IncrementalDecoder instance. + + The IncrementalDecoder may use different error handling schemes by + providing the errors keyword argument. See the module docstring + for a list of possible values. + + Decode input and returns the resulting object. + + Reset the decoder to the initial state. + + Return the current state of the decoder. + + This must be a (buffered_input, additional_state_info) tuple. + buffered_input must be a bytes object containing bytes that + were passed to decode() that have not yet been converted. + additional_state_info must be a non-negative integer + representing the state of the decoder WITHOUT yet having + processed the contents of buffered_input. In the initial state + and after reset(), getstate() must return (b"", 0). + + Set the current state of the decoder. + + state must have been returned by getstate(). The effect of + setstate((b"", 0)) must be equivalent to reset(). 
+ BufferedIncrementalDecoder + This subclass of IncrementalDecoder can be used as the baseclass for an + incremental decoder if the decoder must be able to handle incomplete + byte sequences. + _buffer_decode Creates a StreamWriter instance. + + stream must be a file-like object open for writing. + + The StreamWriter may use different error handling + schemes by providing the errors keyword argument. These + parameters are predefined: + + 'strict' - raise a ValueError (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace'- replace with a suitable replacement character + 'xmlcharrefreplace' - Replace with the appropriate XML + character reference. + 'backslashreplace' - Replace with backslashed escape + sequences. + 'namereplace' - Replace with \N{...} escape sequences. + + The set of allowed parameter values can be extended via + register_error. + Writes the object's contents encoded to self.stream. + Writes the concatenated list of strings to the stream + using .write(). + Flushes and resets the codec buffers used for keeping state. + + Calling this method should ensure that the data on the + output is put into a clean state, that allows appending + of new fresh data without having to rescan the whole + stream to recover state. + + Inherit all other methods from the underlying stream. + charbuffertype Creates a StreamReader instance. + + stream must be a file-like object open for reading. + + The StreamReader may use different error handling + schemes by providing the errors keyword argument. These + parameters are predefined: + + 'strict' - raise a ValueError (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace'- replace with a suitable replacement character + 'backslashreplace' - Replace with backslashed escape sequences; + + The set of allowed parameter values can be extended via + register_error. + bytebuffer_empty_charbuffercharbufferlinebufferfirstline Decodes data from the stream self.stream and returns the + resulting object. + + chars indicates the number of decoded code points or bytes to + return. read() will never return more data than requested, + but it might return less, if there is not enough available. + + size indicates the approximate maximum number of decoded + bytes or code points to read for decoding. The decoder + can modify this setting as appropriate. The default value + -1 indicates to read and decode as much as possible. size + is intended to prevent having to decode huge files in one + step. + + If firstline is true, and a UnicodeDecodeError happens + after the first line terminator in the input only the first line + will be returned, the rest of the input will be kept until the + next call to read(). + + The method should use a greedy read strategy, meaning that + it should read as much data as is allowed within the + definition of the encoding and the given size, e.g. if + optional encoding endings or state markers are available + on the stream, these should be read too. + newdatanewcharsdecodedbytes Read one line from the input stream and return the + decoded data. + + size, if given, is passed as size argument to the + read() method. + + 72readsizeline0withendline0withoutend8000sizehint Read all lines available on the input stream + and return them as a list. + + Line breaks are implemented using the codec's decoder + method and are included in the list entries. + + sizehint, if given, is ignored since there is no efficient + way to finding the true end-of-line. 
+ + Resets the codec buffers used for keeping state. + + Note that no stream repositioning should take place. + This method is primarily intended to be able to recover + from decoding errors. + + Set the input stream's current position. + + Resets the codec buffers used for keeping state. + Return the next decoded line from the input stream. StreamReaderWriter instances allow wrapping streams which + work in both read and write modes. + + The design is such that one can use the factory functions + returned by the codec.lookup() function to construct the + instance. + + unknownReaderWriter Creates a StreamReaderWriter instance. + + stream must be a Stream-like object. + + Reader, Writer must be factory functions or classes + providing the StreamReader, StreamWriter interface resp. + + Error handling is done in the same way as defined for the + StreamWriter/Readers. + + readerwriter StreamRecoder instances translate data from one encoding to another. + + They use the complete set of APIs returned by the + codecs.lookup() function to implement their task. + + Data written to the StreamRecoder is first decoded into an + intermediate format (depending on the "decode" codec) and then + written to the underlying stream using an instance of the provided + Writer class. + + In the other direction, data is read from the underlying stream using + a Reader instance and then encoded and returned to the caller. + + data_encodingfile_encoding Creates a StreamRecoder instance which implements a two-way + conversion: encode and decode work on the frontend (the + data visible to .read() and .write()) while Reader and Writer + work on the backend (the data in stream). + + You can use these objects to do transparent + transcodings from e.g. latin-1 to utf-8 and back. + + stream must be a file-like object. + + encode and decode must adhere to the Codec interface; Reader and + Writer must be factory functions or classes providing the + StreamReader and StreamWriter interfaces resp. + + Error handling is done in the same way as defined for the + StreamWriter/Readers. + + bytesencodedbytesdecoded Open an encoded file using the given mode and return + a wrapped version providing transparent encoding/decoding. + + Note: The wrapped version will only accept the object format + defined by the codecs, i.e. Unicode objects for most builtin + codecs. Output is also codec dependent and will usually be + Unicode as well. + + Underlying encoded files are always opened in binary mode. + The default file mode is 'r', meaning to open the file in read mode. + + encoding specifies the encoding which is to be used for the + file. + + errors may be given to define the error handling. It defaults + to 'strict' which causes ValueErrors to be raised in case an + encoding error occurs. + + buffering has the same meaning as for the builtin open() API. + It defaults to -1 which means that the default buffer size will + be used. + + The returned wrapped file object provides an extra attribute + .encoding which allows querying the used encoding. This + attribute is only available if an encoding was specified as + parameter. + + srw Return a wrapped version of file which provides transparent + encoding translation. + + Data written to the wrapped file is decoded according + to the given data_encoding and then encoded to the underlying + file using file_encoding. The intermediate data type + will usually be Unicode but depends on the specified codecs. 
+ + Bytes read from the file are decoded using file_encoding and then + passed back to the caller encoded using data_encoding. + + If file_encoding is not given, it defaults to data_encoding. + + errors may be given to define the error handling. It defaults + to 'strict' which causes ValueErrors to be raised in case an + encoding error occurs. + + The returned wrapped file object provides two extra attributes + .data_encoding and .file_encoding which reflect the given + parameters of the same name. The attributes can be used for + introspection by Python programs. + + data_infofile_infosr Lookup up the codec for the given encoding and return + its encoder function. + + Raises a LookupError in case the encoding cannot be found. + + Lookup up the codec for the given encoding and return + its decoder function. + + Raises a LookupError in case the encoding cannot be found. + + Lookup up the codec for the given encoding and return + its IncrementalEncoder class or factory function. + + Raises a LookupError in case the encoding cannot be found + or the codecs doesn't provide an incremental encoder. + + Lookup up the codec for the given encoding and return + its IncrementalDecoder class or factory function. + + Raises a LookupError in case the encoding cannot be found + or the codecs doesn't provide an incremental decoder. + + decoder Lookup up the codec for the given encoding and return + its StreamReader class or factory function. + + Raises a LookupError in case the encoding cannot be found. + + Lookup up the codec for the given encoding and return + its StreamWriter class or factory function. + + Raises a LookupError in case the encoding cannot be found. + + + Encoding iterator. + + Encodes the input strings from the iterator using an IncrementalEncoder. + + errors and kwargs are passed through to the IncrementalEncoder + constructor. + + Decoding iterator. + + Decodes the input strings from the iterator using an IncrementalDecoder. + + errors and kwargs are passed through to the IncrementalDecoder + constructor. + make_identity_dictrng make_identity_dict(rng) -> dict + + Return a dictionary where elements of the rng sequence are + mapped to themselves. + + make_encoding_mapdecoding_map Creates an encoding map from a decoding map. + + If a target mapping in the decoding map occurs multiple + times, then that target is mapped to None (undefined mapping), + causing an exception when encountered by the charmap codec + during translation. + + One example where this happens is cp875.py which decodes + multiple character to \u001a. + + backslashreplacenamereplace_false### Registry and builtin stateless codec functions### Constants# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)# and its possible byte string values# for UTF8/UTF16/UTF32 output and little/big endian machines# UTF-8# UTF-16, little endian# UTF-16, big endian# UTF-32, little endian# UTF-32, big endian# UTF-16, native endianness# UTF-32, native endianness# Old broken names (don't use in new code)### Codec base classes (defining the API)# Private API to allow Python 3.4 to blacklist the known non-Unicode# codecs in the standard library. 
+ [cached codecs implementation comments (incremental buffer handling, StreamReader line caching, lookup helpers, error handlers, self-tests) plus the b''/u'' duplicate pairs of the module docstring, exported names, BOM constants, and the Codec, IncrementalEncoder, IncrementalDecoder and Buffered* class docstrings, ending at the start of the StreamWriter docstring]
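The duplicated docstrings above spell out the IncrementalEncoder/IncrementalDecoder contract: partial input is buffered between calls, and getstate() must return (b"", 0) in the initial state. A minimal sketch of that contract using the real UTF-8 incremental decoder:

    import codecs

    # Feed a UTF-8 byte stream in chunks, one of which splits a multi-byte
    # character; the decoder buffers the incomplete tail between calls.
    decoder = codecs.getincrementaldecoder("utf-8")(errors="strict")

    parts = [b"caf", b"\xc3", b"\xa9", b""]
    out = []
    for i, part in enumerate(parts):
        out.append(decoder.decode(part, final=(i == len(parts) - 1)))

    print("".join(out))        # -> café
    print(decoder.getstate())  # -> (b'', 0): nothing buffered, initial extra state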
+ [b''/u'' duplicate pairs of the StreamWriter and StreamReader docstrings (error-handling schemes, write()/writelines()/reset(), read()/readline()/readlines()/seek() contracts) and of the StreamReaderWriter and StreamRecoder class docstrings, ending at the start of the open() docstring]
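The StreamWriter/StreamReader docstrings above describe wrapping a binary stream with the factories returned by codecs.lookup(). A small sketch, using an in-memory io.BytesIO buffer purely for demonstration:

    import codecs
    import io

    buf = io.BytesIO()

    # getwriter()/getreader() return the StreamWriter/StreamReader factories.
    writer = codecs.getwriter("utf-16")(buf, errors="strict")
    writer.write("first line\n")
    writer.writelines(["second ", "line\n"])

    buf.seek(0)
    reader = codecs.getreader("utf-16")(buf)
    print(reader.readline())   # -> 'first line\n'
    print(reader.read())       # -> 'second line\n'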
+ [b''/u'' duplicate pairs of the open()/EncodedFile() docstrings, the lookup helpers getencoder(), getdecoder(), getincrementalencoder(), getincrementaldecoder(), getreader() and getwriter(), iterencode()/iterdecode(), make_identity_dict(), and the start of make_encoding_map()]
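The lookup-helper docstrings above all follow the same pattern: resolve the codec by name and raise LookupError if it does not exist. A brief sketch of the stateless entry points:

    import codecs

    info = codecs.lookup("utf-8")          # CodecInfo bundling all entry points
    print(info.name)                       # -> utf-8

    encode = codecs.getencoder("utf-8")    # stateless encoder function
    decode = codecs.getdecoder("utf-8")    # stateless decoder function
    print(encode("héllo"))                 # -> (b'h\xc3\xa9llo', 5)
    print(decode(b"h\xc3\xa9llo"))         # -> ('héllo', 6)

    try:
        codecs.getencoder("no-such-codec")
    except LookupError as exc:
        print("lookup failed:", exc)       # unknown encodings raise LookupError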
+ [tail of the codecs string table (make_encoding_map, error-handler names), followed by the codeop module strings: the module docstring describing the compile-three-times approach, the compile_command(), Compile and CommandCompiler docstrings, their implementation comments and b''/u'' duplicate pairs, and the start of the multiprocessing.connection identifier table]
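The codeop strings above document compile_command() and CommandCompiler, which decide whether interactive input is complete, incomplete, or invalid by compiling it with zero, one and two trailing newlines. An illustrative sketch:

    import codeop

    compiler = codeop.CommandCompiler()

    print(compiler("x = 1"))         # code object: complete and valid
    print(compiler("if True:"))      # None: incomplete, read more input

    try:
        compiler("1 +* 2")           # invalid syntax: raises SyntaxError
    except SyntaxError as exc:
        print("syntax error:", exc.msg)

    # compile_command() is the stateless, one-shot form of the same check.
    print(codeop.compile_command("print('hi')", symbol="single"))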
+ [cached multiprocessing.connection strings: Connection/PipeConnection send()/recv()/send_bytes()/recv_bytes()/poll() docstrings, the Listener/Client/Pipe() factory docstrings, challenge/response authentication helpers, wait() and the selector/overlapped-I/O implementation comments, plus their b''/u'' duplicate pairs, ending at the start of the tkinter constant names]
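The multiprocessing.connection strings above cover Connection objects, the Listener/Client pair, Pipe(), and wait(). A minimal single-process sketch of the Connection API (normally the two ends would live in different processes):

    from multiprocessing.connection import Pipe, wait

    parent, child = Pipe(duplex=True)        # two Connection objects

    child.send({"status": "ready"})          # send any picklable object
    if parent in wait([parent], timeout=1.0):
        print(parent.recv())                 # -> {'status': 'ready'}

    parent.send_bytes(b"raw payload")        # byte-oriented counterpart
    print(child.recv_bytes())                # -> b'raw payload'

    parent.close()
    child.close()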
+ [tkinter constants (anchor, fill, side, relief, state and canvas-style names) with their explanatory comments and b''/u'' duplicate pairs, the asyncio.constants values (connection-lost logging threshold, accept retry delay, SSL handshake timeout, debug stack depth), and the start of the multiprocessing.context BaseContext helper docstrings (cpu_count(), Manager(), Pipe(), Lock(), Queue(), Pool(), Value()/Array(), freeze_support(), get_logger(), log_to_stderr(), set_executable(), set_forkserver_preload())]
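The tkinter constant names above (BOTH, LEFT, RAISED, and so on) are the symbolic values passed to geometry managers and widget options. A tiny sketch, for illustration only (it opens a window, so it needs a display):

    import tkinter
    from tkinter.constants import BOTH, LEFT, RAISED

    root = tkinter.Tk()
    frame = tkinter.Frame(root, relief=RAISED, borderwidth=2)
    frame.pack(side=LEFT, fill=BOTH, expand=True)
    tkinter.Button(frame, text="Quit", command=root.destroy).pack()
    root.mainloop()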
+ forkserver_concrete_contextscannot find context for %r_check_availableget_start_methodset_start_methodcannot set start method of concrete contextreducerControls how objects will be reduced to a form that can be + shared with other processes.BaseProcess_start_method_Popenprocess_obj_actual_contextcontext has already been setget_all_start_methodsforkHAVE_SEND_HANDLEForkProcesspopen_forkSpawnProcesspopen_spawn_posixForkServerProcesspopen_forkserverForkContextSpawnContextForkServerContextforkserver start method not availablepopen_spawn_win32_force_start_method_tlsget_spawning_popenspawning_popenset_spawning_popenpopenassert_spawning%s objects should only be shared between processes through inheritance'%s objects should only be shared between processes'' through inheritance'# Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py# This is undocumented. In previous versions of multiprocessing# its only effect was to make socket objects inheritable on Windows.# Type of default context -- underlying context can be set at most once# Context types for fixed start method# bpo-33725: running arbitrary code after fork() is no longer reliable# on macOS since macOS 10.14 (Mojave). Use spawn by default instead.# Force the start method# Check that the current thread is spawning a child processb'Returns the number of CPUs in the system'u'Returns the number of CPUs in the system'b'cannot determine number of cpus'u'cannot determine number of cpus'b'Returns a manager associated with a running server process + + The managers methods such as `Lock()`, `Condition()` and `Queue()` + can be used to create shared objects. + 'u'Returns a manager associated with a running server process + + The managers methods such as `Lock()`, `Condition()` and `Queue()` + can be used to create shared objects. + 'b'Returns two connection object connected by a pipe'u'Returns two connection object connected by a pipe'b'Returns a non-recursive lock object'u'Returns a non-recursive lock object'b'Returns a recursive lock object'u'Returns a recursive lock object'b'Returns a condition object'u'Returns a condition object'b'Returns a semaphore object'u'Returns a semaphore object'b'Returns a bounded semaphore object'u'Returns a bounded semaphore object'b'Returns an event object'u'Returns an event object'b'Returns a barrier object'u'Returns a barrier object'b'Returns a queue object'u'Returns a queue object'b'Returns a process pool object'u'Returns a process pool object'b'Returns a shared object'u'Returns a shared object'b'Returns a shared array'u'Returns a shared array'b'Returns a synchronized shared object'u'Returns a synchronized shared object'b'Returns a synchronized shared array'u'Returns a synchronized shared array'b'Check whether this is a fake forked process in a frozen executable. + If so then run code specified by commandline and exit. + 'u'Check whether this is a fake forked process in a frozen executable. + If so then run code specified by commandline and exit. + 'b'Return package logger -- if it does not already exist then + it is created. + 'u'Return package logger -- if it does not already exist then + it is created. 
+ 'b'Turn on logging and add a handler which prints to stderr'u'Turn on logging and add a handler which prints to stderr'b'Install support for sending connections and sockets + between processes + 'u'Install support for sending connections and sockets + between processes + 'b'Sets the path to a python.exe or pythonw.exe binary used to run + child processes instead of sys.executable when using the 'spawn' + start method. Useful for people embedding Python. + 'u'Sets the path to a python.exe or pythonw.exe binary used to run + child processes instead of sys.executable when using the 'spawn' + start method. Useful for people embedding Python. + 'b'Set list of module names to try to load in forkserver process. + This is really just a hint. + 'u'Set list of module names to try to load in forkserver process. + This is really just a hint. + 'b'cannot find context for %r'u'cannot find context for %r'b'cannot set start method of concrete context'u'cannot set start method of concrete context'b'Controls how objects will be reduced to a form that can be + shared with other processes.'u'Controls how objects will be reduced to a form that can be + shared with other processes.'b'reduction'u'reduction'b'context has already been set'u'context has already been set'b'spawn'u'spawn'b'fork'u'fork'b'forkserver'u'forkserver'b'forkserver start method not available'u'forkserver start method not available'b'spawning_popen'u'spawning_popen'b'%s objects should only be shared between processes through inheritance'u'%s objects should only be shared between processes through inheritance'Utilities for with-statement contexts. See PEP 343.asynccontextmanagernullcontextAbstractContextManagerAbstractAsyncContextManagerAsyncExitStackContextDecoratorredirect_stdoutredirect_stderrsuppressAn abstract base class for context managers.Return `self` upon entering the runtime context.Raise any exception triggered within the runtime context.An abstract base class for asynchronous context managers.__aenter____aexit__A base class or mixin that enables context managers to work as decorators._recreate_cmReturn a recreated instance of self. + + Allows an otherwise one-shot context manager like + _GeneratorContextManager to support use as + a decorator via implicit recreation. + + This is a private interface just for _GeneratorContextManager. + See issue #11647 for details. + _GeneratorContextManagerBaseShared functionality for @contextmanager and @asynccontextmanager.gen_GeneratorContextManagerHelper for @contextmanager decorator.generator didn't yieldgenerator didn't stopgenerator didn't stop after throw()_AsyncGeneratorContextManagerHelper for @asynccontextmanager.generator didn't stop after athrow()@contextmanager decorator. + + Typical usage: + + @contextmanager + def some_generator(): + + try: + yield + finally: + + + This makes this: + + with some_generator() as : + + + equivalent to this: + + + try: + = + + finally: + + helper@asynccontextmanager decorator. + + Typical usage: + + @asynccontextmanager + async def some_async_generator(): + + try: + yield + finally: + + + This makes this: + + async with some_async_generator() as : + + + equivalent to this: + + + try: + = + + finally: + + Context to automatically close something at the end of a block. + + Code like this: + + with closing(.open()) as f: + + + is equivalent to this: + + f = .open() + try: + + finally: + f.close() + + thing_RedirectStream_streamnew_target_new_target_old_targetsexctypeexcinstexctbContext manager for temporarily redirecting stdout to another file. 
+ + # How to send help() to stderr + with redirect_stdout(sys.stderr): + help(dir) + + # How to write help() to a file + with open('help.txt', 'w') as f: + with redirect_stdout(f): + help(pow) + Context manager for temporarily redirecting stderr to another file.Context manager to suppress specified exceptions + + After the exception is suppressed, execution proceeds with the next + statement following the with statement. + + with suppress(FileNotFoundError): + os.remove(somefile) + # Execution still resumes here if the file was already removed + _exceptions_BaseExitStackA base class for ExitStack and AsyncExitStack._create_exit_wrappercm_exit_create_cb_wrapper_exit_wrapper_exit_callbackspop_allPreserve the context stack by transferring it to a new instance.new_stackRegisters a callback with the standard __exit__ method signature. + + Can suppress exceptions the same way __exit__ method can. + Also accepts any object with an __exit__ method (registering a call + to the method instead of the object itself). + _cb_typeexit_method_push_cm_exit_push_exit_callbackenter_contextEnters the supplied context manager. + + If successful, also pushes its __exit__ method as a callback and + returns the result of the __enter__ method. + _cm_typeRegisters an arbitrary callback and arguments. + + Cannot suppress exceptions. + descriptor 'callback' of '_BaseExitStack' object needs an argument"descriptor 'callback' of '_BaseExitStack' object "Passing 'callback' as keyword argument is deprecatedcallback expected at least 1 positional argument, got %d'callback expected at least 1 positional argument, '__wrapped__($self, callback, /, *args, **kwds)Helper to correctly register callbacks to __exit__ methods.is_syncContext manager for dynamic management of a stack of exit callbacks. + + For example: + with ExitStack() as stack: + files = [stack.enter_context(open(fname)) for fname in filenames] + # All opened files will automatically be closed at the end of + # the with statement, even if attempts to open files later + # in the list raise an exception. + received_excframe_exc_fix_exception_contextnew_excold_excexc_contextsuppressed_excpending_raisenew_exc_detailsfixed_ctxImmediately unwind the context stack.Async context manager for dynamic management of a stack of exit + callbacks. + + For example: + async with AsyncExitStack() as stack: + connections = [await stack.enter_async_context(get_connection()) + for i in range(5)] + # All opened connections will automatically be released at the + # end of the async with statement, even if attempts to open a + # connection later in the list raise an exception. + _create_async_exit_wrapper_create_async_cb_wrapperenter_async_contextEnters the supplied async context manager. + + If successful, also pushes its __aexit__ method as a callback and + returns the result of the __aenter__ method. + _push_async_cm_exitpush_async_exitRegisters a coroutine function with the standard __aexit__ method + signature. + + Can suppress exceptions the same way __aexit__ method can. + Also accepts any object with an __aexit__ method (registering a call + to the method instead of the object itself). + push_async_callbackRegisters an arbitrary coroutine function and arguments. + + Cannot suppress exceptions. 
+ descriptor 'push_async_callback' of 'AsyncExitStack' object needs an argument"descriptor 'push_async_callback' of ""'AsyncExitStack' object needs an argument"push_async_callback expected at least 1 positional argument, got %d'push_async_callback expected at least 1 ''positional argument, got %d'Helper to correctly register coroutine function to __aexit__ + method.cb_suppressContext manager that does no additional processing. + + Used as a stand-in for a normal context manager, when a particular + block of code is only sometimes used with a normal context manager: + + cm = optional_cm if condition else nullcontext() + with cm: + # Perform operation, using optional_cm if condition is True + enter_resultexcinfo# Issue 19330: ensure context manager instances have good docstrings# Unfortunately, this still doesn't provide good help output when# inspecting the created context manager instances, since pydoc# currently bypasses the instance docstring and shows the docstring# for the class instead.# See http://bugs.python.org/issue19404 for more details.# _GCM instances are one-shot context managers, so the# CM must be recreated each time a decorated function is# called# do not keep args and kwds alive unnecessarily# they are only needed for recreation, which is not possible anymore# Need to force instantiation so we can reliably# tell if we get the same exception back# Suppress StopIteration *unless* it's the same exception that# was passed to throw(). This prevents a StopIteration# raised inside the "with" statement from being suppressed.# Don't re-raise the passed in exception. (issue27122)# Likewise, avoid suppressing if a StopIteration exception# was passed to throw() and later wrapped into a RuntimeError# (see PEP 479).# only re-raise if it's *not* the exception that was# passed to throw(), because __exit__() must not raise# an exception unless __exit__() itself failed. But throw()# has to raise the exception to signal propagation, so this# fixes the impedance mismatch between the throw() protocol# and the __exit__() protocol.# This cannot use 'except BaseException as exc' (as in the# async implementation) to maintain compatibility with# Python 2, where old-style class exceptions are not caught# by 'except BaseException'.# See _GeneratorContextManager.__exit__ for comments on subtleties# in this implementation# Avoid suppressing if a StopIteration exception# (see PEP 479 for sync generators; async generators also# have this behavior). But do this only if the exception wrapped# by the RuntimeError is actully Stop(Async)Iteration (see# issue29692).# We use a list of old targets to make this CM re-entrant# Unlike isinstance and issubclass, CPython exception handling# currently only looks at the concrete type hierarchy (ignoring# the instance and subclass checking hooks). While Guido considers# that a bug rather than a feature, it's a fairly hard one to fix# due to various internal implementation details. 
suppress provides# the simpler issubclass based semantics, rather than trying to# exactly reproduce the limitations of the CPython interpreter.# See http://bugs.python.org/issue12029 for more details# We use an unbound method rather than a bound method to follow# the standard lookup behaviour for special methods.# Not a context manager, so assume it's a callable.# Allow use as a decorator.# We look up the special methods on the type to match the with# statement.# We changed the signature, so using @wraps is not appropriate, but# setting __wrapped__ may still help with introspection.# Allow use as a decorator# Inspired by discussions on http://bugs.python.org/issue13585# We manipulate the exception state so it behaves as though# we were actually nesting multiple with statements# Context may not be correct, so find the end of the chain# Context is already set correctly (see issue 20317)# Change the end of the chain to point to the exception# we expect it to reference# Callbacks are invoked in LIFO order to match the behaviour of# nested context managers# simulate the stack of exceptions by setting the context# bare "raise exc_details[1]" replaces our carefully# set-up context# Inspired by discussions on https://bugs.python.org/issue29302# Not an async context manager, so assume it's a coroutine functionb'Utilities for with-statement contexts. See PEP 343.'u'Utilities for with-statement contexts. See PEP 343.'b'asynccontextmanager'u'asynccontextmanager'b'contextmanager'u'contextmanager'b'closing'u'closing'b'nullcontext'u'nullcontext'b'AbstractContextManager'u'AbstractContextManager'b'AbstractAsyncContextManager'u'AbstractAsyncContextManager'b'AsyncExitStack'u'AsyncExitStack'b'ContextDecorator'u'ContextDecorator'b'ExitStack'u'ExitStack'b'redirect_stdout'u'redirect_stdout'b'redirect_stderr'u'redirect_stderr'b'suppress'u'suppress'b'An abstract base class for context managers.'u'An abstract base class for context managers.'b'Return `self` upon entering the runtime context.'u'Return `self` upon entering the runtime context.'b'Raise any exception triggered within the runtime context.'u'Raise any exception triggered within the runtime context.'b'__enter__'u'__enter__'b'__exit__'u'__exit__'b'An abstract base class for asynchronous context managers.'u'An abstract base class for asynchronous context managers.'b'__aenter__'u'__aenter__'b'__aexit__'u'__aexit__'b'A base class or mixin that enables context managers to work as decorators.'u'A base class or mixin that enables context managers to work as decorators.'b'Return a recreated instance of self. + + Allows an otherwise one-shot context manager like + _GeneratorContextManager to support use as + a decorator via implicit recreation. + + This is a private interface just for _GeneratorContextManager. + See issue #11647 for details. + 'u'Return a recreated instance of self. + + Allows an otherwise one-shot context manager like + _GeneratorContextManager to support use as + a decorator via implicit recreation. + + This is a private interface just for _GeneratorContextManager. + See issue #11647 for details. 
+ 'b'Shared functionality for @contextmanager and @asynccontextmanager.'u'Shared functionality for @contextmanager and @asynccontextmanager.'b'Helper for @contextmanager decorator.'u'Helper for @contextmanager decorator.'b'generator didn't yield'u'generator didn't yield'b'generator didn't stop'u'generator didn't stop'b'generator didn't stop after throw()'u'generator didn't stop after throw()'b'Helper for @asynccontextmanager.'u'Helper for @asynccontextmanager.'b'generator didn't stop after athrow()'u'generator didn't stop after athrow()'b'@contextmanager decorator. + + Typical usage: + + @contextmanager + def some_generator(): + + try: + yield + finally: + + + This makes this: + + with some_generator() as : + + + equivalent to this: + + + try: + = + + finally: + + 'u'@contextmanager decorator. + + Typical usage: + + @contextmanager + def some_generator(): + + try: + yield + finally: + + + This makes this: + + with some_generator() as : + + + equivalent to this: + + + try: + = + + finally: + + 'b'@asynccontextmanager decorator. + + Typical usage: + + @asynccontextmanager + async def some_async_generator(): + + try: + yield + finally: + + + This makes this: + + async with some_async_generator() as : + + + equivalent to this: + + + try: + = + + finally: + + 'u'@asynccontextmanager decorator. + + Typical usage: + + @asynccontextmanager + async def some_async_generator(): + + try: + yield + finally: + + + This makes this: + + async with some_async_generator() as : + + + equivalent to this: + + + try: + = + + finally: + + 'b'Context to automatically close something at the end of a block. + + Code like this: + + with closing(.open()) as f: + + + is equivalent to this: + + f = .open() + try: + + finally: + f.close() + + 'u'Context to automatically close something at the end of a block. + + Code like this: + + with closing(.open()) as f: + + + is equivalent to this: + + f = .open() + try: + + finally: + f.close() + + 'b'Context manager for temporarily redirecting stdout to another file. + + # How to send help() to stderr + with redirect_stdout(sys.stderr): + help(dir) + + # How to write help() to a file + with open('help.txt', 'w') as f: + with redirect_stdout(f): + help(pow) + 'u'Context manager for temporarily redirecting stdout to another file. + + # How to send help() to stderr + with redirect_stdout(sys.stderr): + help(dir) + + # How to write help() to a file + with open('help.txt', 'w') as f: + with redirect_stdout(f): + help(pow) + 'b'Context manager for temporarily redirecting stderr to another file.'u'Context manager for temporarily redirecting stderr to another file.'b'Context manager to suppress specified exceptions + + After the exception is suppressed, execution proceeds with the next + statement following the with statement. + + with suppress(FileNotFoundError): + os.remove(somefile) + # Execution still resumes here if the file was already removed + 'u'Context manager to suppress specified exceptions + + After the exception is suppressed, execution proceeds with the next + statement following the with statement. + + with suppress(FileNotFoundError): + os.remove(somefile) + # Execution still resumes here if the file was already removed + 'b'A base class for ExitStack and AsyncExitStack.'u'A base class for ExitStack and AsyncExitStack.'b'Preserve the context stack by transferring it to a new instance.'u'Preserve the context stack by transferring it to a new instance.'b'Registers a callback with the standard __exit__ method signature. 
+ + Can suppress exceptions the same way __exit__ method can. + Also accepts any object with an __exit__ method (registering a call + to the method instead of the object itself). + 'u'Registers a callback with the standard __exit__ method signature. + + Can suppress exceptions the same way __exit__ method can. + Also accepts any object with an __exit__ method (registering a call + to the method instead of the object itself). + 'b'Enters the supplied context manager. + + If successful, also pushes its __exit__ method as a callback and + returns the result of the __enter__ method. + 'u'Enters the supplied context manager. + + If successful, also pushes its __exit__ method as a callback and + returns the result of the __enter__ method. + 'b'Registers an arbitrary callback and arguments. + + Cannot suppress exceptions. + 'u'Registers an arbitrary callback and arguments. + + Cannot suppress exceptions. + 'b'descriptor 'callback' of '_BaseExitStack' object needs an argument'u'descriptor 'callback' of '_BaseExitStack' object needs an argument'b'callback'u'callback'b'Passing 'callback' as keyword argument is deprecated'u'Passing 'callback' as keyword argument is deprecated'b'callback expected at least 1 positional argument, got %d'u'callback expected at least 1 positional argument, got %d'b'($self, callback, /, *args, **kwds)'u'($self, callback, /, *args, **kwds)'b'Helper to correctly register callbacks to __exit__ methods.'u'Helper to correctly register callbacks to __exit__ methods.'b'Context manager for dynamic management of a stack of exit callbacks. + + For example: + with ExitStack() as stack: + files = [stack.enter_context(open(fname)) for fname in filenames] + # All opened files will automatically be closed at the end of + # the with statement, even if attempts to open files later + # in the list raise an exception. + 'u'Context manager for dynamic management of a stack of exit callbacks. + + For example: + with ExitStack() as stack: + files = [stack.enter_context(open(fname)) for fname in filenames] + # All opened files will automatically be closed at the end of + # the with statement, even if attempts to open files later + # in the list raise an exception. + 'b'Immediately unwind the context stack.'u'Immediately unwind the context stack.'b'Async context manager for dynamic management of a stack of exit + callbacks. + + For example: + async with AsyncExitStack() as stack: + connections = [await stack.enter_async_context(get_connection()) + for i in range(5)] + # All opened connections will automatically be released at the + # end of the async with statement, even if attempts to open a + # connection later in the list raise an exception. + 'u'Async context manager for dynamic management of a stack of exit + callbacks. + + For example: + async with AsyncExitStack() as stack: + connections = [await stack.enter_async_context(get_connection()) + for i in range(5)] + # All opened connections will automatically be released at the + # end of the async with statement, even if attempts to open a + # connection later in the list raise an exception. + 'b'Enters the supplied async context manager. + + If successful, also pushes its __aexit__ method as a callback and + returns the result of the __aenter__ method. + 'u'Enters the supplied async context manager. + + If successful, also pushes its __aexit__ method as a callback and + returns the result of the __aenter__ method. + 'b'Registers a coroutine function with the standard __aexit__ method + signature. 
+ + Can suppress exceptions the same way __aexit__ method can. + Also accepts any object with an __aexit__ method (registering a call + to the method instead of the object itself). + 'u'Registers a coroutine function with the standard __aexit__ method + signature. + + Can suppress exceptions the same way __aexit__ method can. + Also accepts any object with an __aexit__ method (registering a call + to the method instead of the object itself). + 'b'Registers an arbitrary coroutine function and arguments. + + Cannot suppress exceptions. + 'u'Registers an arbitrary coroutine function and arguments. + + Cannot suppress exceptions. + 'b'descriptor 'push_async_callback' of 'AsyncExitStack' object needs an argument'u'descriptor 'push_async_callback' of 'AsyncExitStack' object needs an argument'b'push_async_callback expected at least 1 positional argument, got %d'u'push_async_callback expected at least 1 positional argument, got %d'b'Helper to correctly register coroutine function to __aexit__ + method.'u'Helper to correctly register coroutine function to __aexit__ + method.'b'Context manager that does no additional processing. + + Used as a stand-in for a normal context manager, when a particular + block of code is only sometimes used with a normal context manager: + + cm = optional_cm if condition else nullcontext() + with cm: + # Perform operation, using optional_cm if condition is True + 'u'Context manager that does no additional processing. + + Used as a stand-in for a normal context manager, when a particular + block of code is only sometimes used with a normal context manager: + + cm = optional_cm if condition else nullcontext() + with cm: + # Perform operation, using optional_cm if condition is True + 'u'contextlib'b'ContextVar'u'ContextVar'b'Token'u'Token'b'copy_context'u'copy_context'u'contextvars'Generic (shallow and deep) copying operations. + +Interface summary: + + import copy + + x = copy.copy(y) # make a shallow copy of y + x = copy.deepcopy(y) # make a deep copy of y + +For module specific errors, copy.Error is raised. + +The difference between shallow and deep copying is only relevant for +compound objects (objects that contain other objects, like lists or +class instances). + +- A shallow copy constructs a new compound object and then (to the + extent possible) inserts *the same objects* into it that the + original contains. + +- A deep copy constructs a new compound object and then, recursively, + inserts *copies* into it of the objects found in the original. + +Two problems often exist with deep copy operations that don't exist +with shallow copy operations: + + a) recursive objects (compound objects that, directly or indirectly, + contain a reference to themselves) may cause a recursive loop + + b) because deep copy copies *everything* it may copy too much, e.g. + administrative data structures that should be shared even between + copies + +Python's deep copy operation avoids these problems by: + + a) keeping a table of objects already copied during the current + copying pass + + b) letting user-defined classes override the copying operation or the + set of components copied + +This version does not copy types like module, class, function, method, +nor stack trace, stack frame, nor file, socket, window, nor array, nor +any similar types. + +Classes can use the same interfaces to control copying that they use +to control pickling: they can define methods called __getinitargs__(), +__getstate__() and __setstate__(). 
See the documentation for module +"pickle" for information on these methods. +org.python.corePyStringMapdeepcopyShallow copy operation on arbitrary Python objects. + + See the module's __doc__ string for more info. + _copy_dispatchcopier_copy_immutablereductorun(shallow)copyable object of type %s_reconstructCodeType_nilDeep copy operation on arbitrary Python objects. + + See the module's __doc__ string for more info. + _deepcopy_dispatch_deepcopy_atomicun(deep)copyable object of type %s_keep_alive_deepcopy_list_deepcopy_tuple_deepcopy_dict_deepcopy_methodKeeps a reference to the object x in the memo. + + Because we remember objects by their id, we have + to assure that possibly temporary objects are kept + alive by referencing them. + We store a reference at the id of the memo, which should + normally not be used unless someone tries to deepcopy + the memo itself... + listiterdictiterdeepslotstate# backward compatibility# treat it as a regular class:# If is its own copy, don't memoize.# Make sure x lives at least as long as d# We're not going to put the tuple in the memo, but it's still important we# check for it, in case the tuple contains recursive mutable structures.# Copy instance methods# aha, this is the first one :-)b'Generic (shallow and deep) copying operations. + +Interface summary: + + import copy + + x = copy.copy(y) # make a shallow copy of y + x = copy.deepcopy(y) # make a deep copy of y + +For module specific errors, copy.Error is raised. + +The difference between shallow and deep copying is only relevant for +compound objects (objects that contain other objects, like lists or +class instances). + +- A shallow copy constructs a new compound object and then (to the + extent possible) inserts *the same objects* into it that the + original contains. + +- A deep copy constructs a new compound object and then, recursively, + inserts *copies* into it of the objects found in the original. + +Two problems often exist with deep copy operations that don't exist +with shallow copy operations: + + a) recursive objects (compound objects that, directly or indirectly, + contain a reference to themselves) may cause a recursive loop + + b) because deep copy copies *everything* it may copy too much, e.g. + administrative data structures that should be shared even between + copies + +Python's deep copy operation avoids these problems by: + + a) keeping a table of objects already copied during the current + copying pass + + b) letting user-defined classes override the copying operation or the + set of components copied + +This version does not copy types like module, class, function, method, +nor stack trace, stack frame, nor file, socket, window, nor array, nor +any similar types. + +Classes can use the same interfaces to control copying that they use +to control pickling: they can define methods called __getinitargs__(), +__getstate__() and __setstate__(). See the documentation for module +"pickle" for information on these methods. +'u'Generic (shallow and deep) copying operations. + +Interface summary: + + import copy + + x = copy.copy(y) # make a shallow copy of y + x = copy.deepcopy(y) # make a deep copy of y + +For module specific errors, copy.Error is raised. + +The difference between shallow and deep copying is only relevant for +compound objects (objects that contain other objects, like lists or +class instances). + +- A shallow copy constructs a new compound object and then (to the + extent possible) inserts *the same objects* into it that the + original contains. 
+ +- A deep copy constructs a new compound object and then, recursively, + inserts *copies* into it of the objects found in the original. + +Two problems often exist with deep copy operations that don't exist +with shallow copy operations: + + a) recursive objects (compound objects that, directly or indirectly, + contain a reference to themselves) may cause a recursive loop + + b) because deep copy copies *everything* it may copy too much, e.g. + administrative data structures that should be shared even between + copies + +Python's deep copy operation avoids these problems by: + + a) keeping a table of objects already copied during the current + copying pass + + b) letting user-defined classes override the copying operation or the + set of components copied + +This version does not copy types like module, class, function, method, +nor stack trace, stack frame, nor file, socket, window, nor array, nor +any similar types. + +Classes can use the same interfaces to control copying that they use +to control pickling: they can define methods called __getinitargs__(), +__getstate__() and __setstate__(). See the documentation for module +"pickle" for information on these methods. +'b'deepcopy'u'deepcopy'b'Shallow copy operation on arbitrary Python objects. + + See the module's __doc__ string for more info. + 'u'Shallow copy operation on arbitrary Python objects. + + See the module's __doc__ string for more info. + 'b'__copy__'u'__copy__'b'__reduce_ex__'u'__reduce_ex__'b'__reduce__'u'__reduce__'b'un(shallow)copyable object of type %s'u'un(shallow)copyable object of type %s'b'CodeType'u'CodeType'b'Deep copy operation on arbitrary Python objects. + + See the module's __doc__ string for more info. + 'u'Deep copy operation on arbitrary Python objects. + + See the module's __doc__ string for more info. + 'b'__deepcopy__'u'__deepcopy__'b'un(deep)copyable object of type %s'u'un(deep)copyable object of type %s'b'Keeps a reference to the object x in the memo. + + Because we remember objects by their id, we have + to assure that possibly temporary objects are kept + alive by referencing them. + We store a reference at the id of the memo, which should + normally not be used unless someone tries to deepcopy + the memo itself... + 'u'Keeps a reference to the object x in the memo. + + Because we remember objects by their id, we have + to assure that possibly temporary objects are kept + alive by referencing them. + We store a reference at the id of the memo, which should + normally not be used unless someone tries to deepcopy + the memo itself... + 'b'__setstate__'u'__setstate__'Helper to provide extensibility for pickle. + +This is only useful to add pickle support for extension types defined in +C, not for instances of user-defined classes. +constructoradd_extensionremove_extensionclear_extension_cacheob_typepickle_functionconstructor_obreduction functions must be callableconstructors must be callablepickle_complex_reconstructor_HEAPTYPE_reduce_excannot pickle object object: a class that defines __slots__ without defining __getstate__ cannot be pickled with protocol " object: ""a class that defines __slots__ without ""defining __getstate__ cannot be pickled ""with protocol "__newobj____newobj_ex__Used by pickle protocol 4, instead of __newobj__ to allow classes with + keyword-only arguments to be pickled correctly. + _slotnamesReturn a list of slot names for a given class. + + This needs to find slots defined by the class and its bases, so we + can't simply return the __slots__ attribute. 
We must walk down + the Method Resolution Order and concatenate the __slots__ of each + class found there. (This assumes classes don't modify their + __slots__ attribute to misrepresent their slots after the class is + defined.) + __slotnames__slots_%s%s_extension_registry_inverted_registry_extension_cacheRegister an extension code.code out of rangekey %s is already registered with code %scode %s is already in use for key %sUnregister an extension code. For testing only.key %s is not registered with code %s# The constructor_ob function is a vestige of safe for unpickling.# There is no reason for the caller to pass it anymore.# Example: provide pickling support for complex numbers.# Support for pickling new-style objects# Python code for object.__reduce_ex__ for protocols 0 and 1# not really reachable# Helper for __reduce_ex__ protocol 2# Get the value from a cache in the class if possible# Not cached -- calculate the value# This class has no slots# Slots found -- gather slot names from all base classes# if class has a single slot, it can be given as a string# special descriptors# mangled names# Cache the outcome in the class if at all possible# But don't die if we can't# A registry of extension codes. This is an ad-hoc compression# mechanism. Whenever a global reference to , is about# to be pickled, the (, ) tuple is looked up here to see# if it is a registered extension code for it. Extension codes are# universal, so that the meaning of a pickle does not depend on# context. (There are also some codes reserved for local use that# don't have this restriction.) Codes are positive ints; 0 is# reserved.# key -> code# code -> key# code -> object# Don't ever rebind those names: pickling grabs a reference to them when# it's initialized, and won't see a rebinding.# Redundant registrations are benign# Standard extension code assignments# Reserved ranges# First Last Count Purpose# 1 127 127 Reserved for Python standard library# 128 191 64 Reserved for Zope# 192 239 48 Reserved for 3rd parties# 240 255 16 Reserved for private use (will never be assigned)# 256 Inf Inf Reserved for future assignment# Extension codes are assigned by the Python Software Foundation.b'Helper to provide extensibility for pickle. + +This is only useful to add pickle support for extension types defined in +C, not for instances of user-defined classes. +'u'Helper to provide extensibility for pickle. + +This is only useful to add pickle support for extension types defined in +C, not for instances of user-defined classes. +'b'constructor'u'constructor'b'add_extension'u'add_extension'b'remove_extension'u'remove_extension'b'clear_extension_cache'u'clear_extension_cache'b'reduction functions must be callable'u'reduction functions must be callable'b'constructors must be callable'u'constructors must be callable'b'__flags__'u'__flags__'b'cannot pickle 'u'cannot pickle 'b' object'u' object'b' object: a class that defines __slots__ without defining __getstate__ cannot be pickled with protocol 'u' object: a class that defines __slots__ without defining __getstate__ cannot be pickled with protocol 'b'Used by pickle protocol 4, instead of __newobj__ to allow classes with + keyword-only arguments to be pickled correctly. + 'u'Used by pickle protocol 4, instead of __newobj__ to allow classes with + keyword-only arguments to be pickled correctly. + 'b'Return a list of slot names for a given class. + + This needs to find slots defined by the class and its bases, so we + can't simply return the __slots__ attribute. 
We must walk down + the Method Resolution Order and concatenate the __slots__ of each + class found there. (This assumes classes don't modify their + __slots__ attribute to misrepresent their slots after the class is + defined.) + 'u'Return a list of slot names for a given class. + + This needs to find slots defined by the class and its bases, so we + can't simply return the __slots__ attribute. We must walk down + the Method Resolution Order and concatenate the __slots__ of each + class found there. (This assumes classes don't modify their + __slots__ attribute to misrepresent their slots after the class is + defined.) + 'b'__slotnames__'u'__slotnames__'b'_%s%s'u'_%s%s'b'Register an extension code.'u'Register an extension code.'b'code out of range'u'code out of range'b'key %s is already registered with code %s'u'key %s is already registered with code %s'b'code %s is already in use for key %s'u'code %s is already in use for key %s'b'Unregister an extension code. For testing only.'u'Unregister an extension code. For testing only.'b'key %s is not registered with code %s'u'key %s is not registered with code %s'PYTHONASYNCIODEBUG_DEBUGCoroWrapperisgeneratorextract_stackcoro_repr, created at f_lasti was never yielded from +Coroutine object created at (most recent call last, truncated to '\nCoroutine object created at ''(most recent call last, truncated to ' last lines): +Decorator to mark coroutines. + + If the coroutine is not yielded from before it is destroyed, + an error message is logged. + "@coroutine" decorator is deprecated since Python 3.8, use "async def" insteadisgeneratorfunctionawait_meth_is_coroutineReturn True if func is a decorated coroutine function.CoroutineTypeGeneratorType_COROUTINE_TYPES_iscoroutine_typecacheReturn True if obj is a coroutine object.is_corowrapper_format_callbackcoro_name without __name__>cr_runningcoro_codecr_code runningcoro_frame_get_function_source done, defined at running, defined at running at # If you set _DEBUG to true, @coroutine will wrap the resulting# generator objects in a CoroWrapper instance (defined below). That# instance will log a message when the generator is never iterated# over, which may happen when you forget to use "await" or "yield from"# with a coroutine call.# Note that the value of the _DEBUG flag is taken# when the decorator is used, so to be of any use it must be set# before you define your coroutines. A downside of using this feature# is that tracebacks show entries for the CoroWrapper.__next__ method# when _DEBUG is true.# Wrapper for coroutine object in _DEBUG mode.# Used to unwrap @coroutine decorator# Be careful accessing self.gen.frame -- self.gen might not exist.# In Python 3.5 that's all we need to do for coroutines# defined with "async def".# If 'res' is an awaitable, run it.# Python < 3.5 does not implement __qualname__# on generator objects, so we set it manually.# We use getattr as some callables (such as# functools.partial may lack __qualname__).# For iscoroutinefunction().# A marker for iscoroutinefunction.# Prioritize native coroutine check to speed-up# asyncio.iscoroutine.# Just in case we don't want to cache more than 100# positive types. That shouldn't ever happen, unless# someone stressing the system on purpose.# Coroutines compiled with Cython sometimes don't have# proper __qualname__ or __name__. 
While that is a bug# in Cython, asyncio shouldn't crash with an AttributeError# in its __repr__ functions.# Stop masking Cython bugs, expose them in a friendly way.# Built-in types might not have __qualname__ or __name__.# If Cython's coroutine has a fake code object without proper# co_filename -- expose that.b'coroutine'u'coroutine'b'iscoroutinefunction'u'iscoroutinefunction'b'iscoroutine'u'iscoroutine'b'PYTHONASYNCIODEBUG'u'PYTHONASYNCIODEBUG'b', created at 'u', created at 'b'gen'u'gen'b' was never yielded from'u' was never yielded from'b'_source_traceback'u'_source_traceback'b' +Coroutine object created at (most recent call last, truncated to 'u' +Coroutine object created at (most recent call last, truncated to 'b' last lines): +'u' last lines): +'b'Decorator to mark coroutines. + + If the coroutine is not yielded from before it is destroyed, + an error message is logged. + 'u'Decorator to mark coroutines. + + If the coroutine is not yielded from before it is destroyed, + an error message is logged. + 'b'"@coroutine" decorator is deprecated since Python 3.8, use "async def" instead'u'"@coroutine" decorator is deprecated since Python 3.8, use "async def" instead'b'Return True if func is a decorated coroutine function.'u'Return True if func is a decorated coroutine function.'b'_is_coroutine'u'_is_coroutine'b'Return True if obj is a coroutine object.'u'Return True if obj is a coroutine object.'b' without __name__>'u' without __name__>'b'cr_code'u'cr_code'b'gi_code'u'gi_code'b' running'u' running'b''u''b' done, defined at 'u' done, defined at 'b' running, defined at 'u' running, defined at 'b' running at 'u' running at 'u'asyncio.coroutines'u'coroutines'Concrete date/time and related types. + +See http://www.iana.org/time-zones/repository/tz-link.html for +time zone and DST data sources. 
+_time3652059_MAXORDINAL_DAYS_IN_MONTH_DAYS_BEFORE_MONTHdim_is_leapyear -> 1 if leap year, else 0._days_before_yearyear -> number of days before January 1st of year.365_days_in_monthyear, month -> number of days in that month in that year._days_before_monthyear, month -> number of days in year preceding first day of month.month must be in 1..12_ymd2ordyear, month, day -> ordinal, considering 01-Jan-0001 as day 1.day must be in 1..%d_DI400Y_DI100Y_DI4Y_ord2ymdordinal -> (year, month, day), considering 01-Jan-0001 as day 1.n400n100n4n1leapyearprecedingJanFebMarAprMayJunJulAugSepOctNovDec_MONTHNAMESMonTueWedThuFriSatSun_DAYNAMES_build_struct_timehhdstflagwdaydnum_format_timetimespec{:02d}{:02d}:{:02d}{:02d}:{:02d}:{:02d}{:02d}:{:02d}:{:02d}.{:03d}milliseconds{:02d}:{:02d}:{:02d}.{:06d}specsUnknown timespec value_format_offsetoff%s%02d:%02d:%02d.%06d_wrap_strftimefreplacezreplaceZreplacenewformatch%06d%c%02d%02d%02d.%06d%c%02d%02d%02d%c%02d%02d%%_parse_isoformat_datedtstrInvalid date separator: %sInvalid date separator_parse_hh_mm_ss_fftstrlen_strtime_compsIncomplete time componentnext_charInvalid time separator: %cInvalid microsecond componentlen_remainder_parse_isoformat_timeIsoformat time too shorttz_postimestrtzitzstrMalformed time zone stringtz_compstd_check_tznametzinfo.tzname() must return None or string, not '%s'"tzinfo.tzname() must return None or string, ""not '%s'"_check_utc_offsettzinfo.%s() must return None or timedelta, not '%s'"tzinfo.%s() must return None ""or timedelta, not '%s'"%s()=%s, must be strictly between -timedelta(hours=24) and timedelta(hours=24)"%s()=%s, must be strictly between ""-timedelta(hours=24) and timedelta(hours=24)"_check_int_fieldinteger argument expected, got float__index__ returned non-int (type %s)orig__int__ returned non-int (type %s)an integer is required (got type %s)_check_date_fieldsyear must be in %d..%d_check_time_fieldshour must be in 0..23minute must be in 0..59second must be in 0..59microsecond must be in 0..999999fold must be either 0 or 1_check_tzinfo_argtzinfo argument must be None or of a tzinfo subclass_cmperrorcan't compare '%s' to '%s'_divide_and_rounddivide a by b and round result to the nearest integer + + When the ratio is exactly half-way between two integers, + the even integer is returned. + greater_than_halfRepresent the difference between two datetime objects. + + Supported operators: + + - add, subtract timedelta + - unary plus, minus, abs + - compare to timedelta + - multiply, divide by int + + In addition, datetime supports subtraction of two datetime objects + returning a timedelta, and addition or subtraction of a datetime + and a timedelta giving a datetime. + + Representation: (days, seconds, microseconds). Why? Because I + felt like it. + _seconds_microseconds_hashcodemodfdayfrac24.024.3600.03600.daysecondsfracdaysecondswholesecondsfrac2.01000000.01e6usdouble2100000.02.1e610000003100000.03.1e6999999999timedelta # of days is too large: %ddays=%dseconds=%dmicroseconds=%d%s.%s(%s)%d:%02d:%02dplural%d day%s, Total seconds in the duration.86400_to_microsecondsusec_getstateConcrete date type. + + Constructors: + + __new__() + fromtimestamp() + today() + fromordinal() + + Operators: + + __repr__, __str__ + __eq__, __le__, __lt__, __ge__, __gt__, __hash__ + __add__, __radd__, __sub__ (add/radd only with timedelta arg) + + Methods: + + timetuple() + toordinal() + weekday() + isoweekday(), isocalendar(), isoformat() + ctime() + strftime() + + Properties (readonly): + year, month, day + _year_month_dayConstructor. 
+ + Arguments: + + year, month, day (required, base 1) + Failed to encode latin1 string when unpickling a date object. pickle.load(data, encoding='latin1') is assumed."Failed to encode latin1 string when unpickling ""a date object. ""pickle.load(data, encoding='latin1') is assumed."__setstateConstruct a date from a POSIX timestamp (like time.time()).jdayConstruct a date from time.time().Construct a date from a proleptic Gregorian ordinal. + + January 1 of year 1 is day 1. Only the year, month and day are + non-zero in the result. + date_stringConstruct a date from the output of date.isoformat().fromisoformat: argument must be strInvalid isoformat string: Construct a date from the ISO year, week number and weekday. + + This is the inverse of the date.isocalendar() functionYear is out of range: out_of_rangefirst_weekdayInvalid week: Invalid weekday: (range is [1, 7])day_offset_isoweek1mondayday_1ord_dayConvert to formal string, for repr(). + + >>> dt = datetime(2010, 1, 1) + >>> repr(dt) + 'datetime.datetime(2010, 1, 1, 0, 0)' + + >>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc) + >>> repr(dt) + 'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)' + %s.%s(%d, %d, %d)Return ctime() style string.%s %s %2d 00:00:00 %04dFormat using strftime().must be str, not %sReturn the date formatted according to ISO. + + This is 'YYYY-MM-DD'. + + References: + - http://www.w3.org/TR/NOTE-datetime + - http://www.cl.cam.ac.uk/~mgk25/iso-time.html + %04d-%02d-%02dyear (1-9999)month (1-12)day (1-31)Return local time tuple compatible with time.localtime().Return proleptic Gregorian ordinal for the year, month and day. + + January 1 of year 1 is day 1. Only the year, month and day values + contribute to the result. + Return a new date with new values for the specified fields.m2Hash.Add a date to a timedelta.result out of rangeSubtract two dates, or a date and a timedelta.days1days2Return day of the week, where Monday == 0 ... Sunday == 6.Return day of the week, where Monday == 1 ... Sunday == 7.Return a 3-tuple containing ISO year, week number, and weekday. + + The first ISO week of the year is the (Mon-Sun) week + containing the year's first Thursday; everything else derives + from that. + + The first week is 1; Monday is 1 ... Sunday is 7. + + ISO calendar algorithm taken from + http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm + (used with permission) + week1mondayyhiylo_date_classAbstract base class for time zone info classes. + + Subclasses must override the name(), utcoffset() and dst() methods. + datetime -> string name of time zone.tzinfo subclass must override tzname()datetime -> timedelta, positive for east of UTC, negative for west of UTCtzinfo subclass must override utcoffset()datetime -> DST offset as timedelta, positive for east of UTC. + + Return 0 if DST not in effect. utcoffset() must include the DST + offset. + tzinfo subclass must override dst()datetime in UTC -> datetime in local time.fromutc() requires a datetime argumentdt.tzinfo is not selfdtofffromutc() requires a non-None utcoffset() result"fromutc() requires a non-None utcoffset() ""result"dtdstfromutc() requires a non-None dst() resultfromutc(): dt.dst gave inconsistent results; cannot convert"fromutc(): dt.dst gave inconsistent ""results; cannot convert"getinitargs_tzinfo_classTime with time zone. 
+ + Constructors: + + __new__() + + Operators: + + __repr__, __str__ + __eq__, __le__, __lt__, __ge__, __gt__, __hash__ + + Methods: + + strftime() + isoformat() + utcoffset() + tzname() + dst() + + Properties (readonly): + hour, minute, second, microsecond, tzinfo, fold + _hour_minute_second_microsecond_tzinfoConstructor. + + Arguments: + + hour, minute (required) + second, microsecond (default to zero) + tzinfo (default to None) + fold (keyword only, default to zero) + 0x7FFailed to encode latin1 string when unpickling a time object. pickle.load(data, encoding='latin1') is assumed."a time object. "hour (0-23)minute (0-59)second (0-59)microsecond (0-999999)timezone info objectallow_mixedmytzottzmyoffotoffbase_comparecannot compare naive and aware timesmyhhmmothhmmtzoffwhole minute_tzstrReturn formatted timezone offset (+xx:xx) or an empty string.Convert to formal string, for repr()., %d, %d, %d%s.%s(%d, %d%s), tzinfo=%r, fold=1)Return the time formatted according to ISO. + + The full format is 'HH:MM:SS.mmmmmm+zz:zz'. By default, the fractional + part is omitted if self.microsecond == 0. + + The optional argument timespec specifies the number of additional + terms of the time to include. Valid options are 'auto', 'hours', + 'minutes', 'seconds', 'milliseconds' and 'microseconds'. + time_stringConstruct a time from the output of isoformat().Format using strftime(). The date part of the timestamp passed + to underlying strftime should not be used. + Return the timezone offset as timedelta, positive east of UTC + (negative west of UTC).Return the timezone name. + + Note that the name is 100% informational -- there's no requirement that + it mean anything in particular. For example, "GMT", "UTC", "-500", + "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies. + Return 0 if DST is not in effect, or the DST offset (as timedelta + positive eastward) if DST is in effect. + + This is purely informational; the DST offset has already been added to + the UTC offset returned by utcoffset() if applicable, so there's no + need to consult dst() unless you're interested in displaying the DST + info. + Return a new time with new values for the specified fields.us2us3us1basestatebad tzinfo state arg_time_classdatetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]]) + + The year, month and day arguments are required. tzinfo may be None, or an + instance of a tzinfo subclass. The remaining arguments may be ints. + Failed to encode latin1 string when unpickling a datetime object. pickle.load(data, encoding='latin1') is assumed."a datetime object. "_fromtimestampConstruct a datetime from a POSIX timestamp (like time.time()). + + A timezone info object may be passed in as well. 
(CodeQL string-pool page contents omitted: raw interned strings, identifiers, and docstrings from the extracted Python standard library modules datetime, distutils.dep_util, and difflib; not human-readable diff content.)
In addition to yielding the lines of from/to text, a + boolean flag is yielded to indicate if the text line(s) have + differences in them. + + Note, this function is purposefully not defined at the module scope so + that data it needs from its parent function (within whose context it + is defined) does not need to be of module scope. + num_blanks_pendingnum_blanks_to_yield-?+?--++--?+--+from_lineto_line-+?-?++--+-_line_pair_iteratorYields from/to lines of text with a change indication. + + This function is an iterator. It itself pulls lines from the line + iterator. Its difference from that iterator is that this function + always yields a pair of from/to text lines (with the change + indication). If necessary it will collect single from/to lines + until it has a matching pair from/to pair to yield. + + Note, this function is purposefully not defined at the module scope so + that data it needs from its parent function (within whose context it + is defined) does not need to be of module scope. + line_iteratorfound_difffromDiffto_diffline_pair_iteratorlines_to_writecontextLines + + + + + + + + + + + + %(table)s%(legend)s + + +_file_template + table.diff {font-family:Courier; border:medium;} + .diff_header {background-color:#e0e0e0} + td.diff_header {text-align:right} + .diff_next {background-color:#c0c0c0} + .diff_add {background-color:#aaffaa} + .diff_chg {background-color:#ffff77} + .diff_sub {background-color:#ffaaaa}_styles + + + + %(header_row)s + +%(data_rows)s +
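The _mdiff generator documented above carries no doctest in this dump, so here is a minimal sketch of driving it. _mdiff is an internal difflib helper, so treat the exact behaviour as version-dependent; the input lines are invented, and the marker strings are the ones listed in its docstring above.

from difflib import _mdiff

def strip_markers(text):
    # Remove the intraline change markers ('\0+', '\0-', '\0^', '\1') described above.
    for marker in ("\0+", "\0-", "\0^", "\1"):
        text = text.replace(marker, "")
    return text

fromlines = ["one\n", "two\n", "three\n"]
tolines = ["one\n", "tree\n", "emu\n"]

# context=None requests every line pair, so no separator tuples are produced.
for (from_num, from_text), (to_num, to_text), changed in _mdiff(fromlines, tolines, context=None):
    print(f"{from_num!s:>2} {strip_markers(from_text).rstrip():<8} | "
          f"{to_num!s:>2} {strip_markers(to_text).rstrip():<8} changed={changed}")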
_table_template / _legend (HTML table and legend templates; the tag markup is stripped in this dump). The legend table lists Colors: Added, Changed, Deleted; and Links: (f)irst change, (n)ext change, (t)op.
_legendFor producing HTML side by side comparison with change highlights. + + This class can be used to create an HTML table (or a complete HTML file + containing the table) showing a side by side, line by line comparison + of text with inter-line and intra-line change highlights. The table can + be generated in either full or contextual difference mode. + + The following methods are provided for HTML generation: + + make_table -- generates HTML for a single side by side table + make_file -- generates complete HTML file with a single side by side table + + See tools/scripts/diff.py for an example usage of this class. + _default_prefixwrapcolumnHtmlDiff instance initializer + + Arguments: + tabsize -- tab stop spacing, defaults to 8. + wrapcolumn -- column number where lines are broken and wrapped, + defaults to None where lines are not wrapped. + linejunk,charjunk -- keyword arguments passed into ndiff() (used by + HtmlDiff() to generate the side by side HTML differences). See + ndiff() documentation for argument default values and descriptions. + _tabsize_wrapcolumn_linejunk_charjunkmake_filefromdesctodescnumlinesReturns HTML file of side by side comparison with change highlights + + Arguments: + fromlines -- list of "from" lines + tolines -- list of "to" lines + fromdesc -- "from" file column header string + todesc -- "to" file column header string + context -- set to True for contextual differences (defaults to False + which shows full differences). + numlines -- number of context lines. When context is set True, + controls number of lines displayed before and after the change. + When context is False, controls the number of lines to place + the "next" link anchors before the next change (so click of + "next" link jumps to just before the change). + charset -- charset of the HTML document + styleslegendmake_tabletable_tab_newline_replaceReturns from/to line lists with tabs expanded and newlines removed. + + Instead of tab characters being replaced by the number of spaces + needed to fill in to the next tab stop, this function will fill + the space with tab characters. This is done so that the difference + algorithms can identify changes in a file when tabs are replaced by + spaces and vice versa. At the end of the HTML generation, the tab + characters will be replaced with a nonbreakable space. + expand_tabs_split_linedata_listline_numBuilds list of text lines by splitting text lines at wrap point + + This function will determine if the input text line needs to be + wrapped (split) into separate lines. If so, the first wrap point + will be determined and the first line appended to the output + text line list. This function is used recursively to handle + the second part of the split line to further split it. + line1line2_line_wrapperdiffsReturns iterator that splits (wraps) mdiff text linesfromdatatodatafromlinefromtexttolinetotext_collect_linesCollects mdiff output into separate lists + + Before storing the mdiff from/to data into a list, it is converted + into a single line of text with HTML markup. 
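The HtmlDiff class described here has no usage example in this dump; a minimal sketch follows. The input lines, column headers, and the output path diff_report.html are all invented for illustration.

import difflib

fromlines = ["one\n", "two\n", "three\n"]
tolines = ["one\n", "tree\n", "emu\n"]

differ = difflib.HtmlDiff(tabsize=4, wrapcolumn=72)

# Complete HTML document containing the side-by-side table and the legend.
html_page = differ.make_file(fromlines, tolines,
                             fromdesc="before.txt", todesc="after.txt",
                             context=True, numlines=2)

# Just the <table> fragment, for embedding in an existing page.
html_table = differ.make_table(fromlines, tolines)

with open("diff_report.html", "w", encoding="utf-8") as handle:
    handle.write(html_page)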
+ flaglist_format_linelinenumReturns HTML markup of "from" / "to" text lines + + side -- 0 or 1 indicating "from" or "to" text + flag -- indicates if difference on line + linenum -- line number (used for line number column) + text -- line text to be marked up + %d id="%s%s"_prefix %s%s_make_prefixCreate unique anchor prefixesfrom%d_fromprefixto%d_toprefix_convert_flagsMakes list of "next" linksnext_idnext_hrefnum_chgin_change id="difflib_chg_%s_%d"n No Differences Found  Empty File ftReturns HTML table of side by side comparison with change highlights + + Arguments: + fromlines -- list of "from" lines + tolines -- list of "to" lines + fromdesc -- "from" file column header string + todesc -- "to" file column header string + context -- set to True for contextual differences (defaults to False + which shows full differences). + numlines -- number of context lines. When context is set True, + controls number of lines displayed before and after the change. + When context is False, controls the number of lines to place + the "next" link anchors before the next change (so click of + "next" link jumps to just before the change). + context_lines %s%s%s%s + + +%s%s%s%s
%sheader_rowdata_rows+-^which + Generate one of the two sequences that generated a delta. + + Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract + lines originating from file 1 or 2 (parameter `which`), stripping off line + prefixes. + + Examples: + + >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True), + ... 'ore\ntree\nemu\n'.splitlines(keepends=True)) + >>> diff = list(diff) + >>> print(''.join(restore(diff, 1)), end="") + one + two + three + >>> print(''.join(restore(diff, 2)), end="") + ore + tree + emu + unknown delta choice (must be 1 or 2): %rprefixes# Members:# a# first sequence# b# second sequence; differences are computed as "what do# we need to do to 'a' to change it into 'b'?"# b2j# for x in b, b2j[x] is a list of the indices (into b)# at which x appears; junk and popular elements do not appear# fullbcount# for x in b, fullbcount[x] == the number of times x# appears in b; only materialized if really needed (used# only for computing quick_ratio())# matching_blocks# a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];# ascending & non-overlapping in i and in j; terminated by# a dummy (len(a), len(b), 0) sentinel# opcodes# a list of (tag, i1, i2, j1, j2) tuples, where tag is# one of# 'replace' a[i1:i2] should be replaced by b[j1:j2]# 'delete' a[i1:i2] should be deleted# 'insert' b[j1:j2] should be inserted# 'equal' a[i1:i2] == b[j1:j2]# isjunk# a user-supplied function taking a sequence element and# returning true iff the element is "junk" -- this has# subtle but helpful effects on the algorithm, which I'll# get around to writing up someday <0.9 wink>.# DON'T USE! Only __chain_b uses this. Use "in self.bjunk".# bjunk# the items in b for which isjunk is True.# bpopular# nonjunk items in b treated as junk by the heuristic (if used).# For each element x in b, set b2j[x] to a list of the indices in# b where x appears; the indices are in increasing order; note that# the number of times x appears in b is len(b2j[x]) ...# when self.isjunk is defined, junk elements don't show up in this# map at all, which stops the central find_longest_match method# from starting any matching block at a junk element ...# b2j also does not contain entries for "popular" elements, meaning# elements that account for more than 1 + 1% of the total elements, and# when the sequence is reasonably large (>= 200 elements); this can# be viewed as an adaptive notion of semi-junk, and yields an enormous# speedup when, e.g., comparing program files with hundreds of# instances of "return NULL;" ...# note that this is only called when b changes; so for cross-product# kinds of matches, it's best to call set_seq2 once, then set_seq1# repeatedly# Because isjunk is a user-defined (not C) function, and we test# for junk a LOT, it's important to minimize the number of calls.# Before the tricks described here, __chain_b was by far the most# time-consuming routine in the whole module! If anyone sees# Jim Roskind, thank him again for profile.py -- I never would# have guessed that.# The first trick is to build b2j ignoring the possibility# of junk. I.e., we don't call isjunk at all yet. Throwing# out the junk later is much cheaper than building b2j "right"# from the start.# Purge junk elements# separate loop avoids separate list of keys# Purge popular elements that are not junk# ditto; as fast for 1% deletion# CAUTION: stripping common prefix or suffix would be incorrect.# E.g.,# ab# acab# Longest matching block is "ab", but if common prefix is# stripped, it's "a" (tied with "b"). 
UNIX(tm) diff does so# strip, so ends up claiming that ab is changed to acab by# inserting "ca" in the middle. That's minimal but unintuitive:# "it's obvious" that someone inserted "ac" at the front.# Windiff ends up at the same place as diff, but by pairing up# the unique 'b's and then matching the first two 'a's.# find longest junk-free match# during an iteration of the loop, j2len[j] = length of longest# junk-free match ending with a[i-1] and b[j]# look at all instances of a[i] in b; note that because# b2j has no junk keys, the loop is skipped if a[i] is junk# a[i] matches b[j]# Extend the best by non-junk elements on each end. In particular,# "popular" non-junk elements aren't in b2j, which greatly speeds# the inner loop above, but also means "the best" match so far# doesn't contain any junk *or* popular non-junk elements.# Now that we have a wholly interesting match (albeit possibly# empty!), we may as well suck up the matching junk on each# side of it too. Can't think of a good reason not to, and it# saves post-processing the (possibly considerable) expense of# figuring out what to do with it. In the case of an empty# interesting match, this is clearly the right thing to do,# because no other kind of match is possible in the regions.# This is most naturally expressed as a recursive algorithm, but# at least one user bumped into extreme use cases that exceeded# the recursion limit on their box. So, now we maintain a list# ('queue`) of blocks we still need to look at, and append partial# results to `matching_blocks` in a loop; the matches are sorted# at the end.# a[alo:i] vs b[blo:j] unknown# a[i:i+k] same as b[j:j+k]# a[i+k:ahi] vs b[j+k:bhi] unknown# if k is 0, there was no matching block# It's possible that we have adjacent equal blocks in the# matching_blocks list now. Starting with 2.5, this code was added# to collapse them.# Is this block adjacent to i1, j1, k1?# Yes, so collapse them -- this just increases the length of# the first block by the length of the second, and the first# block so lengthened remains the block to compare against.# Not adjacent. Remember the first block (k1==0 means it's# the dummy we started with), and make the second block the# new block to compare against.# invariant: we've pumped out correct diffs to change# a[:i] into b[:j], and the next matching block is# a[ai:ai+size] == b[bj:bj+size]. So we need to pump# out a diff to change a[i:ai] into b[j:bj], pump out# the matching block, and move (i,j) beyond the match# the list of matching blocks is terminated by a# sentinel with size 0# Fixup leading and trailing groups if they show no changes.# End the current group and start a new one whenever# there is a large range with no changes.# viewing a and b as multisets, set matches to the cardinality# of their intersection; this counts the number of matches# without regard to order, so is clearly an upper bound# avail[x] is the number of times x appears in 'b' less the# number of times we've seen it in 'a' so far ... 
kinda# can't have more matches than the number of elements in the# shorter sequence# Move the best scorers to head of list# Strip scores for the best n matches# dump the shorter block first -- reduces the burden on short-term# memory if the blocks are of very different sizes# don't synch up unless the lines have a similarity score of at# least cutoff; best_ratio tracks the best score seen so far# 1st indices of equal lines (if any)# search for the pair that matches best without being identical# (identical lines must be junk lines, & we don't want to synch up# on junk -- unless we have to)# computing similarity is expensive, so use the quick# upper bounds first -- have seen this speed up messy# compares by a factor of 3.# note that ratio() is only expensive to compute the first# time it's called on a sequence pair; the expensive part# of the computation is cached by cruncher# no non-identical "pretty close" pair# no identical pair either -- treat it as a straight replace# no close pair, but an identical pair -- synch up on that# there's a close pair, so forget the identical pair (if any)# a[best_i] very similar to b[best_j]; eqi is None iff they're not# identical# pump out diffs from before the synch point# do intraline marking on the synch pair# pump out a '-', '?', '+', '?' quad for the synched lines# the synch pair is identical# pump out diffs from after the synch point# With respect to junk, an earlier version of ndiff simply refused to# *start* a match with a junk element. The result was cases like this:# before: private Thread currentThread;# after: private volatile Thread currentThread;# If you consider whitespace to be junk, the longest contiguous match# not starting with junk is "e Thread currentThread". So ndiff reported# that "e volatil" was inserted between the 't' and the 'e' in "private".# While an accurate view, to people that's absurd. The current version# looks for matching blocks that are entirely junk-free, then extends the# longest one of those as far as possible but only with matching junk.# So now "currentThread" is matched, then extended to suck up the# preceding blank; then "private" is matched, and extended to suck up the# following blank; then "Thread" is matched; and finally ndiff reports# that "volatile " was inserted before "Thread". The only quibble# remaining is that perhaps it was really the case that " volatile"# was inserted after "private". I can live with that .### Unified Diff# Per the diff spec at http://www.unix.org/single_unix_specification/# lines start numbering with one# empty ranges begin at line just before the range### Context Diff# See http://www.unix.org/single_unix_specification/# Checking types is weird, but the alternative is garbled output when# someone passes mixed bytes and str to {unified,context}_diff(). 
E.g.# without this check, passing filenames as bytes results in output like# --- b'oldfile.txt'# +++ b'newfile.txt'# because of how str.format() incorporates bytes objects.# regular expression for finding intraline change indices# create the difference iterator to generate the differences# Handle case where no user markup is to be added, just return line of# text with user's line format to allow for usage of the line number.# Handle case of intraline changes# find intraline changes (store change type and indices in tuples)# process each tuple inserting our special marks that won't be# noticed by an xml/html escaper.# Handle case of add/delete entire line# if line of text is just a newline, insert a space so there is# something for the user to highlight and see.# insert marks that won't be noticed by an xml/html escaper.# Return line of text, first allow user's line formatter to do its# thing (such as adding the line number) then replace the special# marks with what the user's change markup.# Load up next 4 lines so we can look ahead, create strings which# are a concatenation of the first character of each of the 4 lines# so we can do some very readable comparisons.# When no more lines, pump out any remaining blank lines so the# corresponding add/delete lines get a matching blank line so# all line pairs get yielded at the next level.# simple intraline change# in delete block, add block coming: we do NOT want to get# caught up on blank lines yet, just process the delete line# in delete block and see an intraline change or unchanged line# coming: yield the delete line and then blanks# intraline change# delete FROM line# in add block, delete block coming: we do NOT want to get# caught up on blank lines yet, just process the add line# will be leaving an add block: yield blanks then add line# inside an add block, yield the add line# unchanged text, yield it to both sides# Catch up on the blank lines so when we yield the next from/to# pair, they are lined up.# Collecting lines of text until we have a from/to pair# Once we have a pair, remove them from the collection and yield it# Handle case where user does not want context differencing, just yield# them up without doing anything else with them.# Handle case where user wants context differencing. 
We must do some# storage of lines until we know for sure that they are to be yielded.# Store lines up until we find a difference, note use of a# circular queue because we only need to keep around what# we need for context.# Yield lines that we have collected so far, but first yield# the user's separator.# Now yield the context lines after the change# If another change within the context, extend the context# Catch exception from next() and return normally# hide real spaces# expand tabs into spaces# replace spaces from expanded tabs back into tab characters# (we'll replace them with markup after we do differencing)# if blank line or context separator, just add it to the output list# if line text doesn't need wrapping, just add it to the output list# scan text looking for the wrap point, keeping track if the wrap# point is inside markers# wrap point is inside text, break it up into separate lines# if wrap point is inside markers, place end marker at end of first# line and start marker at beginning of second line because each# line will have its own table tag markup around it.# tack on first line onto the output list# use this routine again to wrap the remaining text# pull from/to data and flags from mdiff iterator# check for context separators and pass them through# for each from/to line split it at the wrap column to form# list of text lines.# yield from/to line in pairs inserting blank lines as# necessary when one side has more wrapped lines# pull from/to data and flags from mdiff style iterator# store HTML markup of the lines into the lists# exceptions occur for lines where context separators go# handle blank lines where linenum is '>' or ''# replace those things that would get confused with HTML symbols# make space non-breakable so they don't get compressed or line wrapped# Generate a unique anchor prefix so multiple tables# can exist on the same HTML page without conflicts.# store prefixes so line format method has access# all anchor names will be generated using the unique "to" prefix# process change flags, generating middle column of next anchors/links# at the beginning of a change, drop an anchor a few lines# (the context lines) before the change for the previous# link# at the beginning of a change, drop a link to the next# change# check for cases where there is no content to avoid exceptions# if not a change on first line, drop a link# redo the last link to link to the top# make unique anchor prefixes so that multiple tables may exist# on the same page without conflict.# change tabs to spaces before it gets more difficult after we insert# markup# create diffs iterator which generates side by side from/to data# set up iterator to wrap lines that exceed desired width# collect up from/to lines and flags into lists (also format the lines)# mdiff yields None on separator lines skip the bogus ones# generated for the first lineb' +Module difflib -- helpers for computing deltas between objects. + +Function get_close_matches(word, possibilities, n=3, cutoff=0.6): + Use SequenceMatcher to return list of the best "good enough" matches. + +Function context_diff(a, b): + For two lists of strings, return a delta in context diff format. + +Function ndiff(a, b): + Return a delta: the difference between `a` and `b` (lists of strings). + +Function restore(delta, which): + Return one of the two sequences that generated an ndiff delta. + +Function unified_diff(a, b): + For two lists of strings, return a delta in unified diff format. 
+ +Class SequenceMatcher: + A flexible class for comparing pairs of sequences of any type. + +Class Differ: + For producing human-readable deltas from sequences of lines of text. + +Class HtmlDiff: + For producing HTML side by side comparison with change highlights. +'u' +Module difflib -- helpers for computing deltas between objects. + +Function get_close_matches(word, possibilities, n=3, cutoff=0.6): + Use SequenceMatcher to return list of the best "good enough" matches. + +Function context_diff(a, b): + For two lists of strings, return a delta in context diff format. + +Function ndiff(a, b): + Return a delta: the difference between `a` and `b` (lists of strings). + +Function restore(delta, which): + Return one of the two sequences that generated an ndiff delta. + +Function unified_diff(a, b): + For two lists of strings, return a delta in unified diff format. + +Class SequenceMatcher: + A flexible class for comparing pairs of sequences of any type. + +Class Differ: + For producing human-readable deltas from sequences of lines of text. + +Class HtmlDiff: + For producing HTML side by side comparison with change highlights. +'b'get_close_matches'u'get_close_matches'b'ndiff'u'ndiff'b'restore'u'restore'b'SequenceMatcher'u'SequenceMatcher'b'Differ'u'Differ'b'IS_CHARACTER_JUNK'u'IS_CHARACTER_JUNK'b'IS_LINE_JUNK'u'IS_LINE_JUNK'b'context_diff'u'context_diff'b'unified_diff'u'unified_diff'b'diff_bytes'u'diff_bytes'b'HtmlDiff'u'HtmlDiff'b'Match'u'Match'b'a b size'u'a b size'b' + SequenceMatcher is a flexible class for comparing pairs of sequences of + any type, so long as the sequence elements are hashable. The basic + algorithm predates, and is a little fancier than, an algorithm + published in the late 1980's by Ratcliff and Obershelp under the + hyperbolic name "gestalt pattern matching". The basic idea is to find + the longest contiguous matching subsequence that contains no "junk" + elements (R-O doesn't address junk). The same idea is then applied + recursively to the pieces of the sequences to the left and to the right + of the matching subsequence. This does not yield minimal edit + sequences, but does tend to yield matches that "look right" to people. + + SequenceMatcher tries to compute a "human-friendly diff" between two + sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the + longest *contiguous* & junk-free matching subsequence. That's what + catches peoples' eyes. The Windows(tm) windiff has another interesting + notion, pairing up elements that appear uniquely in each sequence. + That, and the method here, appear to yield more intuitive difference + reports than does diff. This method appears to be the least vulnerable + to synching up on blocks of "junk lines", though (like blank lines in + ordinary text files, or maybe "

" lines in HTML files). That may be + because this is the only method of the 3 that has a *concept* of + "junk" . + + Example, comparing two strings, and considering blanks to be "junk": + + >>> s = SequenceMatcher(lambda x: x == " ", + ... "private Thread currentThread;", + ... "private volatile Thread currentThread;") + >>> + + .ratio() returns a float in [0, 1], measuring the "similarity" of the + sequences. As a rule of thumb, a .ratio() value over 0.6 means the + sequences are close matches: + + >>> print(round(s.ratio(), 3)) + 0.866 + >>> + + If you're only interested in where the sequences match, + .get_matching_blocks() is handy: + + >>> for block in s.get_matching_blocks(): + ... print("a[%d] and b[%d] match for %d elements" % block) + a[0] and b[0] match for 8 elements + a[8] and b[17] match for 21 elements + a[29] and b[38] match for 0 elements + + Note that the last tuple returned by .get_matching_blocks() is always a + dummy, (len(a), len(b), 0), and this is the only case in which the last + tuple element (number of elements matched) is 0. + + If you want to know how to change the first sequence into the second, + use .get_opcodes(): + + >>> for opcode in s.get_opcodes(): + ... print("%6s a[%d:%d] b[%d:%d]" % opcode) + equal a[0:8] b[0:8] + insert a[8:8] b[8:17] + equal a[8:29] b[17:38] + + See the Differ class for a fancy human-friendly file differencer, which + uses SequenceMatcher both to compare sequences of lines, and to compare + sequences of characters within similar (near-matching) lines. + + See also function get_close_matches() in this module, which shows how + simple code building on SequenceMatcher can be used to do useful work. + + Timing: Basic R-O is cubic time worst case and quadratic time expected + case. SequenceMatcher is quadratic time for the worst case and has + expected-case behavior dependent in a complicated way on how many + elements the sequences have in common; best case time is linear. + + Methods: + + __init__(isjunk=None, a='', b='') + Construct a SequenceMatcher. + + set_seqs(a, b) + Set the two sequences to be compared. + + set_seq1(a) + Set the first sequence to be compared. + + set_seq2(b) + Set the second sequence to be compared. + + find_longest_match(alo, ahi, blo, bhi) + Find longest matching block in a[alo:ahi] and b[blo:bhi]. + + get_matching_blocks() + Return list of triples describing matching subsequences. + + get_opcodes() + Return list of 5-tuples describing how to turn a into b. + + ratio() + Return a measure of the sequences' similarity (float in [0,1]). + + quick_ratio() + Return an upper bound on .ratio() relatively quickly. + + real_quick_ratio() + Return an upper bound on ratio() very quickly. + 'u' + SequenceMatcher is a flexible class for comparing pairs of sequences of + any type, so long as the sequence elements are hashable. The basic + algorithm predates, and is a little fancier than, an algorithm + published in the late 1980's by Ratcliff and Obershelp under the + hyperbolic name "gestalt pattern matching". The basic idea is to find + the longest contiguous matching subsequence that contains no "junk" + elements (R-O doesn't address junk). The same idea is then applied + recursively to the pieces of the sequences to the left and to the right + of the matching subsequence. This does not yield minimal edit + sequences, but does tend to yield matches that "look right" to people. + + SequenceMatcher tries to compute a "human-friendly diff" between two + sequences. Unlike e.g. 
UNIX(tm) diff, the fundamental notion is the + longest *contiguous* & junk-free matching subsequence. That's what + catches peoples' eyes. The Windows(tm) windiff has another interesting + notion, pairing up elements that appear uniquely in each sequence. + That, and the method here, appear to yield more intuitive difference + reports than does diff. This method appears to be the least vulnerable + to synching up on blocks of "junk lines", though (like blank lines in + ordinary text files, or maybe "

" lines in HTML files). That may be + because this is the only method of the 3 that has a *concept* of + "junk" . + + Example, comparing two strings, and considering blanks to be "junk": + + >>> s = SequenceMatcher(lambda x: x == " ", + ... "private Thread currentThread;", + ... "private volatile Thread currentThread;") + >>> + + .ratio() returns a float in [0, 1], measuring the "similarity" of the + sequences. As a rule of thumb, a .ratio() value over 0.6 means the + sequences are close matches: + + >>> print(round(s.ratio(), 3)) + 0.866 + >>> + + If you're only interested in where the sequences match, + .get_matching_blocks() is handy: + + >>> for block in s.get_matching_blocks(): + ... print("a[%d] and b[%d] match for %d elements" % block) + a[0] and b[0] match for 8 elements + a[8] and b[17] match for 21 elements + a[29] and b[38] match for 0 elements + + Note that the last tuple returned by .get_matching_blocks() is always a + dummy, (len(a), len(b), 0), and this is the only case in which the last + tuple element (number of elements matched) is 0. + + If you want to know how to change the first sequence into the second, + use .get_opcodes(): + + >>> for opcode in s.get_opcodes(): + ... print("%6s a[%d:%d] b[%d:%d]" % opcode) + equal a[0:8] b[0:8] + insert a[8:8] b[8:17] + equal a[8:29] b[17:38] + + See the Differ class for a fancy human-friendly file differencer, which + uses SequenceMatcher both to compare sequences of lines, and to compare + sequences of characters within similar (near-matching) lines. + + See also function get_close_matches() in this module, which shows how + simple code building on SequenceMatcher can be used to do useful work. + + Timing: Basic R-O is cubic time worst case and quadratic time expected + case. SequenceMatcher is quadratic time for the worst case and has + expected-case behavior dependent in a complicated way on how many + elements the sequences have in common; best case time is linear. + + Methods: + + __init__(isjunk=None, a='', b='') + Construct a SequenceMatcher. + + set_seqs(a, b) + Set the two sequences to be compared. + + set_seq1(a) + Set the first sequence to be compared. + + set_seq2(b) + Set the second sequence to be compared. + + find_longest_match(alo, ahi, blo, bhi) + Find longest matching block in a[alo:ahi] and b[blo:bhi]. + + get_matching_blocks() + Return list of triples describing matching subsequences. + + get_opcodes() + Return list of 5-tuples describing how to turn a into b. + + ratio() + Return a measure of the sequences' similarity (float in [0,1]). + + quick_ratio() + Return an upper bound on .ratio() relatively quickly. + + real_quick_ratio() + Return an upper bound on ratio() very quickly. + 'b'Construct a SequenceMatcher. + + Optional arg isjunk is None (the default), or a one-argument + function that takes a sequence element and returns true iff the + element is junk. None is equivalent to passing "lambda x: 0", i.e. + no elements are considered to be junk. For example, pass + lambda x: x in " \t" + if you're comparing lines as sequences of characters, and don't + want to synch up on blanks or hard tabs. + + Optional arg a is the first of two sequences to be compared. By + default, an empty string. The elements of a must be hashable. See + also .set_seqs() and .set_seq1(). + + Optional arg b is the second of two sequences to be compared. By + default, an empty string. The elements of b must be hashable. See + also .set_seqs() and .set_seq2(). 
+ + Optional arg autojunk should be set to False to disable the + "automatic junk heuristic" that treats popular elements as junk + (see module documentation for more information). + 'u'Construct a SequenceMatcher. + + Optional arg isjunk is None (the default), or a one-argument + function that takes a sequence element and returns true iff the + element is junk. None is equivalent to passing "lambda x: 0", i.e. + no elements are considered to be junk. For example, pass + lambda x: x in " \t" + if you're comparing lines as sequences of characters, and don't + want to synch up on blanks or hard tabs. + + Optional arg a is the first of two sequences to be compared. By + default, an empty string. The elements of a must be hashable. See + also .set_seqs() and .set_seq1(). + + Optional arg b is the second of two sequences to be compared. By + default, an empty string. The elements of b must be hashable. See + also .set_seqs() and .set_seq2(). + + Optional arg autojunk should be set to False to disable the + "automatic junk heuristic" that treats popular elements as junk + (see module documentation for more information). + 'b'Set the two sequences to be compared. + + >>> s = SequenceMatcher() + >>> s.set_seqs("abcd", "bcde") + >>> s.ratio() + 0.75 + 'u'Set the two sequences to be compared. + + >>> s = SequenceMatcher() + >>> s.set_seqs("abcd", "bcde") + >>> s.ratio() + 0.75 + 'b'Set the first sequence to be compared. + + The second sequence to be compared is not changed. + + >>> s = SequenceMatcher(None, "abcd", "bcde") + >>> s.ratio() + 0.75 + >>> s.set_seq1("bcde") + >>> s.ratio() + 1.0 + >>> + + SequenceMatcher computes and caches detailed information about the + second sequence, so if you want to compare one sequence S against + many sequences, use .set_seq2(S) once and call .set_seq1(x) + repeatedly for each of the other sequences. + + See also set_seqs() and set_seq2(). + 'u'Set the first sequence to be compared. + + The second sequence to be compared is not changed. + + >>> s = SequenceMatcher(None, "abcd", "bcde") + >>> s.ratio() + 0.75 + >>> s.set_seq1("bcde") + >>> s.ratio() + 1.0 + >>> + + SequenceMatcher computes and caches detailed information about the + second sequence, so if you want to compare one sequence S against + many sequences, use .set_seq2(S) once and call .set_seq1(x) + repeatedly for each of the other sequences. + + See also set_seqs() and set_seq2(). + 'b'Set the second sequence to be compared. + + The first sequence to be compared is not changed. + + >>> s = SequenceMatcher(None, "abcd", "bcde") + >>> s.ratio() + 0.75 + >>> s.set_seq2("abcd") + >>> s.ratio() + 1.0 + >>> + + SequenceMatcher computes and caches detailed information about the + second sequence, so if you want to compare one sequence S against + many sequences, use .set_seq2(S) once and call .set_seq1(x) + repeatedly for each of the other sequences. + + See also set_seqs() and set_seq1(). + 'u'Set the second sequence to be compared. + + The first sequence to be compared is not changed. + + >>> s = SequenceMatcher(None, "abcd", "bcde") + >>> s.ratio() + 0.75 + >>> s.set_seq2("abcd") + >>> s.ratio() + 1.0 + >>> + + SequenceMatcher computes and caches detailed information about the + second sequence, so if you want to compare one sequence S against + many sequences, use .set_seq2(S) once and call .set_seq1(x) + repeatedly for each of the other sequences. + + See also set_seqs() and set_seq1(). + 'b'Find longest matching block in a[alo:ahi] and b[blo:bhi]. 
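The autojunk flag described just above switches off the popular-element heuristic. Before the find_longest_match contract that follows, here is a minimal sketch of its effect; the sequences are invented and deliberately repetitive so that the heuristic applies (it only kicks in once the second sequence has roughly 200 elements or more).

import difflib

a = "x" * 250 + "needle"
b = "needle" + "x" * 250

# With the default autojunk=True the very common 'x' is treated as popular
# and never used to seed a match, so only "needle" lines up.
with_heuristic = difflib.SequenceMatcher(None, a, b)
# autojunk=False keeps every element eligible, so the long 'x' run matches too.
without_heuristic = difflib.SequenceMatcher(None, a, b, autojunk=False)

print(round(with_heuristic.ratio(), 3), round(without_heuristic.ratio(), 3))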
+ + If isjunk is not defined: + + Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where + alo <= i <= i+k <= ahi + blo <= j <= j+k <= bhi + and for all (i',j',k') meeting those conditions, + k >= k' + i <= i' + and if i == i', j <= j' + + In other words, of all maximal matching blocks, return one that + starts earliest in a, and of all those maximal matching blocks that + start earliest in a, return the one that starts earliest in b. + + >>> s = SequenceMatcher(None, " abcd", "abcd abcd") + >>> s.find_longest_match(0, 5, 0, 9) + Match(a=0, b=4, size=5) + + If isjunk is defined, first the longest matching block is + determined as above, but with the additional restriction that no + junk element appears in the block. Then that block is extended as + far as possible by matching (only) junk elements on both sides. So + the resulting block never matches on junk except as identical junk + happens to be adjacent to an "interesting" match. + + Here's the same example as before, but considering blanks to be + junk. That prevents " abcd" from matching the " abcd" at the tail + end of the second sequence directly. Instead only the "abcd" can + match, and matches the leftmost "abcd" in the second sequence: + + >>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd") + >>> s.find_longest_match(0, 5, 0, 9) + Match(a=1, b=0, size=4) + + If no blocks match, return (alo, blo, 0). + + >>> s = SequenceMatcher(None, "ab", "c") + >>> s.find_longest_match(0, 2, 0, 1) + Match(a=0, b=0, size=0) + 'u'Find longest matching block in a[alo:ahi] and b[blo:bhi]. + + If isjunk is not defined: + + Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where + alo <= i <= i+k <= ahi + blo <= j <= j+k <= bhi + and for all (i',j',k') meeting those conditions, + k >= k' + i <= i' + and if i == i', j <= j' + + In other words, of all maximal matching blocks, return one that + starts earliest in a, and of all those maximal matching blocks that + start earliest in a, return the one that starts earliest in b. + + >>> s = SequenceMatcher(None, " abcd", "abcd abcd") + >>> s.find_longest_match(0, 5, 0, 9) + Match(a=0, b=4, size=5) + + If isjunk is defined, first the longest matching block is + determined as above, but with the additional restriction that no + junk element appears in the block. Then that block is extended as + far as possible by matching (only) junk elements on both sides. So + the resulting block never matches on junk except as identical junk + happens to be adjacent to an "interesting" match. + + Here's the same example as before, but considering blanks to be + junk. That prevents " abcd" from matching the " abcd" at the tail + end of the second sequence directly. Instead only the "abcd" can + match, and matches the leftmost "abcd" in the second sequence: + + >>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd") + >>> s.find_longest_match(0, 5, 0, 9) + Match(a=1, b=0, size=4) + + If no blocks match, return (alo, blo, 0). + + >>> s = SequenceMatcher(None, "ab", "c") + >>> s.find_longest_match(0, 2, 0, 1) + Match(a=0, b=0, size=0) + 'b'Return list of triples describing matching subsequences. + + Each triple is of the form (i, j, n), and means that + a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in + i and in j. New in Python 2.5, it's also guaranteed that if + (i, j, n) and (i', j', n') are adjacent triples in the list, and + the second is not the last triple in the list, then i+n != i' or + j+n != j'. IOW, adjacent triples never describe adjacent equal + blocks. 
+ + The last triple is a dummy, (len(a), len(b), 0), and is the only + triple with n==0. + + >>> s = SequenceMatcher(None, "abxcd", "abcd") + >>> list(s.get_matching_blocks()) + [Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)] + 'u'Return list of triples describing matching subsequences. + + Each triple is of the form (i, j, n), and means that + a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in + i and in j. New in Python 2.5, it's also guaranteed that if + (i, j, n) and (i', j', n') are adjacent triples in the list, and + the second is not the last triple in the list, then i+n != i' or + j+n != j'. IOW, adjacent triples never describe adjacent equal + blocks. + + The last triple is a dummy, (len(a), len(b), 0), and is the only + triple with n==0. + + >>> s = SequenceMatcher(None, "abxcd", "abcd") + >>> list(s.get_matching_blocks()) + [Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)] + 'b'Return list of 5-tuples describing how to turn a into b. + + Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple + has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the + tuple preceding it, and likewise for j1 == the previous j2. + + The tags are strings, with these meanings: + + 'replace': a[i1:i2] should be replaced by b[j1:j2] + 'delete': a[i1:i2] should be deleted. + Note that j1==j2 in this case. + 'insert': b[j1:j2] should be inserted at a[i1:i1]. + Note that i1==i2 in this case. + 'equal': a[i1:i2] == b[j1:j2] + + >>> a = "qabxcd" + >>> b = "abycdf" + >>> s = SequenceMatcher(None, a, b) + >>> for tag, i1, i2, j1, j2 in s.get_opcodes(): + ... print(("%7s a[%d:%d] (%s) b[%d:%d] (%s)" % + ... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))) + delete a[0:1] (q) b[0:0] () + equal a[1:3] (ab) b[0:2] (ab) + replace a[3:4] (x) b[2:3] (y) + equal a[4:6] (cd) b[3:5] (cd) + insert a[6:6] () b[5:6] (f) + 'u'Return list of 5-tuples describing how to turn a into b. + + Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple + has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the + tuple preceding it, and likewise for j1 == the previous j2. + + The tags are strings, with these meanings: + + 'replace': a[i1:i2] should be replaced by b[j1:j2] + 'delete': a[i1:i2] should be deleted. + Note that j1==j2 in this case. + 'insert': b[j1:j2] should be inserted at a[i1:i1]. + Note that i1==i2 in this case. + 'equal': a[i1:i2] == b[j1:j2] + + >>> a = "qabxcd" + >>> b = "abycdf" + >>> s = SequenceMatcher(None, a, b) + >>> for tag, i1, i2, j1, j2 in s.get_opcodes(): + ... print(("%7s a[%d:%d] (%s) b[%d:%d] (%s)" % + ... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))) + delete a[0:1] (q) b[0:0] () + equal a[1:3] (ab) b[0:2] (ab) + replace a[3:4] (x) b[2:3] (y) + equal a[4:6] (cd) b[3:5] (cd) + insert a[6:6] () b[5:6] (f) + 'b'equal'u'equal'b' Isolate change clusters by eliminating ranges with no changes. + + Return a generator of groups with up to n lines of context. + Each group is in the same format as returned by get_opcodes(). 
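The opcode tags listed above are mechanical enough to apply directly. As a sketch (the helper name apply_opcodes is invented), this rebuilds `b` from `a` using get_opcodes(), complementing the grouped-opcodes example that follows.

import difflib

def apply_opcodes(a, b):
    # Rebuild `b` from `a` by following the (tag, i1, i2, j1, j2) contract above.
    pieces = []
    for tag, i1, i2, j1, j2 in difflib.SequenceMatcher(None, a, b).get_opcodes():
        if tag == "equal":
            pieces.append(a[i1:i2])      # unchanged slice carried over from a
        elif tag in ("replace", "insert"):
            pieces.append(b[j1:j2])      # new material comes from b
        # 'delete' contributes nothing
    return "".join(pieces)

assert apply_opcodes("qabxcd", "abycdf") == "abycdf"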
+ + >>> from pprint import pprint + >>> a = list(map(str, range(1,40))) + >>> b = a[:] + >>> b[8:8] = ['i'] # Make an insertion + >>> b[20] += 'x' # Make a replacement + >>> b[23:28] = [] # Make a deletion + >>> b[30] += 'y' # Make another replacement + >>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes())) + [[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)], + [('equal', 16, 19, 17, 20), + ('replace', 19, 20, 20, 21), + ('equal', 20, 22, 21, 23), + ('delete', 22, 27, 23, 23), + ('equal', 27, 30, 23, 26)], + [('equal', 31, 34, 27, 30), + ('replace', 34, 35, 30, 31), + ('equal', 35, 38, 31, 34)]] + 'u' Isolate change clusters by eliminating ranges with no changes. + + Return a generator of groups with up to n lines of context. + Each group is in the same format as returned by get_opcodes(). + + >>> from pprint import pprint + >>> a = list(map(str, range(1,40))) + >>> b = a[:] + >>> b[8:8] = ['i'] # Make an insertion + >>> b[20] += 'x' # Make a replacement + >>> b[23:28] = [] # Make a deletion + >>> b[30] += 'y' # Make another replacement + >>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes())) + [[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)], + [('equal', 16, 19, 17, 20), + ('replace', 19, 20, 20, 21), + ('equal', 20, 22, 21, 23), + ('delete', 22, 27, 23, 23), + ('equal', 27, 30, 23, 26)], + [('equal', 31, 34, 27, 30), + ('replace', 34, 35, 30, 31), + ('equal', 35, 38, 31, 34)]] + 'b'Return a measure of the sequences' similarity (float in [0,1]). + + Where T is the total number of elements in both sequences, and + M is the number of matches, this is 2.0*M / T. + Note that this is 1 if the sequences are identical, and 0 if + they have nothing in common. + + .ratio() is expensive to compute if you haven't already computed + .get_matching_blocks() or .get_opcodes(), in which case you may + want to try .quick_ratio() or .real_quick_ratio() first to get an + upper bound. + + >>> s = SequenceMatcher(None, "abcd", "bcde") + >>> s.ratio() + 0.75 + >>> s.quick_ratio() + 0.75 + >>> s.real_quick_ratio() + 1.0 + 'u'Return a measure of the sequences' similarity (float in [0,1]). + + Where T is the total number of elements in both sequences, and + M is the number of matches, this is 2.0*M / T. + Note that this is 1 if the sequences are identical, and 0 if + they have nothing in common. + + .ratio() is expensive to compute if you haven't already computed + .get_matching_blocks() or .get_opcodes(), in which case you may + want to try .quick_ratio() or .real_quick_ratio() first to get an + upper bound. + + >>> s = SequenceMatcher(None, "abcd", "bcde") + >>> s.ratio() + 0.75 + >>> s.quick_ratio() + 0.75 + >>> s.real_quick_ratio() + 1.0 + 'b'Return an upper bound on ratio() relatively quickly. + + This isn't defined beyond that it is an upper bound on .ratio(), and + is faster to compute. + 'u'Return an upper bound on ratio() relatively quickly. + + This isn't defined beyond that it is an upper bound on .ratio(), and + is faster to compute. + 'b'Return an upper bound on ratio() very quickly. + + This isn't defined beyond that it is an upper bound on .ratio(), and + is faster to compute than either .ratio() or .quick_ratio(). + 'u'Return an upper bound on ratio() very quickly. + + This isn't defined beyond that it is an upper bound on .ratio(), and + is faster to compute than either .ratio() or .quick_ratio(). + 'b'Use SequenceMatcher to return list of the best "good enough" matches. 
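The quick_ratio()/real_quick_ratio() upper bounds described above are normally used as cheap filters before paying for the full ratio(); that is essentially the pattern behind the get_close_matches() function documented next. A minimal sketch, with the helper name best_match invented:

import difflib

def best_match(word, candidates, cutoff=0.6):
    matcher = difflib.SequenceMatcher()
    matcher.set_seq2(word)              # cache the per-word data once
    best, best_score = None, cutoff
    for candidate in candidates:
        matcher.set_seq1(candidate)
        # Both quick ratios are upper bounds on ratio(), so a candidate that
        # fails them cannot beat the current best score.
        if (matcher.real_quick_ratio() >= best_score
                and matcher.quick_ratio() >= best_score
                and matcher.ratio() >= best_score):
            best, best_score = candidate, matcher.ratio()
    return best

print(best_match("appel", ["ape", "apple", "peach", "puppy"]))  # expected: 'apple'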
+ + word is a sequence for which close matches are desired (typically a + string). + + possibilities is a list of sequences against which to match word + (typically a list of strings). + + Optional arg n (default 3) is the maximum number of close matches to + return. n must be > 0. + + Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities + that don't score at least that similar to word are ignored. + + The best (no more than n) matches among the possibilities are returned + in a list, sorted by similarity score, most similar first. + + >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"]) + ['apple', 'ape'] + >>> import keyword as _keyword + >>> get_close_matches("wheel", _keyword.kwlist) + ['while'] + >>> get_close_matches("Apple", _keyword.kwlist) + [] + >>> get_close_matches("accept", _keyword.kwlist) + ['except'] + 'u'Use SequenceMatcher to return list of the best "good enough" matches. + + word is a sequence for which close matches are desired (typically a + string). + + possibilities is a list of sequences against which to match word + (typically a list of strings). + + Optional arg n (default 3) is the maximum number of close matches to + return. n must be > 0. + + Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities + that don't score at least that similar to word are ignored. + + The best (no more than n) matches among the possibilities are returned + in a list, sorted by similarity score, most similar first. + + >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"]) + ['apple', 'ape'] + >>> import keyword as _keyword + >>> get_close_matches("wheel", _keyword.kwlist) + ['while'] + >>> get_close_matches("Apple", _keyword.kwlist) + [] + >>> get_close_matches("accept", _keyword.kwlist) + ['except'] + 'b'n must be > 0: %r'u'n must be > 0: %r'b'cutoff must be in [0.0, 1.0]: %r'u'cutoff must be in [0.0, 1.0]: %r'b'Replace whitespace with the original whitespace characters in `s`'u'Replace whitespace with the original whitespace characters in `s`'b' + Differ is a class for comparing sequences of lines of text, and + producing human-readable differences or deltas. Differ uses + SequenceMatcher both to compare sequences of lines, and to compare + sequences of characters within similar (near-matching) lines. + + Each line of a Differ delta begins with a two-letter code: + + '- ' line unique to sequence 1 + '+ ' line unique to sequence 2 + ' ' line common to both sequences + '? ' line not present in either input sequence + + Lines beginning with '? ' attempt to guide the eye to intraline + differences, and were not present in either input sequence. These lines + can be confusing if the sequences contain tab characters. + + Note that Differ makes no claim to produce a *minimal* diff. To the + contrary, minimal diffs are often counter-intuitive, because they synch + up anywhere possible, sometimes accidental matches 100 pages apart. + Restricting synch points to contiguous matches preserves some notion of + locality, at the occasional cost of producing a longer diff. + + Example: Comparing two texts. + + First we set up the texts, sequences of individual single-line strings + ending with newlines (such sequences can also be obtained from the + `readlines()` method of file-like objects): + + >>> text1 = ''' 1. Beautiful is better than ugly. + ... 2. Explicit is better than implicit. + ... 3. Simple is better than complex. + ... 4. Complex is better than complicated. + ... 
'''.splitlines(keepends=True) + >>> len(text1) + 4 + >>> text1[0][-1] + '\n' + >>> text2 = ''' 1. Beautiful is better than ugly. + ... 3. Simple is better than complex. + ... 4. Complicated is better than complex. + ... 5. Flat is better than nested. + ... '''.splitlines(keepends=True) + + Next we instantiate a Differ object: + + >>> d = Differ() + + Note that when instantiating a Differ object we may pass functions to + filter out line and character 'junk'. See Differ.__init__ for details. + + Finally, we compare the two: + + >>> result = list(d.compare(text1, text2)) + + 'result' is a list of strings, so let's pretty-print it: + + >>> from pprint import pprint as _pprint + >>> _pprint(result) + [' 1. Beautiful is better than ugly.\n', + '- 2. Explicit is better than implicit.\n', + '- 3. Simple is better than complex.\n', + '+ 3. Simple is better than complex.\n', + '? ++\n', + '- 4. Complex is better than complicated.\n', + '? ^ ---- ^\n', + '+ 4. Complicated is better than complex.\n', + '? ++++ ^ ^\n', + '+ 5. Flat is better than nested.\n'] + + As a single multi-line string it looks like this: + + >>> print(''.join(result), end="") + 1. Beautiful is better than ugly. + - 2. Explicit is better than implicit. + - 3. Simple is better than complex. + + 3. Simple is better than complex. + ? ++ + - 4. Complex is better than complicated. + ? ^ ---- ^ + + 4. Complicated is better than complex. + ? ++++ ^ ^ + + 5. Flat is better than nested. + + Methods: + + __init__(linejunk=None, charjunk=None) + Construct a text differencer, with optional filters. + + compare(a, b) + Compare two sequences of lines; generate the resulting delta. + 'u' + Differ is a class for comparing sequences of lines of text, and + producing human-readable differences or deltas. Differ uses + SequenceMatcher both to compare sequences of lines, and to compare + sequences of characters within similar (near-matching) lines. + + Each line of a Differ delta begins with a two-letter code: + + '- ' line unique to sequence 1 + '+ ' line unique to sequence 2 + ' ' line common to both sequences + '? ' line not present in either input sequence + + Lines beginning with '? ' attempt to guide the eye to intraline + differences, and were not present in either input sequence. These lines + can be confusing if the sequences contain tab characters. + + Note that Differ makes no claim to produce a *minimal* diff. To the + contrary, minimal diffs are often counter-intuitive, because they synch + up anywhere possible, sometimes accidental matches 100 pages apart. + Restricting synch points to contiguous matches preserves some notion of + locality, at the occasional cost of producing a longer diff. + + Example: Comparing two texts. + + First we set up the texts, sequences of individual single-line strings + ending with newlines (such sequences can also be obtained from the + `readlines()` method of file-like objects): + + >>> text1 = ''' 1. Beautiful is better than ugly. + ... 2. Explicit is better than implicit. + ... 3. Simple is better than complex. + ... 4. Complex is better than complicated. + ... '''.splitlines(keepends=True) + >>> len(text1) + 4 + >>> text1[0][-1] + '\n' + >>> text2 = ''' 1. Beautiful is better than ugly. + ... 3. Simple is better than complex. + ... 4. Complicated is better than complex. + ... 5. Flat is better than nested. + ... 
'''.splitlines(keepends=True) + + Next we instantiate a Differ object: + + >>> d = Differ() + + Note that when instantiating a Differ object we may pass functions to + filter out line and character 'junk'. See Differ.__init__ for details. + + Finally, we compare the two: + + >>> result = list(d.compare(text1, text2)) + + 'result' is a list of strings, so let's pretty-print it: + + >>> from pprint import pprint as _pprint + >>> _pprint(result) + [' 1. Beautiful is better than ugly.\n', + '- 2. Explicit is better than implicit.\n', + '- 3. Simple is better than complex.\n', + '+ 3. Simple is better than complex.\n', + '? ++\n', + '- 4. Complex is better than complicated.\n', + '? ^ ---- ^\n', + '+ 4. Complicated is better than complex.\n', + '? ++++ ^ ^\n', + '+ 5. Flat is better than nested.\n'] + + As a single multi-line string it looks like this: + + >>> print(''.join(result), end="") + 1. Beautiful is better than ugly. + - 2. Explicit is better than implicit. + - 3. Simple is better than complex. + + 3. Simple is better than complex. + ? ++ + - 4. Complex is better than complicated. + ? ^ ---- ^ + + 4. Complicated is better than complex. + ? ++++ ^ ^ + + 5. Flat is better than nested. + + Methods: + + __init__(linejunk=None, charjunk=None) + Construct a text differencer, with optional filters. + + compare(a, b) + Compare two sequences of lines; generate the resulting delta. + 'b' + Construct a text differencer, with optional filters. + + The two optional keyword parameters are for filter functions: + + - `linejunk`: A function that should accept a single string argument, + and return true iff the string is junk. The module-level function + `IS_LINE_JUNK` may be used to filter out lines without visible + characters, except for at most one splat ('#'). It is recommended + to leave linejunk None; the underlying SequenceMatcher class has + an adaptive notion of "noise" lines that's better than any static + definition the author has ever been able to craft. + + - `charjunk`: A function that should accept a string of length 1. The + module-level function `IS_CHARACTER_JUNK` may be used to filter out + whitespace characters (a blank or tab; **note**: bad idea to include + newline in this!). Use of IS_CHARACTER_JUNK is recommended. + 'u' + Construct a text differencer, with optional filters. + + The two optional keyword parameters are for filter functions: + + - `linejunk`: A function that should accept a single string argument, + and return true iff the string is junk. The module-level function + `IS_LINE_JUNK` may be used to filter out lines without visible + characters, except for at most one splat ('#'). It is recommended + to leave linejunk None; the underlying SequenceMatcher class has + an adaptive notion of "noise" lines that's better than any static + definition the author has ever been able to craft. + + - `charjunk`: A function that should accept a string of length 1. The + module-level function `IS_CHARACTER_JUNK` may be used to filter out + whitespace characters (a blank or tab; **note**: bad idea to include + newline in this!). Use of IS_CHARACTER_JUNK is recommended. + 'b' + Compare two sequences of lines; generate the resulting delta. + + Each sequence must contain individual single-line strings ending with + newlines. Such sequences can be obtained from the `readlines()` method + of file-like objects. The delta generated also consists of newline- + terminated strings, ready to be printed as-is via the writeline() + method of a file-like object. 
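The Differ class and its compare() method documented above operate on lists of newline-terminated strings. A minimal sketch of driving them directly (the sample texts are illustrative):

    import difflib

    a = "one\ntwo\nthree\n".splitlines(keepends=True)
    b = "ore\ntree\nemu\n".splitlines(keepends=True)

    # compare() yields newline-terminated delta lines prefixed with
    # '- ', '+ ', '  ' or '? ', exactly as described above.
    d = difflib.Differ()
    print("".join(d.compare(a, b)), end="")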
+ + Example: + + >>> print(''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(True), + ... 'ore\ntree\nemu\n'.splitlines(True))), + ... end="") + - one + ? ^ + + ore + ? ^ + - two + - three + ? - + + tree + + emu + 'u' + Compare two sequences of lines; generate the resulting delta. + + Each sequence must contain individual single-line strings ending with + newlines. Such sequences can be obtained from the `readlines()` method + of file-like objects. The delta generated also consists of newline- + terminated strings, ready to be printed as-is via the writeline() + method of a file-like object. + + Example: + + >>> print(''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(True), + ... 'ore\ntree\nemu\n'.splitlines(True))), + ... end="") + - one + ? ^ + + ore + ? ^ + - two + - three + ? - + + tree + + emu + 'b'Generate comparison results for a same-tagged range.'u'Generate comparison results for a same-tagged range.'b' + When replacing one block of lines with another, search the blocks + for *similar* lines; the best-matching pair (if any) is used as a + synch point, and intraline difference marking is done on the + similar pair. Lots of work, but often worth it. + + Example: + + >>> d = Differ() + >>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1, + ... ['abcdefGhijkl\n'], 0, 1) + >>> print(''.join(results), end="") + - abcDefghiJkl + ? ^ ^ ^ + + abcdefGhijkl + ? ^ ^ ^ + 'u' + When replacing one block of lines with another, search the blocks + for *similar* lines; the best-matching pair (if any) is used as a + synch point, and intraline difference marking is done on the + similar pair. Lots of work, but often worth it. + + Example: + + >>> d = Differ() + >>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1, + ... ['abcdefGhijkl\n'], 0, 1) + >>> print(''.join(results), end="") + - abcDefghiJkl + ? ^ ^ ^ + + abcdefGhijkl + ? ^ ^ ^ + 'b' + Format "?" output and deal with tabs. + + Example: + + >>> d = Differ() + >>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n', + ... ' ^ ^ ^ ', ' ^ ^ ^ ') + >>> for line in results: print(repr(line)) + ... + '- \tabcDefghiJkl\n' + '? \t ^ ^ ^\n' + '+ \tabcdefGhijkl\n' + '? \t ^ ^ ^\n' + 'u' + Format "?" output and deal with tabs. + + Example: + + >>> d = Differ() + >>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n', + ... ' ^ ^ ^ ', ' ^ ^ ^ ') + >>> for line in results: print(repr(line)) + ... + '- \tabcDefghiJkl\n' + '? \t ^ ^ ^\n' + '+ \tabcdefGhijkl\n' + '? \t ^ ^ ^\n' + 'b'- 'u'- 'b'? 'u'? 'b'+ 'u'+ 'b'\s*(?:#\s*)?$'u'\s*(?:#\s*)?$'b' + Return True for ignorable line: iff `line` is blank or contains a single '#'. + + Examples: + + >>> IS_LINE_JUNK('\n') + True + >>> IS_LINE_JUNK(' # \n') + True + >>> IS_LINE_JUNK('hello\n') + False + 'u' + Return True for ignorable line: iff `line` is blank or contains a single '#'. + + Examples: + + >>> IS_LINE_JUNK('\n') + True + >>> IS_LINE_JUNK(' # \n') + True + >>> IS_LINE_JUNK('hello\n') + False + 'b' + Return True for ignorable character: iff `ch` is a space or tab. + + Examples: + + >>> IS_CHARACTER_JUNK(' ') + True + >>> IS_CHARACTER_JUNK('\t') + True + >>> IS_CHARACTER_JUNK('\n') + False + >>> IS_CHARACTER_JUNK('x') + False + 'u' + Return True for ignorable character: iff `ch` is a space or tab. 
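IS_LINE_JUNK and IS_CHARACTER_JUNK, described above, are the module-level junk filters that can be handed to Differ (or ndiff) as the linejunk/charjunk arguments. A small sketch, using only public difflib names (the sample texts are illustrative):

    import difflib

    # Blank or '#'-only lines are treated as junk lines; spaces and tabs
    # are treated as junk characters for the intraline comparison.
    d = difflib.Differ(linejunk=difflib.IS_LINE_JUNK,
                       charjunk=difflib.IS_CHARACTER_JUNK)

    old = "alpha\n\nbeta\n".splitlines(keepends=True)
    new = "alpha\n# \nbeta!\n".splitlines(keepends=True)
    print("".join(d.compare(old, new)), end="")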
+ + Examples: + + >>> IS_CHARACTER_JUNK(' ') + True + >>> IS_CHARACTER_JUNK('\t') + True + >>> IS_CHARACTER_JUNK('\n') + False + >>> IS_CHARACTER_JUNK('x') + False + 'b'Convert range to the "ed" format'u'Convert range to the "ed" format'b'{},{}'u'{},{}'b' + Compare two sequences of lines; generate the delta as a unified diff. + + Unified diffs are a compact way of showing line changes and a few + lines of context. The number of context lines is set by 'n' which + defaults to three. + + By default, the diff control lines (those with ---, +++, or @@) are + created with a trailing newline. This is helpful so that inputs + created from file.readlines() result in diffs that are suitable for + file.writelines() since both the inputs and outputs have trailing + newlines. + + For inputs that do not have trailing newlines, set the lineterm + argument to "" so that the output will be uniformly newline free. + + The unidiff format normally has a header for filenames and modification + times. Any or all of these may be specified using strings for + 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. + The modification times are normally expressed in the ISO 8601 format. + + Example: + + >>> for line in unified_diff('one two three four'.split(), + ... 'zero one tree four'.split(), 'Original', 'Current', + ... '2005-01-26 23:30:50', '2010-04-02 10:20:52', + ... lineterm=''): + ... print(line) # doctest: +NORMALIZE_WHITESPACE + --- Original 2005-01-26 23:30:50 + +++ Current 2010-04-02 10:20:52 + @@ -1,4 +1,4 @@ + +zero + one + -two + -three + +tree + four + 'u' + Compare two sequences of lines; generate the delta as a unified diff. + + Unified diffs are a compact way of showing line changes and a few + lines of context. The number of context lines is set by 'n' which + defaults to three. + + By default, the diff control lines (those with ---, +++, or @@) are + created with a trailing newline. This is helpful so that inputs + created from file.readlines() result in diffs that are suitable for + file.writelines() since both the inputs and outputs have trailing + newlines. + + For inputs that do not have trailing newlines, set the lineterm + argument to "" so that the output will be uniformly newline free. + + The unidiff format normally has a header for filenames and modification + times. Any or all of these may be specified using strings for + 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. + The modification times are normally expressed in the ISO 8601 format. + + Example: + + >>> for line in unified_diff('one two three four'.split(), + ... 'zero one tree four'.split(), 'Original', 'Current', + ... '2005-01-26 23:30:50', '2010-04-02 10:20:52', + ... lineterm=''): + ... print(line) # doctest: +NORMALIZE_WHITESPACE + --- Original 2005-01-26 23:30:50 + +++ Current 2010-04-02 10:20:52 + @@ -1,4 +1,4 @@ + +zero + one + -two + -three + +tree + four + 'b' {}'u' {}'b'--- {}{}{}'u'--- {}{}{}'b'+++ {}{}{}'u'+++ {}{}{}'b'@@ -{} +{} @@{}'u'@@ -{} +{} @@{}'b' + Compare two sequences of lines; generate the delta as a context diff. + + Context diffs are a compact way of showing line changes and a few + lines of context. The number of context lines is set by 'n' which + defaults to three. + + By default, the diff control lines (those with *** or ---) are + created with a trailing newline. This is helpful so that inputs + created from file.readlines() result in diffs that are suitable for + file.writelines() since both the inputs and outputs have trailing + newlines. 
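The unified_diff() docstring above explains the header and lineterm conventions; a short usage sketch reusing the sample data and dates from that docstring (the file names are illustrative):

    import difflib

    a = "one\ntwo\nthree\nfour\n".splitlines(keepends=True)
    b = "zero\none\ntree\nfour\n".splitlines(keepends=True)

    # Inputs already end in '\n', so the default lineterm keeps the delta
    # ready for file.writelines().
    diff = difflib.unified_diff(a, b,
                                fromfile="a.txt", tofile="b.txt",
                                fromfiledate="2005-01-26 23:30:50",
                                tofiledate="2010-04-02 10:20:52",
                                n=3)
    print("".join(diff), end="")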
+ + For inputs that do not have trailing newlines, set the lineterm + argument to "" so that the output will be uniformly newline free. + + The context diff format normally has a header for filenames and + modification times. Any or all of these may be specified using + strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. + The modification times are normally expressed in the ISO 8601 format. + If not specified, the strings default to blanks. + + Example: + + >>> print(''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(True), + ... 'zero\none\ntree\nfour\n'.splitlines(True), 'Original', 'Current')), + ... end="") + *** Original + --- Current + *************** + *** 1,4 **** + one + ! two + ! three + four + --- 1,4 ---- + + zero + one + ! tree + four + 'u' + Compare two sequences of lines; generate the delta as a context diff. + + Context diffs are a compact way of showing line changes and a few + lines of context. The number of context lines is set by 'n' which + defaults to three. + + By default, the diff control lines (those with *** or ---) are + created with a trailing newline. This is helpful so that inputs + created from file.readlines() result in diffs that are suitable for + file.writelines() since both the inputs and outputs have trailing + newlines. + + For inputs that do not have trailing newlines, set the lineterm + argument to "" so that the output will be uniformly newline free. + + The context diff format normally has a header for filenames and + modification times. Any or all of these may be specified using + strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. + The modification times are normally expressed in the ISO 8601 format. + If not specified, the strings default to blanks. + + Example: + + >>> print(''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(True), + ... 'zero\none\ntree\nfour\n'.splitlines(True), 'Original', 'Current')), + ... end="") + *** Original + --- Current + *************** + *** 1,4 **** + one + ! two + ! three + four + --- 1,4 ---- + + zero + one + ! tree + four + 'b'! 'u'! 'b'*** {}{}{}'u'*** {}{}{}'b'***************'u'***************'b'*** {} ****{}'u'*** {} ****{}'b'--- {} ----{}'u'--- {} ----{}'b'lines to compare must be str, not %s (%r)'u'lines to compare must be str, not %s (%r)'b'all arguments must be str, not: %r'u'all arguments must be str, not: %r'b' + Compare `a` and `b`, two sequences of lines represented as bytes rather + than str. This is a wrapper for `dfunc`, which is typically either + unified_diff() or context_diff(). Inputs are losslessly converted to + strings so that `dfunc` only has to worry about strings, and encoded + back to bytes on return. This is necessary to compare files with + unknown or inconsistent encoding. All other inputs (except `n`) must be + bytes rather than str. + 'u' + Compare `a` and `b`, two sequences of lines represented as bytes rather + than str. This is a wrapper for `dfunc`, which is typically either + unified_diff() or context_diff(). Inputs are losslessly converted to + strings so that `dfunc` only has to worry about strings, and encoded + back to bytes on return. This is necessary to compare files with + unknown or inconsistent encoding. All other inputs (except `n`) must be + bytes rather than str. + 'b'all arguments must be bytes, not %s (%r)'u'all arguments must be bytes, not %s (%r)'b' + Compare `a` and `b` (lists of strings); return a `Differ`-style delta. 
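context_diff() takes the same arguments as unified_diff() but produces the '***'/'---' hunk format shown above; a minimal sketch with the same illustrative data:

    import difflib

    a = "one\ntwo\nthree\nfour\n".splitlines(keepends=True)
    b = "zero\none\ntree\nfour\n".splitlines(keepends=True)

    for line in difflib.context_diff(a, b, fromfile="Original", tofile="Current"):
        print(line, end="")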
+ + Optional keyword parameters `linejunk` and `charjunk` are for filter + functions, or can be None: + + - linejunk: A function that should accept a single string argument and + return true iff the string is junk. The default is None, and is + recommended; the underlying SequenceMatcher class has an adaptive + notion of "noise" lines. + + - charjunk: A function that accepts a character (string of length + 1), and returns true iff the character is junk. The default is + the module-level function IS_CHARACTER_JUNK, which filters out + whitespace characters (a blank or tab; note: it's a bad idea to + include newline in this!). + + Tools/scripts/ndiff.py is a command-line front-end to this function. + + Example: + + >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True), + ... 'ore\ntree\nemu\n'.splitlines(keepends=True)) + >>> print(''.join(diff), end="") + - one + ? ^ + + ore + ? ^ + - two + - three + ? - + + tree + + emu + 'u' + Compare `a` and `b` (lists of strings); return a `Differ`-style delta. + + Optional keyword parameters `linejunk` and `charjunk` are for filter + functions, or can be None: + + - linejunk: A function that should accept a single string argument and + return true iff the string is junk. The default is None, and is + recommended; the underlying SequenceMatcher class has an adaptive + notion of "noise" lines. + + - charjunk: A function that accepts a character (string of length + 1), and returns true iff the character is junk. The default is + the module-level function IS_CHARACTER_JUNK, which filters out + whitespace characters (a blank or tab; note: it's a bad idea to + include newline in this!). + + Tools/scripts/ndiff.py is a command-line front-end to this function. + + Example: + + >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True), + ... 'ore\ntree\nemu\n'.splitlines(keepends=True)) + >>> print(''.join(diff), end="") + - one + ? ^ + + ore + ? ^ + - two + - three + ? - + + tree + + emu + 'b'Returns generator yielding marked up from/to side by side differences. + + Arguments: + fromlines -- list of text lines to compared to tolines + tolines -- list of text lines to be compared to fromlines + context -- number of context lines to display on each side of difference, + if None, all from/to text lines will be generated. + linejunk -- passed on to ndiff (see ndiff documentation) + charjunk -- passed on to ndiff (see ndiff documentation) + + This function returns an iterator which returns a tuple: + (from line tuple, to line tuple, boolean flag) + + from/to line tuple -- (line num, line text) + line num -- integer or None (to indicate a context separation) + line text -- original line text with following markers inserted: + '\0+' -- marks start of added text + '\0-' -- marks start of deleted text + '\0^' -- marks start of changed text + '\1' -- marks end of added/deleted/changed text + + boolean flag -- None indicates context separation, True indicates + either "from" or "to" line contains a change, otherwise False. + + This function/iterator was originally developed to generate side by side + file difference for making HTML pages (see HtmlDiff class for example + usage). + + Note, this function utilizes the ndiff function to generate the side by + side difference markup. Optional ndiff arguments may be passed to this + function and they in turn will be passed to ndiff. + 'u'Returns generator yielding marked up from/to side by side differences. 
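diff_bytes(), whose docstring appears a little earlier, wraps a str-based differ so that the inputs and the generated delta stay bytes, which is useful when the encoding is unknown or inconsistent. A sketch (the byte strings and file names are illustrative):

    import difflib

    old = [b"caf\xe9\n", b"tea\n"]        # legacy, non-UTF-8 bytes
    new = [b"caf\xe9\n", b"coffee\n"]

    # All header arguments must be bytes as well; the yielded delta
    # lines are bytes, decoded here only for display.
    for line in difflib.diff_bytes(difflib.unified_diff, old, new,
                                   fromfile=b"old.txt", tofile=b"new.txt"):
        print(line.decode("latin-1"), end="")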
+ + Arguments: + fromlines -- list of text lines to compared to tolines + tolines -- list of text lines to be compared to fromlines + context -- number of context lines to display on each side of difference, + if None, all from/to text lines will be generated. + linejunk -- passed on to ndiff (see ndiff documentation) + charjunk -- passed on to ndiff (see ndiff documentation) + + This function returns an iterator which returns a tuple: + (from line tuple, to line tuple, boolean flag) + + from/to line tuple -- (line num, line text) + line num -- integer or None (to indicate a context separation) + line text -- original line text with following markers inserted: + '\0+' -- marks start of added text + '\0-' -- marks start of deleted text + '\0^' -- marks start of changed text + '\1' -- marks end of added/deleted/changed text + + boolean flag -- None indicates context separation, True indicates + either "from" or "to" line contains a change, otherwise False. + + This function/iterator was originally developed to generate side by side + file difference for making HTML pages (see HtmlDiff class for example + usage). + + Note, this function utilizes the ndiff function to generate the side by + side difference markup. Optional ndiff arguments may be passed to this + function and they in turn will be passed to ndiff. + 'b'(\++|\-+|\^+)'u'(\++|\-+|\^+)'b'Returns line of text with user's change markup and line formatting. + + lines -- list of lines from the ndiff generator to produce a line of + text from. When producing the line of text to return, the + lines used are removed from this list. + format_key -- '+' return first line in list with "add" markup around + the entire line. + '-' return first line in list with "delete" markup around + the entire line. + '?' return first line in list with add/delete/change + intraline markup (indices obtained from second line) + None return first line in list with no markup + side -- indice into the num_lines list (0=from,1=to) + num_lines -- from/to current line number. This is NOT intended to be a + passed parameter. It is present as a keyword argument to + maintain memory of the current line numbers between calls + of this function. + + Note, this function is purposefully not defined at the module scope so + that data it needs from its parent function (within whose context it + is defined) does not need to be of module scope. + 'u'Returns line of text with user's change markup and line formatting. + + lines -- list of lines from the ndiff generator to produce a line of + text from. When producing the line of text to return, the + lines used are removed from this list. + format_key -- '+' return first line in list with "add" markup around + the entire line. + '-' return first line in list with "delete" markup around + the entire line. + '?' return first line in list with add/delete/change + intraline markup (indices obtained from second line) + None return first line in list with no markup + side -- indice into the num_lines list (0=from,1=to) + num_lines -- from/to current line number. This is NOT intended to be a + passed parameter. It is present as a keyword argument to + maintain memory of the current line numbers between calls + of this function. + + Note, this function is purposefully not defined at the module scope so + that data it needs from its parent function (within whose context it + is defined) does not need to be of module scope. + 'b''u''b'Yields from/to lines of text with a change indication. + + This function is an iterator. 
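The _mdiff generator documented above is the private helper behind the HTML side-by-side view; it yields (from-tuple, to-tuple, changed-flag) triples with \0+/\0-/\0^ ... \1 markers around intraline changes. A sketch, assuming this private name keeps its current signature:

    import difflib

    old = "one\ntwo\nthree\n".splitlines(keepends=True)
    new = "ore\ntree\nemu\n".splitlines(keepends=True)

    # context=None emits every line; an integer limits the output to that
    # many context lines around each change, with None-flagged separators.
    for from_line, to_line, changed in difflib._mdiff(old, new, context=None):
        print(changed, from_line, to_line)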
It itself pulls lines from a + differencing iterator, processes them and yields them. When it can + it yields both a "from" and a "to" line, otherwise it will yield one + or the other. In addition to yielding the lines of from/to text, a + boolean flag is yielded to indicate if the text line(s) have + differences in them. + + Note, this function is purposefully not defined at the module scope so + that data it needs from its parent function (within whose context it + is defined) does not need to be of module scope. + 'u'Yields from/to lines of text with a change indication. + + This function is an iterator. It itself pulls lines from a + differencing iterator, processes them and yields them. When it can + it yields both a "from" and a "to" line, otherwise it will yield one + or the other. In addition to yielding the lines of from/to text, a + boolean flag is yielded to indicate if the text line(s) have + differences in them. + + Note, this function is purposefully not defined at the module scope so + that data it needs from its parent function (within whose context it + is defined) does not need to be of module scope. + 'b'X'u'X'b'-?+?'u'-?+?'b'--++'u'--++'b'--?+'u'--?+'b'--+'u'--+'b'-+?'u'-+?'b'-?+'u'-?+'b'+--'u'+--'b'+-'u'+-'b'Yields from/to lines of text with a change indication. + + This function is an iterator. It itself pulls lines from the line + iterator. Its difference from that iterator is that this function + always yields a pair of from/to text lines (with the change + indication). If necessary it will collect single from/to lines + until it has a matching pair from/to pair to yield. + + Note, this function is purposefully not defined at the module scope so + that data it needs from its parent function (within whose context it + is defined) does not need to be of module scope. + 'u'Yields from/to lines of text with a change indication. + + This function is an iterator. It itself pulls lines from the line + iterator. Its difference from that iterator is that this function + always yields a pair of from/to text lines (with the change + indication). If necessary it will collect single from/to lines + until it has a matching pair from/to pair to yield. + + Note, this function is purposefully not defined at the module scope so + that data it needs from its parent function (within whose context it + is defined) does not need to be of module scope. + 'b' + + + + + + + + + + + + %(table)s%(legend)s + + +'u' + + + + + + + + + + + + %(table)s%(legend)s + + +'b' + table.diff {font-family:Courier; border:medium;} + .diff_header {background-color:#e0e0e0} + td.diff_header {text-align:right} + .diff_next {background-color:#c0c0c0} + .diff_add {background-color:#aaffaa} + .diff_chg {background-color:#ffff77} + .diff_sub {background-color:#ffaaaa}'u' + table.diff {font-family:Courier; border:medium;} + .diff_header {background-color:#e0e0e0} + td.diff_header {text-align:right} + .diff_next {background-color:#c0c0c0} + .diff_add {background-color:#aaffaa} + .diff_chg {background-color:#ffff77} + .diff_sub {background-color:#ffaaaa}'b' + + + + %(header_row)s + +%(data_rows)s +
'u' + + + + %(header_row)s + +%(data_rows)s +
'b' + + + + +
Legends
+ + + + +
Colors
 Added 
Changed
Deleted
+ + + + +
Links
(f)irst change
(n)ext change
(t)op
'u' + + + + +
Legends
+ + + + +
Colors
 Added 
Changed
Deleted
+ + + + +
Links
(f)irst change
(n)ext change
(t)op
'b'For producing HTML side by side comparison with change highlights. + + This class can be used to create an HTML table (or a complete HTML file + containing the table) showing a side by side, line by line comparison + of text with inter-line and intra-line change highlights. The table can + be generated in either full or contextual difference mode. + + The following methods are provided for HTML generation: + + make_table -- generates HTML for a single side by side table + make_file -- generates complete HTML file with a single side by side table + + See tools/scripts/diff.py for an example usage of this class. + 'u'For producing HTML side by side comparison with change highlights. + + This class can be used to create an HTML table (or a complete HTML file + containing the table) showing a side by side, line by line comparison + of text with inter-line and intra-line change highlights. The table can + be generated in either full or contextual difference mode. + + The following methods are provided for HTML generation: + + make_table -- generates HTML for a single side by side table + make_file -- generates complete HTML file with a single side by side table + + See tools/scripts/diff.py for an example usage of this class. + 'b'HtmlDiff instance initializer + + Arguments: + tabsize -- tab stop spacing, defaults to 8. + wrapcolumn -- column number where lines are broken and wrapped, + defaults to None where lines are not wrapped. + linejunk,charjunk -- keyword arguments passed into ndiff() (used by + HtmlDiff() to generate the side by side HTML differences). See + ndiff() documentation for argument default values and descriptions. + 'u'HtmlDiff instance initializer + + Arguments: + tabsize -- tab stop spacing, defaults to 8. + wrapcolumn -- column number where lines are broken and wrapped, + defaults to None where lines are not wrapped. + linejunk,charjunk -- keyword arguments passed into ndiff() (used by + HtmlDiff() to generate the side by side HTML differences). See + ndiff() documentation for argument default values and descriptions. + 'b'Returns HTML file of side by side comparison with change highlights + + Arguments: + fromlines -- list of "from" lines + tolines -- list of "to" lines + fromdesc -- "from" file column header string + todesc -- "to" file column header string + context -- set to True for contextual differences (defaults to False + which shows full differences). + numlines -- number of context lines. When context is set True, + controls number of lines displayed before and after the change. + When context is False, controls the number of lines to place + the "next" link anchors before the next change (so click of + "next" link jumps to just before the change). + charset -- charset of the HTML document + 'u'Returns HTML file of side by side comparison with change highlights + + Arguments: + fromlines -- list of "from" lines + tolines -- list of "to" lines + fromdesc -- "from" file column header string + todesc -- "to" file column header string + context -- set to True for contextual differences (defaults to False + which shows full differences). + numlines -- number of context lines. When context is set True, + controls number of lines displayed before and after the change. + When context is False, controls the number of lines to place + the "next" link anchors before the next change (so click of + "next" link jumps to just before the change). + charset -- charset of the HTML document + 'b'Returns from/to line lists with tabs expanded and newlines removed. 
+ + Instead of tab characters being replaced by the number of spaces + needed to fill in to the next tab stop, this function will fill + the space with tab characters. This is done so that the difference + algorithms can identify changes in a file when tabs are replaced by + spaces and vice versa. At the end of the HTML generation, the tab + characters will be replaced with a nonbreakable space. + 'u'Returns from/to line lists with tabs expanded and newlines removed. + + Instead of tab characters being replaced by the number of spaces + needed to fill in to the next tab stop, this function will fill + the space with tab characters. This is done so that the difference + algorithms can identify changes in a file when tabs are replaced by + spaces and vice versa. At the end of the HTML generation, the tab + characters will be replaced with a nonbreakable space. + 'b'Builds list of text lines by splitting text lines at wrap point + + This function will determine if the input text line needs to be + wrapped (split) into separate lines. If so, the first wrap point + will be determined and the first line appended to the output + text line list. This function is used recursively to handle + the second part of the split line to further split it. + 'u'Builds list of text lines by splitting text lines at wrap point + + This function will determine if the input text line needs to be + wrapped (split) into separate lines. If so, the first wrap point + will be determined and the first line appended to the output + text line list. This function is used recursively to handle + the second part of the split line to further split it. + 'b'Returns iterator that splits (wraps) mdiff text lines'u'Returns iterator that splits (wraps) mdiff text lines'b'Collects mdiff output into separate lists + + Before storing the mdiff from/to data into a list, it is converted + into a single line of text with HTML markup. + 'u'Collects mdiff output into separate lists + + Before storing the mdiff from/to data into a list, it is converted + into a single line of text with HTML markup. + 'b'Returns HTML markup of "from" / "to" text lines + + side -- 0 or 1 indicating "from" or "to" text + flag -- indicates if difference on line + linenum -- line number (used for line number column) + text -- line text to be marked up + 'u'Returns HTML markup of "from" / "to" text lines + + side -- 0 or 1 indicating "from" or "to" text + flag -- indicates if difference on line + linenum -- line number (used for line number column) + text -- line text to be marked up + 'b'%d'u'%d'b' id="%s%s"'u' id="%s%s"'b' 'u' 'b'%s%s'u'%s%s'b'Create unique anchor prefixes'u'Create unique anchor prefixes'b'from%d_'u'from%d_'b'to%d_'u'to%d_'b'Makes list of "next" links'u'Makes list of "next" links'b' id="difflib_chg_%s_%d"'u' id="difflib_chg_%s_%d"'b'n'u'n'b' No Differences Found 'u' No Differences Found 'b' Empty File 'u' Empty File 'b'f'u'f'b't'u't'b'Returns HTML table of side by side comparison with change highlights + + Arguments: + fromlines -- list of "from" lines + tolines -- list of "to" lines + fromdesc -- "from" file column header string + todesc -- "to" file column header string + context -- set to True for contextual differences (defaults to False + which shows full differences). + numlines -- number of context lines. When context is set True, + controls number of lines displayed before and after the change. 
+ When context is False, controls the number of lines to place + the "next" link anchors before the next change (so click of + "next" link jumps to just before the change). + 'u'Returns HTML table of side by side comparison with change highlights + + Arguments: + fromlines -- list of "from" lines + tolines -- list of "to" lines + fromdesc -- "from" file column header string + todesc -- "to" file column header string + context -- set to True for contextual differences (defaults to False + which shows full differences). + numlines -- number of context lines. When context is set True, + controls number of lines displayed before and after the change. + When context is False, controls the number of lines to place + the "next" link anchors before the next change (so click of + "next" link jumps to just before the change). + 'b' %s%s'u' %s%s'b'%s%s +'u'%s%s +'b' + +'u' + +'b'%s%s%s%s'u'%s%s%s%s'b'
'u'
'b'%s'u'%s'b'+'u'+'b''u''b'-'u'-'b''u''b'^'u'^'b''u''b''u''b' + Generate one of the two sequences that generated a delta. + + Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract + lines originating from file 1 or 2 (parameter `which`), stripping off line + prefixes. + + Examples: + + >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True), + ... 'ore\ntree\nemu\n'.splitlines(keepends=True)) + >>> diff = list(diff) + >>> print(''.join(restore(diff, 1)), end="") + one + two + three + >>> print(''.join(restore(diff, 2)), end="") + ore + tree + emu + 'u' + Generate one of the two sequences that generated a delta. + + Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract + lines originating from file 1 or 2 (parameter `which`), stripping off line + prefixes. + + Examples: + + >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True), + ... 'ore\ntree\nemu\n'.splitlines(keepends=True)) + >>> diff = list(diff) + >>> print(''.join(restore(diff, 1)), end="") + one + two + three + >>> print(''.join(restore(diff, 2)), end="") + ore + tree + emu + 'b'unknown delta choice (must be 1 or 2): %r'u'unknown delta choice (must be 1 or 2): %r'u'difflib'distutils.dir_util + +Utility functions for manipulating directories and directory trees.DistutilsInternalError_path_createdCreate a directory and any missing ancestor directories. + + If the directory already exists (or if 'name' is the empty string, which + means the current directory, which of course exists), then do nothing. + Raise DistutilsFileError if unable to create some directory along the way + (eg. some sub-path exists, but is a file rather than a directory). + If 'verbose' is true, print a one-line summary of each mkdir to stdout. + Return the list of directories actually created. + mkpath: 'name' must be a string (got %r)normpathcreated_dirstailsabs_headcreating %sEEXISTcould not create '%s': %screate_treebase_dirfilesCreate all the empty directories under 'base_dir' needed to put 'files' + there. + + 'base_dir' is just the name of a directory which doesn't necessarily + exist yet; 'files' is a list of filenames to be interpreted relative to + 'base_dir'. 'base_dir' + the directory portion of every file in 'files' + will be created if it doesn't already exist. 'mode', 'verbose' and + 'dry_run' flags are as for 'mkpath()'. + need_dircopy_treepreserve_modepreserve_timespreserve_symlinksCopy an entire directory tree 'src' to a new location 'dst'. + + Both 'src' and 'dst' must be directory names. If 'src' is not a + directory, raise DistutilsFileError. If 'dst' does not exist, it is + created with 'mkpath()'. The end result of the copy is that every + file in 'src' is copied to 'dst', and directories under 'src' are + recursively copied to 'dst'. Return the list of files that were + copied or might have been copied, using their output name. The + return value is unaffected by 'update' or 'dry_run': it is simply + the list of all files under 'src', with the names changed to be + under 'dst'. + + 'preserve_mode' and 'preserve_times' are the same as for + 'copy_file'; note that they only apply to regular files, not to + directories. If 'preserve_symlinks' is true, symlinks will be + copied as symlinks (on platforms that support them!); otherwise + (the default), the destination of the symlink will be copied. + 'update' and 'verbose' are the same as for 'copy_file'. 
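The distutils.dir_util strings above cover mkpath(), create_tree(), copy_tree() and remove_tree(). A sketch of the basic calls (the paths are illustrative, and distutils is deprecated in current Python releases, where os.makedirs and shutil would normally be used instead):

    from distutils.dir_util import mkpath, copy_tree, remove_tree

    created = mkpath("build/staging/data", verbose=1)   # list of dirs created
    copied = copy_tree("src", "build/staging/src",      # "src" must already exist
                       preserve_symlinks=0, update=1, verbose=1)
    remove_tree("build/staging", verbose=1)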
+ copy_filecannot copy tree '%s': not a directoryerror listing files in '%s': %soutputsdst_name.nfsislinkreadlinklink_destlinking %s -> %s_build_cmdtuplecmdtuplesHelper for remove_tree().real_fremove_treeRecursively remove an entire directory tree. + + Any errors are ignored (apart from being reported to stdout if 'verbose' + is true). + removing '%s' (and everything under it)error removing %s: %sensure_relativeTake the full path 'path', and make it a relative path. + + This is useful to make 'path' the second argument to os.path.join(). + drive# cache for by mkpath() -- in addition to cheapening redundant calls,# eliminates redundant "creating /foo/bar/baz" messages in dry-run mode# I don't use os.makedirs because a) it's new to Python 1.5.2, and# b) it blows up if the directory already exists (I want to silently# succeed in that case).# Detect a common bug -- name is None# XXX what's the better way to handle verbosity? print as we create# each directory in the path (the current behaviour), or only announce# the creation of the whole path? (quite easy to do the latter since# we're not using a recursive algorithm)# stack of lone dirs to create# push next higher dir onto stack# now 'head' contains the deepest directory that already exists# (that is, the child of 'head' in 'name' is the highest directory# that does *not* exist)#print "head = %s, d = %s: " % (head, d),# First get the list of directories to create# Now create them# skip NFS rename files# remove dir from cache if it's already thereb'distutils.dir_util + +Utility functions for manipulating directories and directory trees.'u'distutils.dir_util + +Utility functions for manipulating directories and directory trees.'b'Create a directory and any missing ancestor directories. + + If the directory already exists (or if 'name' is the empty string, which + means the current directory, which of course exists), then do nothing. + Raise DistutilsFileError if unable to create some directory along the way + (eg. some sub-path exists, but is a file rather than a directory). + If 'verbose' is true, print a one-line summary of each mkdir to stdout. + Return the list of directories actually created. + 'u'Create a directory and any missing ancestor directories. + + If the directory already exists (or if 'name' is the empty string, which + means the current directory, which of course exists), then do nothing. + Raise DistutilsFileError if unable to create some directory along the way + (eg. some sub-path exists, but is a file rather than a directory). + If 'verbose' is true, print a one-line summary of each mkdir to stdout. + Return the list of directories actually created. + 'b'mkpath: 'name' must be a string (got %r)'u'mkpath: 'name' must be a string (got %r)'b'creating %s'u'creating %s'b'could not create '%s': %s'u'could not create '%s': %s'b'Create all the empty directories under 'base_dir' needed to put 'files' + there. + + 'base_dir' is just the name of a directory which doesn't necessarily + exist yet; 'files' is a list of filenames to be interpreted relative to + 'base_dir'. 'base_dir' + the directory portion of every file in 'files' + will be created if it doesn't already exist. 'mode', 'verbose' and + 'dry_run' flags are as for 'mkpath()'. + 'u'Create all the empty directories under 'base_dir' needed to put 'files' + there. + + 'base_dir' is just the name of a directory which doesn't necessarily + exist yet; 'files' is a list of filenames to be interpreted relative to + 'base_dir'. 
'base_dir' + the directory portion of every file in 'files' + will be created if it doesn't already exist. 'mode', 'verbose' and + 'dry_run' flags are as for 'mkpath()'. + 'b'Copy an entire directory tree 'src' to a new location 'dst'. + + Both 'src' and 'dst' must be directory names. If 'src' is not a + directory, raise DistutilsFileError. If 'dst' does not exist, it is + created with 'mkpath()'. The end result of the copy is that every + file in 'src' is copied to 'dst', and directories under 'src' are + recursively copied to 'dst'. Return the list of files that were + copied or might have been copied, using their output name. The + return value is unaffected by 'update' or 'dry_run': it is simply + the list of all files under 'src', with the names changed to be + under 'dst'. + + 'preserve_mode' and 'preserve_times' are the same as for + 'copy_file'; note that they only apply to regular files, not to + directories. If 'preserve_symlinks' is true, symlinks will be + copied as symlinks (on platforms that support them!); otherwise + (the default), the destination of the symlink will be copied. + 'update' and 'verbose' are the same as for 'copy_file'. + 'u'Copy an entire directory tree 'src' to a new location 'dst'. + + Both 'src' and 'dst' must be directory names. If 'src' is not a + directory, raise DistutilsFileError. If 'dst' does not exist, it is + created with 'mkpath()'. The end result of the copy is that every + file in 'src' is copied to 'dst', and directories under 'src' are + recursively copied to 'dst'. Return the list of files that were + copied or might have been copied, using their output name. The + return value is unaffected by 'update' or 'dry_run': it is simply + the list of all files under 'src', with the names changed to be + under 'dst'. + + 'preserve_mode' and 'preserve_times' are the same as for + 'copy_file'; note that they only apply to regular files, not to + directories. If 'preserve_symlinks' is true, symlinks will be + copied as symlinks (on platforms that support them!); otherwise + (the default), the destination of the symlink will be copied. + 'update' and 'verbose' are the same as for 'copy_file'. + 'b'cannot copy tree '%s': not a directory'u'cannot copy tree '%s': not a directory'b'error listing files in '%s': %s'u'error listing files in '%s': %s'b'.nfs'u'.nfs'b'linking %s -> %s'u'linking %s -> %s'b'Helper for remove_tree().'u'Helper for remove_tree().'b'Recursively remove an entire directory tree. + + Any errors are ignored (apart from being reported to stdout if 'verbose' + is true). + 'u'Recursively remove an entire directory tree. + + Any errors are ignored (apart from being reported to stdout if 'verbose' + is true). + 'b'removing '%s' (and everything under it)'u'removing '%s' (and everything under it)'b'error removing %s: %s'u'error removing %s: %s'b'Take the full path 'path', and make it a relative path. + + This is useful to make 'path' the second argument to os.path.join(). + 'u'Take the full path 'path', and make it a relative path. + + This is useful to make 'path' the second argument to os.path.join(). + 'u'distutils.dir_util'u'dir_util'Disassembler of Python byte code into mnemonics.opcode_opcodes_allcode_infodisdisassembledistbdiscofindlinestartsfindlabelsshow_codeget_instructionsInstructionBytecode_have_codeFORMAT_VALUEFORMAT_VALUE_CONVERTERSMAKE_FUNCTIONkwdefaultsclosureMAKE_FUNCTION_FLAGS_try_compileAttempts to compile the given source, first as an expression and + then as a statement if the first approach fails. 
+ + Utility function to accept strings in functions that otherwise + expect code objects + Disassemble classes, methods, functions, and other compiled objects. + + With no argument, disassemble the last traceback. + + Compiled objects currently include generator objects, async generator + objects, and coroutine objects, all of which store their code object + in a special attribute. + ag_codeDisassembly of %s:Sorry:co_code_disassemble_recursive_disassemble_bytes_disassemble_strdon't know how to disassemble %s objectsDisassemble a traceback (default: last traceback).no last traceback to disassembletb_lastiOPTIMIZEDNEWLOCALSVARARGSVARKEYWORDSNESTEDGENERATORNOFREECOROUTINEITERABLE_COROUTINEASYNC_GENERATORCOMPILER_FLAG_NAMESpretty_flagsReturn pretty representation of code flags._get_code_objectHelper to handle methods, compiled or raw code objects, and strings.Formatted details of methods, functions, or code._format_code_infoName: %sFilename: %sArgument count: %sco_argcountPositional-only arguments: %sco_posonlyargcountKw-only arguments: %sco_kwonlyargcountNumber of locals: %sco_nlocalsStack size: %sco_stacksizeFlags: %sco_constsConstants:i_c%4d: %rco_namesNames:i_n%4d: %sco_varnamesVariable names:co_freevarsFree variables:co_cellvarsCell variables:Print details of methods, functions, or code to *file*. + + If *file* is not provided, the output is printed on stdout. + _Instructionopname opcode arg argval argrepr offset starts_line is_jump_targetHuman readable name for operationopnameNumeric code for operationNumeric argument to operation (if any), otherwise NoneResolved arg value (if known), otherwise same as argargvalHuman readable description of operation argumentargreprStart index of operation within bytecode sequenceLine started by this opcode (if any), otherwise Nonestarts_lineTrue if other code jumps to here, otherwise Falseis_jump_target_OPNAME_WIDTH_OPARG_WIDTHDetails for a bytecode operation + + Defined fields: + opname - human readable name for operation + opcode - numeric code for operation + arg - numeric argument to operation (if any), otherwise None + argval - resolved arg value (if known), otherwise same as arg + argrepr - human readable description of operation argument + offset - start index of operation within bytecode sequence + starts_line - line started by this opcode (if any), otherwise None + is_jump_target - True if other code jumps to here, otherwise False + _disassemblelineno_widthmark_as_currentoffset_widthFormat instruction details for inclusion in disassembly output + + *lineno_width* sets the width of the line number field (0 omits it) + *mark_as_current* inserts a '-->' marker arrow as part of the line + *offset_width* sets the width of the instruction offset field + %%%ddlineno_fmt >>first_lineIterator for the opcodes in methods, functions or code + + Generates a series of Instruction named tuples giving the details of + each operations in the supplied code. + + If *first_line* is not None, it indicates the line number that should + be reported for the first source line in the disassembled code. + Otherwise, the source line information (if any) is taken directly from + the disassembled code object. + cell_nameslinestartsline_offset_get_instructions_bytes_get_const_infoconst_indexconst_listHelper to get optional details about const references + + Returns the dereferenced constant and its repr if the constant + list is defined. + Otherwise returns the constant index and its repr(). 
+ _get_name_infoname_indexname_listHelper to get optional details about named references + + Returns the dereferenced name as both value and repr if the name + list is defined. + Otherwise returns the name index and its repr(). + varnamesIterate over the instructions in a bytecode string. + + Generates a sequence of Instruction namedtuples giving the details of each + opcode. Additional information about the code's runtime environment + (e.g. variable names, constants) can be specified using optional + arguments. + + labels_unpack_opargshasconsthasnamehasjrelto haslocalhascomparecmp_ophasfreewith formatlastiDisassemble a code object.Disassembly of %r:show_linenomaxlinenomaxoffset10000instrnew_source_lineis_current_instrCompile the source string, then disassemble the code object.extended_argHAVE_ARGUMENTEXTENDED_ARGDetect all offsets in a byte code which are jump targets. + + Return the list of offsets. + + hasjabsFind the offsets in a byte code which are start of lines in the source. + + Generate pairs (offset, lineno) as described in Python/compile.c. + + co_lnotabbyte_incrementsline_incrementsbytecode_lenlastlinenobyte_incrline_incr0x100The bytecode operations of a piece of code + + Instantiate this with a function, method, other compiled object, string of + code, or a code object (as returned by compile()). + + Iterating over this yields the bytecode operations as Instruction instances. + current_offsetcodeobj_line_offset_cell_names_linestarts_original_object{}({!r})from_traceback Construct a Bytecode from the given traceback Return formatted information about the code object.Return a formatted view of the bytecode operations.Simple test program to disassemble a file.infile# Extract functions from methods.# Extract compiled code objects from...# ...a function, or#...a generator object, or#...an asynchronous generator object, or#...a coroutine.# Perform the disassembly.# Class or module# Code object# Raw bytecode# Source code# The inspect module interrogates this dictionary to build its# list of CO_* constants. It is also used by pretty_flags to# turn the co_flags field into a human readable list.# Handle source code.# By now, if we don't have a code object, we can't disassemble x.# Column: Source code line number# Column: Current instruction indicator# Column: Jump target marker# Column: Instruction offset from start of code sequence# Column: Opcode name# Column: Opcode argument# Column: Opcode argument details# Set argval to the dereferenced value of the argument when# available, and argrepr to the string representation of argval.# _disassemble_bytes needs the string repr of the# raw name index for LOAD_GLOBAL, LOAD_CONST, etc.# Omit the line number column entirely if we have no line number info# XXX For backwards compatibility# The rest of the lnotab byte offsets are past the end of# the bytecode, so the lines were optimized away.# line_increments is an array of 8-bit signed integersb'Disassembler of Python byte code into mnemonics.'u'Disassembler of Python byte code into mnemonics.'b'code_info'u'code_info'b'dis'u'dis'b'disassemble'u'disassemble'b'distb'u'distb'b'disco'u'disco'b'findlinestarts'u'findlinestarts'b'findlabels'u'findlabels'b'show_code'u'show_code'b'get_instructions'u'get_instructions'b'Instruction'u'Instruction'b'Bytecode'u'Bytecode'b'FORMAT_VALUE'u'FORMAT_VALUE'b'MAKE_FUNCTION'u'MAKE_FUNCTION'b'defaults'b'kwdefaults'u'kwdefaults'b'closure'u'closure'b'Attempts to compile the given source, first as an expression and + then as a statement if the first approach fails. 
+ + Utility function to accept strings in functions that otherwise + expect code objects + 'u'Attempts to compile the given source, first as an expression and + then as a statement if the first approach fails. + + Utility function to accept strings in functions that otherwise + expect code objects + 'b'Disassemble classes, methods, functions, and other compiled objects. + + With no argument, disassemble the last traceback. + + Compiled objects currently include generator objects, async generator + objects, and coroutine objects, all of which store their code object + in a special attribute. + 'u'Disassemble classes, methods, functions, and other compiled objects. + + With no argument, disassemble the last traceback. + + Compiled objects currently include generator objects, async generator + objects, and coroutine objects, all of which store their code object + in a special attribute. + 'b'__func__'u'__func__'b'__code__'u'__code__'b'ag_code'u'ag_code'b'Disassembly of %s:'u'Disassembly of %s:'b'Sorry:'u'Sorry:'b'co_code'u'co_code'b'don't know how to disassemble %s objects'u'don't know how to disassemble %s objects'b'Disassemble a traceback (default: last traceback).'u'Disassemble a traceback (default: last traceback).'b'no last traceback to disassemble'u'no last traceback to disassemble'b'OPTIMIZED'u'OPTIMIZED'b'NEWLOCALS'u'NEWLOCALS'b'VARARGS'u'VARARGS'b'VARKEYWORDS'u'VARKEYWORDS'b'NESTED'u'NESTED'b'GENERATOR'u'GENERATOR'b'NOFREE'u'NOFREE'b'COROUTINE'u'COROUTINE'b'ITERABLE_COROUTINE'u'ITERABLE_COROUTINE'b'ASYNC_GENERATOR'u'ASYNC_GENERATOR'b'Return pretty representation of code flags.'u'Return pretty representation of code flags.'b'Helper to handle methods, compiled or raw code objects, and strings.'u'Helper to handle methods, compiled or raw code objects, and strings.'b''u''b'Formatted details of methods, functions, or code.'u'Formatted details of methods, functions, or code.'b'Name: %s'u'Name: %s'b'Filename: %s'u'Filename: %s'b'Argument count: %s'u'Argument count: %s'b'Positional-only arguments: %s'u'Positional-only arguments: %s'b'Kw-only arguments: %s'u'Kw-only arguments: %s'b'Number of locals: %s'u'Number of locals: %s'b'Stack size: %s'u'Stack size: %s'b'Flags: %s'u'Flags: %s'b'Constants:'u'Constants:'b'%4d: %r'u'%4d: %r'b'Names:'u'Names:'b'%4d: %s'u'%4d: %s'b'Variable names:'u'Variable names:'b'Free variables:'u'Free variables:'b'Cell variables:'u'Cell variables:'b'Print details of methods, functions, or code to *file*. + + If *file* is not provided, the output is printed on stdout. + 'u'Print details of methods, functions, or code to *file*. + + If *file* is not provided, the output is printed on stdout. 
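The dis module strings above describe dis(), distb(), code_info() and show_code(). A minimal sketch of disassembling an ordinary function (the function itself is illustrative):

    import dis

    def sample(x):
        return x * 2 + 1

    dis.dis(sample)                 # print the disassembly
    print(dis.code_info(sample))    # the summary that show_code() prints

    # get_instructions() yields the Instruction tuples described below.
    for instr in dis.get_instructions(sample):
        print(instr.offset, instr.opname, instr.argrepr)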
+ 'b'_Instruction'u'_Instruction'b'opname opcode arg argval argrepr offset starts_line is_jump_target'u'opname opcode arg argval argrepr offset starts_line is_jump_target'b'Human readable name for operation'u'Human readable name for operation'b'Numeric code for operation'u'Numeric code for operation'b'Numeric argument to operation (if any), otherwise None'u'Numeric argument to operation (if any), otherwise None'b'Resolved arg value (if known), otherwise same as arg'u'Resolved arg value (if known), otherwise same as arg'b'Human readable description of operation argument'u'Human readable description of operation argument'b'Start index of operation within bytecode sequence'u'Start index of operation within bytecode sequence'b'Line started by this opcode (if any), otherwise None'u'Line started by this opcode (if any), otherwise None'b'True if other code jumps to here, otherwise False'u'True if other code jumps to here, otherwise False'b'Details for a bytecode operation + + Defined fields: + opname - human readable name for operation + opcode - numeric code for operation + arg - numeric argument to operation (if any), otherwise None + argval - resolved arg value (if known), otherwise same as arg + argrepr - human readable description of operation argument + offset - start index of operation within bytecode sequence + starts_line - line started by this opcode (if any), otherwise None + is_jump_target - True if other code jumps to here, otherwise False + 'u'Details for a bytecode operation + + Defined fields: + opname - human readable name for operation + opcode - numeric code for operation + arg - numeric argument to operation (if any), otherwise None + argval - resolved arg value (if known), otherwise same as arg + argrepr - human readable description of operation argument + offset - start index of operation within bytecode sequence + starts_line - line started by this opcode (if any), otherwise None + is_jump_target - True if other code jumps to here, otherwise False + 'b'Format instruction details for inclusion in disassembly output + + *lineno_width* sets the width of the line number field (0 omits it) + *mark_as_current* inserts a '-->' marker arrow as part of the line + *offset_width* sets the width of the instruction offset field + 'u'Format instruction details for inclusion in disassembly output + + *lineno_width* sets the width of the line number field (0 omits it) + *mark_as_current* inserts a '-->' marker arrow as part of the line + *offset_width* sets the width of the instruction offset field + 'b'%%%dd'u'%%%dd'b' 'u' 'b'>>'u'>>'b'Iterator for the opcodes in methods, functions or code + + Generates a series of Instruction named tuples giving the details of + each operations in the supplied code. + + If *first_line* is not None, it indicates the line number that should + be reported for the first source line in the disassembled code. + Otherwise, the source line information (if any) is taken directly from + the disassembled code object. + 'u'Iterator for the opcodes in methods, functions or code + + Generates a series of Instruction named tuples giving the details of + each operations in the supplied code. + + If *first_line* is not None, it indicates the line number that should + be reported for the first source line in the disassembled code. + Otherwise, the source line information (if any) is taken directly from + the disassembled code object. 
+ 'b'Helper to get optional details about const references + + Returns the dereferenced constant and its repr if the constant + list is defined. + Otherwise returns the constant index and its repr(). + 'u'Helper to get optional details about const references + + Returns the dereferenced constant and its repr if the constant + list is defined. + Otherwise returns the constant index and its repr(). + 'b'Helper to get optional details about named references + + Returns the dereferenced name as both value and repr if the name + list is defined. + Otherwise returns the name index and its repr(). + 'u'Helper to get optional details about named references + + Returns the dereferenced name as both value and repr if the name + list is defined. + Otherwise returns the name index and its repr(). + 'b'Iterate over the instructions in a bytecode string. + + Generates a sequence of Instruction namedtuples giving the details of each + opcode. Additional information about the code's runtime environment + (e.g. variable names, constants) can be specified using optional + arguments. + + 'u'Iterate over the instructions in a bytecode string. + + Generates a sequence of Instruction namedtuples giving the details of each + opcode. Additional information about the code's runtime environment + (e.g. variable names, constants) can be specified using optional + arguments. + + 'b'to 'u'to 'b'with format'u'with format'b'Disassemble a code object.'u'Disassemble a code object.'b'Disassembly of %r:'u'Disassembly of %r:'b'Compile the source string, then disassemble the code object.'u'Compile the source string, then disassemble the code object.'b''u''b'Detect all offsets in a byte code which are jump targets. + + Return the list of offsets. + + 'u'Detect all offsets in a byte code which are jump targets. + + Return the list of offsets. + + 'b'Find the offsets in a byte code which are start of lines in the source. + + Generate pairs (offset, lineno) as described in Python/compile.c. + + 'u'Find the offsets in a byte code which are start of lines in the source. + + Generate pairs (offset, lineno) as described in Python/compile.c. + + 'b'The bytecode operations of a piece of code + + Instantiate this with a function, method, other compiled object, string of + code, or a code object (as returned by compile()). + + Iterating over this yields the bytecode operations as Instruction instances. + 'u'The bytecode operations of a piece of code + + Instantiate this with a function, method, other compiled object, string of + code, or a code object (as returned by compile()). + + Iterating over this yields the bytecode operations as Instruction instances. + 'b'{}({!r})'u'{}({!r})'b' Construct a Bytecode from the given traceback 'u' Construct a Bytecode from the given traceback 'b'Return formatted information about the code object.'u'Return formatted information about the code object.'b'Return a formatted view of the bytecode operations.'u'Return a formatted view of the bytecode operations.'b'Simple test program to disassemble a file.'u'Simple test program to disassemble a file.'b'infile'u'infile'Module doctest -- a framework for running examples in docstrings. 
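Before the doctest strings that follow, a sketch of the dis.Bytecode class described above, which wraps a function or code object and iterates as Instruction tuples (the sample function is illustrative):

    import dis

    def sample(x):
        return [i * x for i in range(3)]

    bc = dis.Bytecode(sample)
    print(bc.info())        # formatted information about the code object
    print(bc.dis())         # formatted view of the bytecode operations
    for instr in bc:        # Instruction namedtuples
        print(instr.opname, instr.argval)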
+ +In simplest use, end each module M to be tested with: + +def _test(): + import doctest + doctest.testmod() + +if __name__ == "__main__": + _test() + +Then running the module as a script will cause the examples in the +docstrings to get executed and verified: + +python M.py + +This won't display anything unless an example fails, in which case the +failing example(s) and the cause(s) of the failure(s) are printed to stdout +(why not stderr? because stderr is a lame hack <0.2 wink>), and the final +line of output is "Test failed.". + +Run it with the -v switch instead: + +python M.py -v + +and a detailed report of all examples tried is printed to stdout, along +with assorted summaries at the end. + +You can force verbose mode by passing "verbose=True" to testmod, or prohibit +it by passing "verbose=False". In either of those cases, sys.argv is not +examined by testmod. + +There are a variety of other ways to run doctests, including integration +with the unittest framework, and support for running non-Python text +files containing doctests. There are also many ways to override parts +of doctest's default behaviors. See the Library Reference Manual for +details. +reStructuredText en__docformat__register_optionflagDONT_ACCEPT_TRUE_FOR_1DONT_ACCEPT_BLANKLINENORMALIZE_WHITESPACEELLIPSISSKIPIGNORE_EXCEPTION_DETAILCOMPARISON_FLAGSREPORT_UDIFFREPORT_CDIFFREPORT_NDIFFREPORT_ONLY_FIRST_FAILUREREPORTING_FLAGSFAIL_FASTExampleDocTestDocTestParserDocTestFinderDocTestRunnerOutputCheckerDocTestFailureUnexpectedExceptionDebugRunnertestfilerun_docstring_examplesDocTestSuiteDocFileSuiteset_unittest_reportflagsscript_from_examplestestsourcedebug_srcpdbTestResultsfailed attemptedOPTIONFLAGS_BY_NAMEBLANKLINE_MARKERELLIPSIS_MARKER_extract_future_flagsglobs + Return the compiler-flags associated with the future features that + have been imported into the given namespace (globs). + _normalize_module + Return the module specified by `module`. In particular: + - If `module` is a module, then return module. + - If `module` is a string, then import and return the + module with that name. + - If `module` is None, then return the calling module. + The calling module is assumed to be the module of + the stack frame at the given depth in the call stack. + ismoduleExpected a module, string, or None_newline_convert_load_testfilemodule_relative_module_relative_pathfile_contents + Add the given number of space characters to the beginning of + every non-blank line in `s`, and return the result. + (?m)^(?!$)_exception_traceback + Return a string containing a traceback message for the given + exc_info tuple (as returned by sys.exc_info()). + excout_SpoofOut_ellipsis_matchwantgot + Essentially the only subtle case: + >>> _ellipsis_match('aa...aa', 'aaa') + False + startposendpos_comment_lineReturn a commented form of the given line_strip_exception_details_OutputRedirectingPdbPdb + A specialized version of the python debugger that redirects stdout + to a given stream when interacting with the user. Stdout is *not* + redirected when traced code is executed. + __out__debugger_usednosigintsave_stdouttest_pathExpected a module: %rModule-relative files may not have absolute pathsbasedirfullpathCan't resolve paths relative to the module %r (it has no __file__)"Can't resolve paths relative to the module ""%r (it has no __file__)" + A single doctest example, consisting of source code and expected + output. `Example` defines the following attributes: + + - source: A single Python statement, always ending with a newline. 
+ The constructor adds a newline if needed. + + - want: The expected output from running the source code (either + from stdout, or a traceback in case of exception). `want` ends + with a newline unless it's empty, in which case it's an empty + string. The constructor adds a newline if needed. + + - exc_msg: The exception message generated by the example, if + the example is expected to generate an exception; or `None` if + it is not expected to generate an exception. This exception + message is compared against the return value of + `traceback.format_exception_only()`. `exc_msg` ends with a + newline unless it's `None`. The constructor adds a newline + if needed. + + - lineno: The line number within the DocTest string containing + this Example where the Example begins. This line number is + zero-based, with respect to the beginning of the DocTest. + + - indent: The example's indentation in the DocTest string. + I.e., the number of space characters that precede the + example's first prompt. + + - options: A dictionary mapping from option flags to True or + False, which is used to override default options for this + example. Any option flags not contained in this dictionary + are left at their default value (as specified by the + DocTestRunner's optionflags). By default, no options are set. + exc_msg + A collection of doctest examples that should be run in a single + namespace. Each `DocTest` defines the following attributes: + + - examples: the list of examples. + + - globs: The namespace (aka globals) that the examples should + be run in. + + - name: A name identifying the DocTest (typically, the name of + the object whose docstring this DocTest was extracted from). + + - filename: The name of the file that this DocTest was extracted + from, or `None` if the filename is unknown. + + - lineno: The line number within filename where this DocTest + begins, or `None` if the line number is unavailable. This + line number is zero-based, with respect to the beginning of + the file. + + - docstring: The string that the examples were extracted from, + or `None` if the string is unavailable. + examplesdocstring + Create a new DocTest containing the given examples. The + DocTest's globals are initialized with a copy of `globs`. + DocTest no longer accepts str; use DocTestParser insteadno examples1 example%d examples<%s %s from %s:%s (%s)> + A class used to parse strings containing doctest examples. + + # Source consists of a PS1 line followed by zero or more PS2 lines. + (?P + (?:^(?P [ ]*) >>> .*) # PS1 line + (?:\n [ ]* \.\.\. .*)*) # PS2 lines + \n? + # Want consists of any non-blank lines that do not start with PS1. + (?P (?:(?![ ]*$) # Not a blank line + (?![ ]*>>>) # Not a line starting with PS1 + .+$\n? # But any other line + )*) + r'''MULTILINE_EXAMPLE_RE + # Grab the traceback header. Different versions of Python have + # said different things on the first traceback line. + ^(?P Traceback\ \( + (?: most\ recent\ call\ last + | innermost\ last + ) \) : + ) + \s* $ # toss trailing whitespace on the header. + (?P .*?) # don't blink: absorb stuff until... + ^ (?P \w+ .*) # a line *starts* with alphanum. + _EXCEPTION_RE^[ ]*(#.*)?$_IS_BLANK_OR_COMMENT + Divide the given string into examples and intervening text, + and return them as a list of alternating Examples and strings. + Line numbers for the Examples are 0-based. The optional + argument `name` is a name identifying this string, and is only + used for error messages. 
+ _min_indentmin_indentcharno_parse_exampleget_doctest + Extract all doctest examples from the given string, and + collect them into a `DocTest` object. + + `globs`, `name`, `filename`, and `lineno` are attributes for + the new `DocTest` object. See the documentation for `DocTest` + for more information. + get_examples + Extract all doctest examples from the given string, and return + them as a list of `Example` objects. Line numbers are + 0-based, because it's most common in doctests that nothing + interesting appears on the same line as opening triple-quote, + and so the first interesting line is called "line 1" then. + + The optional argument `name` is a name identifying this + string, and is only used for error messages. + + Given a regular expression match from `_EXAMPLE_RE` (`m`), + return a pair `(source, want)`, where `source` is the matched + example's source code (with prompts and indentation stripped); + and `want` is the example's expected output (with indentation + stripped). + + `name` is the string's name, and `lineno` is the line number + where the example starts; both are used for error messages. + source_lines_check_prompt_blank_check_prefixslwant_lines *$wl_find_options#\s*doctest:\s*([^\n\'"]*)$_OPTION_DIRECTIVE_RE + Return a dictionary containing option overrides extracted from + option directives in the given source string. + + `name` is the string's name, and `lineno` is the line number + where the example starts; both are used for error messages. + line %r of the doctest for %s has an invalid option: %r'line %r of the doctest for %s ''has an invalid option: %r'line %r of the doctest for %s has an option directive on a line with no example: %r'line %r of the doctest for %s has an option ''directive on a line with no example: %r'^([ ]*)(?=\S)_INDENT_REReturn the minimum indentation of any non-blank line in `s`indents + Given the lines of a source string (including prompts and + leading indentation), check to make sure that every prompt is + followed by a space character. If any line is not followed by + a space character, then raise ValueError. + line %r of the docstring for %s lacks blank after %s: %r'line %r of the docstring for %s ''lacks blank after %s: %r' + Check that every line in the given list starts with the given + prefix; if any line does not, then raise a ValueError. + line %r of the docstring for %s has inconsistent leading whitespace: %r'line %r of the docstring for %s has ''inconsistent leading whitespace: %r' + A class used to extract the DocTests that are relevant to a given + object, from its docstring and the docstrings of its contained + objects. Doctests can currently be extracted from the following + object types: modules, functions, classes, methods, staticmethods, + classmethods, and properties. + recurseexclude_empty + Create a new doctest finder. + + The optional argument `parser` specifies a class or + function that should be used to create new DocTest objects (or + objects that implement the same interface as DocTest). The + signature for this factory function should match the signature + of the DocTest constructor. + + If the optional argument `recurse` is false, then `find` will + only examine the given object, and not any contained objects. + + If the optional argument `exclude_empty` is false, then `find` + will include tests for objects with empty docstrings. + _verbose_recurse_exclude_emptyextraglobs + Return a list of the DocTests that are defined by the given + object's docstring, or by any of its contained objects' + docstrings. 
+ + The optional parameter `module` is the module that contains + the given object. If the module is not specified or is None, then + the test finder will attempt to automatically determine the + correct module. The object's module is used: + + - As a default namespace, if `globs` is not specified. + - To prevent the DocTestFinder from extracting DocTests + from objects that are imported from other modules. + - To find the name of the file containing the object. + - To help find the line number of the object within its + file. + + Contained objects whose module does not match `module` are ignored. + + If `module` is False, no attempt to find the module will be made. + This is obscure, of use mostly in tests: if `module` is False, or + is None but cannot be found automatically, then all objects are + considered to belong to the (non-existent) module, so all contained + objects will (recursively) be searched for doctests. + + The globals for each DocTest is formed by combining `globs` + and `extraglobs` (bindings in `extraglobs` override bindings + in `globs`). A new copy of the globals dictionary is created + for each DocTest. If `globs` is not specified, then it + defaults to the module's `__dict__`, if specified, or {} + otherwise. If `extraglobs` is not specified, then it defaults + to {}. + + DocTestFinder.find: name must be given when obj.__name__ doesn't exist: %r"DocTestFinder.find: name must be given ""when obj.__name__ doesn't exist: %r"getmodulegetsourcefilegetfile<]>getlines_find_from_module + Return true if the given object is defined in the given + module. + isfunctionismethoddescriptorobj_modisclassobject must be a class or function + Find tests for the given object and any contained objects, and + add them to `tests`. + Finding tests in %s_get_testvalnameisroutineunwrap__test__DocTestFinder.find: __test__ keys must be strings: %r"DocTestFinder.find: __test__ keys ""must be strings: %r"DocTestFinder.find: __test__ values must be strings, functions, methods, classes, or modules: %r"DocTestFinder.find: __test__ values ""must be strings, functions, methods, ""classes, or modules: %r"%s.__test__.%s + Return a DocTest for the given object, if it defines a docstring; + otherwise, return None. + _find_lineno + Return a line number of the given object's docstring. Note: + this method assumes that the object has a docstring. + ^\s*class\s*%s\bismethodistracebackisframeiscode(^|.*:)\s*\w*("|\') + A class used to run DocTest test cases, and accumulate statistics. + The `run` method is used to process a single DocTest case. It + returns a tuple `(f, t)`, where `t` is the number of test cases + tried, and `f` is the number of test cases that failed. + + >>> tests = DocTestFinder().find(_TestClass) + >>> runner = DocTestRunner(verbose=False) + >>> tests.sort(key = lambda test: test.name) + >>> for test in tests: + ... print(test.name, '->', runner.run(test)) + _TestClass -> TestResults(failed=0, attempted=2) + _TestClass.__init__ -> TestResults(failed=0, attempted=2) + _TestClass.get -> TestResults(failed=0, attempted=2) + _TestClass.square -> TestResults(failed=0, attempted=1) + + The `summarize` method prints a summary of all the test cases that + have been run by the runner, and returns an aggregated `(f, t)` + tuple: + + >>> runner.summarize(verbose=1) + 4 items passed all tests: + 2 tests in _TestClass + 2 tests in _TestClass.__init__ + 2 tests in _TestClass.get + 1 tests in _TestClass.square + 7 tests in 4 items. + 7 passed and 0 failed. + Test passed. 
+ TestResults(failed=0, attempted=7) + + The aggregated number of tried examples and failed examples is + also available via the `tries` and `failures` attributes: + + >>> runner.tries + 7 + >>> runner.failures + 0 + + The comparison between expected outputs and actual outputs is done + by an `OutputChecker`. This comparison may be customized with a + number of option flags; see the documentation for `testmod` for + more information. If the option flags are insufficient, then the + comparison may also be customized by passing a subclass of + `OutputChecker` to the constructor. + + The test runner's display output can be controlled in two ways. + First, an output function (`out) can be passed to + `TestRunner.run`; this function will be called with strings that + should be displayed. It defaults to `sys.stdout.write`. If + capturing the output is not sufficient, then the display output + can be also customized by subclassing DocTestRunner, and + overriding the methods `report_start`, `report_success`, + `report_unexpected_exception`, and `report_failure`. + DIVIDERchecker + Create a new test runner. + + Optional keyword arg `checker` is the `OutputChecker` that + should be used to compare the expected outputs and actual + outputs of doctest examples. + + Optional keyword arg 'verbose' prints lots of stuff if true, + only failures if false; by default, it's true iff '-v' is in + sys.argv. + + Optional argument `optionflags` can be used to control how the + test runner compares expected output to actual output, and how + it displays failures. See the documentation for `testmod` for + more information. + _checker-voriginal_optionflagstries_name2ft_fakeoutreport_startexample + Report that the test runner is about to process the given + example. (Only displays a message if verbose=True) + Trying: +Expecting: +Expecting nothing +report_success + Report that the given example ran successfully. (Only + displays a message if verbose=True) + ok +report_failure + Report that the given example failed. + _failure_headeroutput_differencereport_unexpected_exception + Report that the given example raised an unexpected exception. + Exception raised: +File "%s", line %s, in %sLine %s, in %sFailed example:__runcompileflags + Run the examples in `test`. Write the outcome of each example + with one of the `DocTestRunner.report_*` methods, using the + writer function `out`. `compileflags` is the set of compiler + flags that should be used to execute examples. Return a tuple + `(f, t)`, where `t` is the number of examples tried, and `f` + is the number of examples that failed. The examples are run + in the namespace `test.globs`. + SUCCESSBOOMexamplenumoptionflagdebuggerunknown outcome__record_outcome + Record the fact that the given DocTest (`test`) generated `f` + failures out of `t` tried examples. + f2.+)\[(?P\d+)\]>$r'.+)'r'\[(?P\d+)\]>$'__LINECACHE_FILENAME_RE__patched_linecache_getlinesmodule_globalssave_linecache_getlinesclear_globs + Run the examples in `test`, and display the results using the + writer function `out`. + + The examples are run in the namespace `test.globs`. If + `clear_globs` is true (the default), then this namespace will + be cleared after the test runs, to help with garbage + collection. If you would like to examine the namespace after + the test completes, then use `clear_globs=False`. + + `compileflags` gives the set of flags that should be used by + the Python compiler when running the examples. 
If not + specified, then it will default to the set of future-import + flags that apply to `globs`. + + The output of each example is checked using + `DocTestRunner.check_output`, and the results are formatted by + the `DocTestRunner.report_*` methods. + save_tracesave_set_tracesave_displayhooksummarize + Print a summary of all the test cases that have been run by + this DocTestRunner, and return a tuple `(f, t)`, where `f` is + the total number of failed examples, and `t` is the total + number of tried examples. + + The optional `verbose` argument controls how detailed the + summary is. If the verbosity is not specified, then the + DocTestRunner's verbosity is used. + notestspassedfailedtotalttotalfitems had no tests:items passed all tests: %3d tests in %sitems had failures: %3d of %3d in %stests initems.passed andfailed.***Test Failed***failures.Test passed.merge + A class used to check the whether the actual output from a doctest + example matches the expected output. `OutputChecker` defines two + methods: `check_output`, which compares a given pair of outputs, + and returns true if they match; and `output_difference`, which + returns a string describing the differences between two outputs. + _toAscii + Convert string to hex-escaped ASCII string. + + Return True iff the actual output from an example (`got`) + matches the expected output (`want`). These strings are + always considered to match if they are identical; but + depending on what option flags the test runner is using, + several non-exact match types are also possible. See the + documentation for `TestRunner` for more information about + option flags. + True +1 +False +0 +(?m)^%s\s*?$(?m)^[^\S\n]+$_do_a_fancy_diff + Return a string describing the differences between the + expected output for a given example (`example`) and the actual + output (`got`). `optionflags` is the set of option flags used + to compare `want` and `got`. + (?m)^[ ]*(?= +)got_linesunified diff with -expected +actualcontext diff with expected followed by actualenginendiff with -expected +actualBad diff optionDifferences (%s): +Expected: +%sGot: +%sExpected: +%sGot nothing +Expected nothing +Got: +%sExpected nothing +Got nothing +A DocTest example has failed in debugging mode. + + The exception instance has variables: + + - test: the DocTest object being run + + - example: the Example object that failed + + - got: the actual output + A DocTest example has encountered an unexpected exception + + The exception instance has variables: + + - test: the DocTest object being run + + - example: the Example object that failed + + - exc_info: the exception info + Run doc tests but raise an exception as soon as there is a failure. + + If an unexpected exception occurs, an UnexpectedException is raised. + It contains the test, the example, and the original exception: + + >>> runner = DebugRunner(verbose=False) + >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42', + ... {}, 'foo', 'foo.py', 0) + >>> try: + ... runner.run(test) + ... except UnexpectedException as f: + ... failure = f + + >>> failure.test is test + True + + >>> failure.example.want + '42\n' + + >>> exc_info = failure.exc_info + >>> raise exc_info[1] # Already has the traceback + Traceback (most recent call last): + ... + KeyError + + We wrap the original exception to give the calling application + access to the test and example information. + + If the output doesn't match, then a DocTestFailure is raised: + + >>> test = DocTestParser().get_doctest(''' + ... >>> x = 1 + ... >>> x + ... 2 + ... 
''', {}, 'foo', 'foo.py', 0) + + >>> try: + ... runner.run(test) + ... except DocTestFailure as f: + ... failure = f + + DocTestFailure objects provide access to the test: + + >>> failure.test is test + True + + As well as to the example: + + >>> failure.example.want + '2\n' + + and the actual output: + + >>> failure.got + '1\n' + + If a failure or error occurs, the globals are left intact: + + >>> del test.globs['__builtins__'] + >>> test.globs + {'x': 1} + + >>> test = DocTestParser().get_doctest(''' + ... >>> x = 2 + ... >>> raise KeyError + ... ''', {}, 'foo', 'foo.py', 0) + + >>> runner.run(test) + Traceback (most recent call last): + ... + doctest.UnexpectedException: + + >>> del test.globs['__builtins__'] + >>> test.globs + {'x': 2} + + But the globals are cleared if there is no error: + + >>> test = DocTestParser().get_doctest(''' + ... >>> x = 2 + ... ''', {}, 'foo', 'foo.py', 0) + + >>> runner.run(test) + TestResults(failed=0, attempted=1) + + >>> test.globs + {} + + reportraise_on_errorm=None, name=None, globs=None, verbose=None, report=True, + optionflags=0, extraglobs=None, raise_on_error=False, + exclude_empty=False + + Test examples in docstrings in functions and classes reachable + from module m (or the current module if m is not supplied), starting + with m.__doc__. + + Also test examples reachable from dict m.__test__ if it exists and is + not None. m.__test__ maps names to functions, classes and strings; + function and class docstrings are tested even if the name is private; + strings are tested directly, as if they were docstrings. + + Return (#failures, #tests). + + See help(doctest) for an overview. + + Optional keyword arg "name" gives the name of the module; by default + use m.__name__. + + Optional keyword arg "globs" gives a dict to be used as the globals + when executing examples; by default, use m.__dict__. A copy of this + dict is actually used for each docstring, so that each docstring's + examples start with a clean slate. + + Optional keyword arg "extraglobs" gives a dictionary that should be + merged into the globals that are used to execute examples. By + default, no extra globals are used. This is new in 2.4. + + Optional keyword arg "verbose" prints lots of stuff if true, prints + only failures if false; by default, it's true iff "-v" is in sys.argv. + + Optional keyword arg "report" prints a summary at the end when true, + else prints nothing at the end. In verbose mode, the summary is + detailed, else very brief (in fact, empty if all tests passed). + + Optional keyword arg "optionflags" or's together module constants, + and defaults to 0. This is new in 2.3. Possible values (see the + docs for details): + + DONT_ACCEPT_TRUE_FOR_1 + DONT_ACCEPT_BLANKLINE + NORMALIZE_WHITESPACE + ELLIPSIS + SKIP + IGNORE_EXCEPTION_DETAIL + REPORT_UDIFF + REPORT_CDIFF + REPORT_NDIFF + REPORT_ONLY_FIRST_FAILURE + + Optional keyword arg "raise_on_error" raises an exception on the + first unexpected exception or failure. This allows failures to be + post-mortem debugged. + + Advanced tomfoolery: testmod runs methods of a local instance of + class doctest.Tester, then merges the results into (or creates) + global Tester instance doctest.master. Methods of doctest.master + can be called directly too, if you want to do something unusual. + Passing report=0 to testmod is especially useful then, to delay + displaying a summary. Invoke doctest.master.summarize(verbose) + when you're done fiddling. + testmod: module required; %r + Test examples in the given file. 
Return (#failures, #tests). + + Optional keyword arg "module_relative" specifies how filenames + should be interpreted: + + - If "module_relative" is True (the default), then "filename" + specifies a module-relative path. By default, this path is + relative to the calling module's directory; but if the + "package" argument is specified, then it is relative to that + package. To ensure os-independence, "filename" should use + "/" characters to separate path segments, and should not + be an absolute path (i.e., it may not begin with "/"). + + - If "module_relative" is False, then "filename" specifies an + os-specific path. The path may be absolute or relative (to + the current working directory). + + Optional keyword arg "name" gives the name of the test; by default + use the file's basename. + + Optional keyword argument "package" is a Python package or the + name of a Python package whose directory should be used as the + base directory for a module relative filename. If no package is + specified, then the calling module's directory is used as the base + directory for module relative filenames. It is an error to + specify "package" if "module_relative" is False. + + Optional keyword arg "globs" gives a dict to be used as the globals + when executing examples; by default, use {}. A copy of this dict + is actually used for each docstring, so that each docstring's + examples start with a clean slate. + + Optional keyword arg "extraglobs" gives a dictionary that should be + merged into the globals that are used to execute examples. By + default, no extra globals are used. + + Optional keyword arg "verbose" prints lots of stuff if true, prints + only failures if false; by default, it's true iff "-v" is in sys.argv. + + Optional keyword arg "report" prints a summary at the end when true, + else prints nothing at the end. In verbose mode, the summary is + detailed, else very brief (in fact, empty if all tests passed). + + Optional keyword arg "optionflags" or's together module constants, + and defaults to 0. Possible values (see the docs for details): + + DONT_ACCEPT_TRUE_FOR_1 + DONT_ACCEPT_BLANKLINE + NORMALIZE_WHITESPACE + ELLIPSIS + SKIP + IGNORE_EXCEPTION_DETAIL + REPORT_UDIFF + REPORT_CDIFF + REPORT_NDIFF + REPORT_ONLY_FIRST_FAILURE + + Optional keyword arg "raise_on_error" raises an exception on the + first unexpected exception or failure. This allows failures to be + post-mortem debugged. + + Optional keyword arg "parser" specifies a DocTestParser (or + subclass) that should be used to extract tests from the files. + + Optional keyword arg "encoding" specifies an encoding that should + be used to convert the file to unicode. + + Advanced tomfoolery: testmod runs methods of a local instance of + class doctest.Tester, then merges the results into (or creates) + global Tester instance doctest.master. Methods of doctest.master + can be called directly too, if you want to do something unusual. + Passing report=0 to testmod is especially useful then, to delay + displaying a summary. Invoke doctest.master.summarize(verbose) + when you're done fiddling. + Package may only be specified for module-relative paths."Package may only be specified for module-""relative paths."NoName + Test examples in the given object's docstring (`f`), using `globs` + as globals. Optional argument `name` is used in failure messages. + If the optional argument `verbose` is true, then generate output + even if there are no failures. 
+ + `compileflags` gives the set of flags that should be used by the + Python compiler when running the examples. If not specified, then + it will default to the set of future-import flags that apply to + `globs`. + + Optional keyword arg `optionflags` specifies options for the + testing and output. See the documentation for `testmod` for more + information. + _unittest_reportflagsSets the unittest option flags. + + The old flag is returned so that a runner could restore the old + value if it wished to: + + >>> import doctest + >>> old = doctest._unittest_reportflags + >>> doctest.set_unittest_reportflags(REPORT_NDIFF | + ... REPORT_ONLY_FIRST_FAILURE) == old + True + + >>> doctest._unittest_reportflags == (REPORT_NDIFF | + ... REPORT_ONLY_FIRST_FAILURE) + True + + Only reporting flags can be set: + + >>> doctest.set_unittest_reportflags(ELLIPSIS) + Traceback (most recent call last): + ... + ValueError: ('Only reporting flags allowed', 8) + + >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF | + ... REPORT_ONLY_FIRST_FAILURE) + True + Only reporting flags allowedDocTestCase_dt_optionflags_dt_checker_dt_test_dt_setUp_dt_tearDownformat_failureunknown line numberlnameFailed doctest test for %s + File "%s", line %s, in %s + +%s'Failed doctest test for %s\n'' File "%s", line %s, in %s\n\n%s'Run the test case without results and without catching exceptions + + The unit test framework includes a debug method on test cases + and test suites to support post-mortem debugging. The test code + is run in such a way that errors are not caught. This way a + caller can catch the errors and initiate post-mortem debugging. + + The DocTestCase provides a debug method that raises + UnexpectedException errors if there is an unexpected + exception: + + >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42', + ... {}, 'foo', 'foo.py', 0) + >>> case = DocTestCase(test) + >>> try: + ... case.debug() + ... except UnexpectedException as f: + ... failure = f + + The UnexpectedException contains the test, the example, and + the original exception: + + >>> failure.test is test + True + + >>> failure.example.want + '42\n' + + >>> exc_info = failure.exc_info + >>> raise exc_info[1] # Already has the traceback + Traceback (most recent call last): + ... + KeyError + + If the output doesn't match, then a DocTestFailure is raised: + + >>> test = DocTestParser().get_doctest(''' + ... >>> x = 1 + ... >>> x + ... 2 + ... ''', {}, 'foo', 'foo.py', 0) + >>> case = DocTestCase(test) + + >>> try: + ... case.debug() + ... except DocTestFailure as f: + ... failure = f + + DocTestFailure objects provide access to the test: + + >>> failure.test is test + True + + As well as to the example: + + >>> failure.example.want + '2\n' + + and the actual output: + + >>> failure.got + '1\n' + + Doctest: SkipDocTestCaseDocTestSuite will not work with -O2 and abovetest_skipSkipping tests from %s_DocTestSuite_removeTestAtIndextest_finder + Convert doctest tests for a module to a unittest test suite. + + This converts each documentation string in a module that + contains doctest tests to a unittest test case. If any of the + tests in a doc string fail, then the test case fails. An exception + is raised showing the name of the file containing the test and a + (sometimes approximate) line number. + + The `module` argument provides the module to be tested. The argument + can be either a module or a module name. + + If no argument is given, the calling module is used. 
+ + A number of options may be provided as keyword arguments: + + setUp + A set-up function. This is called before running the + tests in each file. The setUp function will be passed a DocTest + object. The setUp function can access the test globals as the + globs attribute of the test passed. + + tearDown + A tear-down function. This is called after running the + tests in each file. The tearDown function will be passed a DocTest + object. The tearDown function can access the test globals as the + globs attribute of the test passed. + + globs + A dictionary containing initial global variables for the tests. + + optionflags + A set of doctest option flags expressed as an integer. + DocFileCaseFailed doctest test for %s + File "%s", line 0 + +%sDocFileTestA unittest suite for one or more doctest files. + + The path to each doctest file is given as a string; the + interpretation of that string depends on the keyword argument + "module_relative". + + A number of options may be provided as keyword arguments: + + module_relative + If "module_relative" is True, then the given file paths are + interpreted as os-independent module-relative paths. By + default, these paths are relative to the calling module's + directory; but if the "package" argument is specified, then + they are relative to that package. To ensure os-independence, + "filename" should use "/" characters to separate path + segments, and may not be an absolute path (i.e., it may not + begin with "/"). + + If "module_relative" is False, then the given file paths are + interpreted as os-specific paths. These paths may be absolute + or relative (to the current working directory). + + package + A Python package or the name of a Python package whose directory + should be used as the base directory for module relative paths. + If "package" is not specified, then the calling module's + directory is used as the base directory for module relative + filenames. It is an error to specify "package" if + "module_relative" is False. + + setUp + A set-up function. This is called before running the + tests in each file. The setUp function will be passed a DocTest + object. The setUp function can access the test globals as the + globs attribute of the test passed. + + tearDown + A tear-down function. This is called after running the + tests in each file. The tearDown function will be passed a DocTest + object. The tearDown function can access the test globals as the + globs attribute of the test passed. + + globs + A dictionary containing initial global variables for the tests. + + optionflags + A set of doctest option flags expressed as an integer. + + parser + A DocTestParser (or subclass) that should be used to extract + tests from the files. + + encoding + An encoding that will be used to convert the files to unicode. + Extract script from text with examples. + + Converts text with examples to a Python script. Example input is + converted to regular code. Example output and all other words + are converted to comments: + + >>> text = ''' + ... Here are examples of simple math. + ... + ... Python has super accurate integer addition + ... + ... >>> 2 + 2 + ... 5 + ... + ... And very friendly error messages: + ... + ... >>> 1/0 + ... To Infinity + ... And + ... Beyond + ... + ... You can use logic if you want: + ... + ... >>> if 0: + ... ... blah + ... ... blah + ... ... + ... + ... Ho hum + ... ''' + + >>> print(script_from_examples(text)) + # Here are examples of simple math. 
+ # + # Python has super accurate integer addition + # + 2 + 2 + # Expected: + ## 5 + # + # And very friendly error messages: + # + 1/0 + # Expected: + ## To Infinity + ## And + ## Beyond + # + # You can use logic if you want: + # + if 0: + blah + blah + # + # Ho hum + + piece# Expected:## Extract the test sources from a doctest docstring as a script. + + Provide the module (or dotted name of the module) containing the + test to be debugged and the name (within the module) of the object + with the doc string with tests to be debugged. + not found in teststestsrcpmDebug a single doctest docstring, in argument `src`'debug_scriptDebug a test script. `src` is the script, as a string.interactionexec(%r)Debug a single doctest docstring. + + Provide the module (or dotted name of the module) containing the + test to be debugged and the name (within the module) of the object + with the docstring with tests to be debugged. + _TestClass + A pointless class, for sanity-checking of docstring testing. + + Methods: + square() + get() + + >>> _TestClass(13).get() + _TestClass(-12).get() + 1 + >>> hex(_TestClass(13).square().get()) + '0xa9' + val -> _TestClass object with associated value val. + + >>> t = _TestClass(123) + >>> print(t.get()) + 123 + squaresquare() -> square TestClass's associated value + + >>> _TestClass(13).square().get() + 169 + get() -> return TestClass's associated value. + + >>> x = _TestClass(-42) + >>> print(x.get()) + -42 + + Example of a string object, searched as-is. + >>> x = 1; y = 2 + >>> x + y, x * y + (3, 2) + + In 2.2, boolean expressions displayed + 0 or 1. By default, we still accept + them. This can be disabled by passing + DONT_ACCEPT_TRUE_FOR_1 to the new + optionflags argument. + >>> 4 == 4 + 1 + >>> 4 == 4 + True + >>> 4 > 4 + 0 + >>> 4 > 4 + False + bool-int equivalence + Blank lines can be marked with : + >>> print('foo\n\nbar\n') + foo + + bar + + blank lines + If the ellipsis flag is used, then '...' can be used to + elide substrings in the desired output: + >>> print(list(range(1000))) #doctest: +ELLIPSIS + [0, 1, 2, ..., 999] + + If the whitespace normalization flag is used, then + differences in whitespace are ignored. + >>> print(list(range(30))) #doctest: +NORMALIZE_WHITESPACE + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29] + whitespace normalizationdoctest runner--verboseprint very verbose output for all tests-o--optionspecify a doctest option flag to apply to the test run; may be specified more than once to apply multiple options'specify a doctest option flag to apply'' to the test run; may be specified more'' than once to apply multiple options'-f--fail-faststop running tests after first failure (this is a shorthand for -o FAIL_FAST, and is in addition to any other -o options)'stop running tests after first failure (this'' is a shorthand for -o FAIL_FAST, and is'' in addition to any other -o options)'file containing the tests to runtestfilesfail_fast# Module doctest.# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).# Major enhancements and refactoring by:# Jim Fulton# Edward Loper# Provided as-is; use at your own risk; no warranty; no promises; enjoy!# 0, Option Flags# 1. Utility Functions# 2. Example & DocTest# 3. Doctest Parser# 4. Doctest Finder# 5. Doctest Runner# 6. Test Functions# 7. Unittest Support# 8. 
Debugging Support# There are 4 basic classes:# - Example: a pair, plus an intra-docstring line number.# - DocTest: a collection of examples, parsed from a docstring, plus# info about where the docstring came from (name, filename, lineno).# - DocTestFinder: extracts DocTests from a given object's docstring and# its contained objects' docstrings.# - DocTestRunner: runs DocTest cases, and accumulates statistics.# So the basic picture is:# list of:# +------+ +---------+ +-------+# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|# | Example |# | ... |# +---------+# Option constants.# Create a new flag unless `name` is already known.# Special string markers for use in `want` strings:######################################################################## Table of Contents# 1. Utility Functions# 2. Example & DocTest -- store test cases# 3. DocTest Parser -- extracts examples from strings# 4. DocTest Finder -- extracts test cases from objects# 5. DocTest Runner -- runs test cases# 6. Test Functions -- convenient wrappers for testing# 7. Unittest Support# 8. Debugging Support# 9. Example Usage## 1. Utility Functions# We have two cases to cover and we need to make sure we do# them in the right order# get_data() opens files as 'rb', so one must do the equivalent# conversion as universal newlines would do.# This regexp matches the start of non-blank lines:# Get a traceback message.# Override some StringIO methods.# If anything at all was written, make sure there's a trailing# newline. There's no way for the expected output to indicate# that a trailing newline is missing.# Worst-case linear-time ellipsis matching.# Find "the real" strings.# Deal with exact matches possibly needed at one or both ends.# starts with exact match# ends with exact match# Exact end matches required more characters than we have, as in# _ellipsis_match('aa...aa', 'aaa')# For the rest, we only need to find the leftmost non-overlapping# match for each piece. If there's no overall match that way alone,# there's no overall match period.# w may be '' at times, if there are consecutive ellipses, or# due to an ellipsis at the start or end of `want`. That's OK.# Search for an empty string succeeds, and doesn't change startpos.# Support for IGNORE_EXCEPTION_DETAIL.# Get rid of everything except the exception name; in particular, drop# the possibly dotted module path (if any) and the exception message (if# any). We assume that a colon is never part of a dotted name, or of an# exception name.# E.g., given# "foo.bar.MyError: la di da"# return "MyError"# Or for "abc.def" or "abc.def:\n" return "def".# The exception name must appear on the first line.# retain up to the first colon (if any)# retain just the exception name# do not play signal games in the pdb# still use input() to get user input# Calling set_continue unconditionally would break unit test# coverage reporting, as Bdb.set_continue calls sys.settrace(None).# Redirect stdout to the given stream.# Call Pdb's trace dispatch method.# [XX] Normalize with respect to os.path.pardir?# Normalize the path. On Windows, replace "/" with "\".# Find the base directory for the path.# A normal module/package# An interactive session.# A module w/o __file__ (this includes builtins)# Combine the base directory and the test path.## 2. Example & DocTest## - An "example" is a pair, where "source" is a## fragment of source code, and "want" is the expected output for## "source." 
The Example class also includes information about## where the example was extracted from.## - A "doctest" is a collection of examples, typically extracted from## a string (such as an object's docstring). The DocTest class also## includes information about where the string was extracted from.# Normalize inputs.# Store properties.# This lets us sort tests by name:## 3. DocTestParser# This regular expression is used to find doctest examples in a# string. It defines three groups: `source` is the source code# (including leading indentation and prompts); `indent` is the# indentation of the first (PS1) line of the source code; and# `want` is the expected output (including leading indentation).# A regular expression for handling `want` strings that contain# expected exceptions. It divides `want` into three pieces:# - the traceback header line (`hdr`)# - the traceback stack (`stack`)# - the exception message (`msg`), as generated by# traceback.format_exception_only()# `msg` may have multiple lines. We assume/require that the# exception message is the first non-indented line starting with a word# character following the traceback header line.# A callable returning a true value iff its argument is a blank line# or contains a single comment.# If all lines begin with the same indentation, then strip it.# Find all doctest examples in the string:# Add the pre-example text to `output`.# Update lineno (lines before this example)# Extract info from the regexp match.# Create an Example, and add it to the list.# Update lineno (lines inside this example)# Update charno.# Add any remaining post-example text to `output`.# Get the example's indentation level.# Divide source into lines; check that they're properly# indented; and then strip their indentation & prompts.# Divide want into lines; check that it's properly indented; and# then strip the indentation. Spaces before the last newline should# be preserved, so plain rstrip() isn't good enough.# forget final newline & spaces after it# If `want` contains a traceback message, then extract it.# Extract options from the source.# This regular expression looks for option directives in the# source code of an example. Option directives are comments# starting with "doctest:". Warning: this may give false# positives for string-literals that contain the string# "#doctest:". Eliminating these false positives would require# actually parsing the string; but we limit them by ignoring any# line containing "#doctest:" that is *followed* by a quote mark.# (note: with the current regexp, this will match at most once:)# This regular expression finds the indentation of every non-blank# line in a string.## 4. DocTest Finder# If name was not specified, then extract it from the object.# Find the module that contains the given object (if obj is# a module, then module=obj.). Note: this may fail, in which# case module will be None.# Read the module's source code. This is used by# DocTestFinder._find_lineno to find the line number for a# given object's docstring.# Check to see if it's one of our special internal "files"# (see __patched_linecache_getlines).# Supply the module globals in case the module was# originally loaded via a PEP 302 loader and# file is not a valid filesystem path# No access to a loader, so assume it's a normal# filesystem path# Initialize globals, and merge in extraglobs.# provide a default module name# Recursively explore `obj`, extracting DocTests.# Sort the tests by alpha order of names, for consistency in# verbose-mode output. 
This was a feature of doctest in Pythons# <= 2.3 that got lost by accident in 2.4. It was repaired in# 2.4.4 and 2.5.# [XX] no easy way to tell otherwise# [XX] no way not be sure.# If we've already processed this object, then ignore it.# Find a test for this object, and add it to the list of tests.# Look for tests in a module's contained objects.# Recurse to functions & classes.# Look for tests in a module's __test__ dictionary.# Look for tests in a class's contained objects.# Special handling for staticmethod/classmethod.# Recurse to methods, properties, and nested classes.# Extract the object's docstring. If it doesn't have one,# then return None (no test for this object).# Find the docstring's location in the file.# Don't bother if the docstring is empty.# Return a DocTest for this object.# __file__ can be None for namespace packages.# Find the line number for modules.# Find the line number for classes.# Note: this could be fooled if a class is defined multiple# times in a single file.# Find the line number for functions & methods.# Find the line number where the docstring starts. Assume# that it's the first line that begins with a quote mark.# Note: this could be fooled by a multiline function# signature, where a continuation line begins with a quote# mark.# We couldn't find the line number.## 5. DocTest Runner# This divider string is used to separate failure messages, and to# separate sections of the summary.# Keep track of the examples we've run.# Create a fake output target for capturing doctest output.#/////////////////////////////////////////////////////////////////# Reporting methods# DocTest Running# Keep track of the number of failures and tries.# Save the option flags (since option directives can be used# to modify them).# `outcome` state# Process each example.# If REPORT_ONLY_FIRST_FAILURE is set, then suppress# reporting after the first failure.# Merge in the example's options.# If 'SKIP' is set, then skip this example.# Record that we started this example.# Use a special filename for compile(), so we can retrieve# the source code during interactive debugging (see# __patched_linecache_getlines).# Run the example in the given context (globs), and record# any exception that gets raised. (But don't intercept# keyboard interrupts.)# Don't blink! 
This is where the user's code gets run.# ==== Example Finished ====# the actual output# guilty until proved innocent or insane# If the example executed without raising any exceptions,# verify its output.# The example raised an exception: check if it was expected.# If `example.exc_msg` is None, then we weren't expecting# an exception.# We expected an exception: see whether it matches.# Another chance if they didn't care about the detail.# Report the outcome.# Restore the option flags (in case they were modified)# Record and return the number of failures and tries.# Use backslashreplace error handling on write# Patch pdb.set_trace to restore sys.stdout during interactive# debugging (so it's not still redirected to self._fakeout).# Note that the interactive output will go to *our*# save_stdout, even if that's not the real sys.stdout; this# allows us to write test cases for the set_trace behavior.# Patch linecache.getlines, so we can see the example's source# when we're inside the debugger.# Make sure sys.displayhook just prints the value to stdout# Summarization# Backward compatibility cruft to maintain doctest.master.# Don't print here by default, since doing# so breaks some of the buildbots#print("*** DocTestRunner.merge: '" + name + "' in both" \# " testers; summing outcomes.")# If `want` contains hex-escaped character such as "\u1234",# then `want` is a string of six characters(e.g. [\,u,1,2,3,4]).# On the other hand, `got` could be another sequence of# characters such as [\u1234], so `want` and `got` should# be folded to hex-escaped ASCII string to compare.# Handle the common case first, for efficiency:# if they're string-identical, always return true.# The values True and False replaced 1 and 0 as the return# value for boolean comparisons in Python 2.3.# can be used as a special sequence to signify a# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.# Replace in want with a blank line.# If a line in got contains only spaces, then remove the# spaces.# This flag causes doctest to ignore any differences in the# contents of whitespace strings. Note that this can be used# in conjunction with the ELLIPSIS flag.# The ELLIPSIS flag says to let the sequence "..." in `want`# match any substring in `got`.# We didn't find any match; return false.# Should we do a fancy diff?# Not unless they asked for a fancy diff.# If expected output uses ellipsis, a meaningful fancy diff is# too hard ... or maybe not. In two real-life failures Tim saw,# a diff was a major help anyway, so this is commented out.# [todo] _ellipsis_match() knows which pieces do and don't match,# and could be the basis for a kick-ass diff in this case.##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:## return False# ndiff does intraline difference marking, so can be useful even# for 1-line differences.# The other diff types need at least a few lines to be helpful.# If s are being used, then replace blank lines# with in the actual output string.# Check if we should use diff.# Split want & got into lines.# Use difflib to find their differences.# strip the diff header# If we're not using diff, then simply list the expected# output followed by the actual output.## 6. 
[CodeQL string-pool dump elided. This blob appears to contain the string literals, comments, and docstrings extracted from Python's standard-library doctest module, with each string stored twice as a paired bytes/str entry. The recoverable content includes: the doctest module docstring (the basic pattern of ending a module with a _test() function that calls doctest.testmod(), running it as a script, and the -v / verbose=True reporting behaviour); the option-flag names (DONT_ACCEPT_TRUE_FOR_1, DONT_ACCEPT_BLANKLINE, NORMALIZE_WHITESPACE, ELLIPSIS, SKIP, IGNORE_EXCEPTION_DETAIL, REPORT_UDIFF, REPORT_CDIFF, REPORT_NDIFF, REPORT_ONLY_FIRST_FAILURE, FAIL_FAST); the docstrings of the Example, DocTest, DocTestParser, DocTestFinder, DocTestRunner, OutputChecker, DocTestFailure, UnexpectedException, and DebugRunner classes; the testmod, testfile, and run_docstring_examples docstrings; the unittest-integration docstrings (DocTestSuite, DocFileSuite, set_unittest_reportflags); the debugging-support docstrings (script_from_examples, testsource, debug_src, debug); and the _TestClass sanity-check examples.]
can be used to + elide substrings in the desired output: + >>> print(list(range(1000))) #doctest: +ELLIPSIS + [0, 1, 2, ..., 999] + 'b'ellipsis'u'ellipsis'b' + If the whitespace normalization flag is used, then + differences in whitespace are ignored. + >>> print(list(range(30))) #doctest: +NORMALIZE_WHITESPACE + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29] + 'u' + If the whitespace normalization flag is used, then + differences in whitespace are ignored. + >>> print(list(range(30))) #doctest: +NORMALIZE_WHITESPACE + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29] + 'b'whitespace normalization'u'whitespace normalization'b'doctest runner'u'doctest runner'b'--verbose'u'--verbose'b'print very verbose output for all tests'u'print very verbose output for all tests'b'-o'u'-o'b'--option'u'--option'b'specify a doctest option flag to apply to the test run; may be specified more than once to apply multiple options'u'specify a doctest option flag to apply to the test run; may be specified more than once to apply multiple options'b'-f'u'-f'b'--fail-fast'u'--fail-fast'b'stop running tests after first failure (this is a shorthand for -o FAIL_FAST, and is in addition to any other -o options)'u'stop running tests after first failure (this is a shorthand for -o FAIL_FAST, and is in addition to any other -o options)'b'file'u'file'b'file containing the tests to run'u'file containing the tests to run'u'doctest'Parser driver. + +This provides a high-level interface to parse a file into a syntax tree. + +Guido van Rossum Driverload_grammarpkgutilpgenconvertparse_tokensParse a series of tokens and return the syntax tree.setupline_textquintuples_linenos_columnCOMMENTOP%s %r (prefix=%r)tok_nameaddtokenStop.incomplete inputparse_stream_rawParse a stream and return the syntax tree.generate_tokensparse_streamparse_fileParse a file and return the syntax tree.parse_stringParse a string and return the syntax tree._generate_pickle_name.txt.pickleGrammar.txtgpLoad the grammar (maybe from a pickle)._newerGenerating grammar tables from %sgenerate_grammarWriting grammar tables to %sWriting failed: %sGrammarInquire whether file a was written since file b.getmtimeload_packaged_grammargrammar_sourceNormally, loads a pickled grammar by doing + pkgutil.get_data(package, pickled_grammar) + where *pickled_grammar* is computed from *grammar_source* by adding the + Python version and using a ``.pickle`` extension. + + However, if *grammar_source* is an extant file, load_grammar(grammar_source) + is called instead. This facilitates using a packaged grammar file when needed + but preserves load_grammar's automatic regeneration behavior when possible. + + pickled_nameMain program, when run as a script: produce grammar pickle files. + + Calls load_grammar for each argument, a path to a grammar text file. + # Modifications:# Copyright 2006 Google, Inc. All Rights Reserved.# Python imports# Pgen imports# XXX Move the prefix computation into a wrapper around tokenize.# We never broke out -- EOF is too soon (how can this happen???)b'Parser driver. + +This provides a high-level interface to parse a file into a syntax tree. + +'u'Parser driver. + +This provides a high-level interface to parse a file into a syntax tree. 
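The doctest strings above document the module's unittest integration points. A minimal sketch of that integration, assuming a hypothetical module mymodule whose docstrings contain doctests and a hypothetical examples.txt file of prose examples next to it:

    import doctest
    import unittest

    import mymodule  # hypothetical module whose docstrings contain doctests


    def load_tests(loader, tests, ignore):
        # Every docstring with examples becomes one unittest test case.
        tests.addTests(doctest.DocTestSuite(mymodule, optionflags=doctest.ELLIPSIS))
        # Run the examples in a standalone text file too; the path is
        # module-relative by default.
        tests.addTests(doctest.DocFileSuite("examples.txt",
                                            optionflags=doctest.NORMALIZE_WHITESPACE))
        return tests


    if __name__ == "__main__":
        unittest.main()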
[Continuation of the lib2to3.pgen2.driver strings: the duplicated bytes/str docstrings for the Driver class and its parse_tokens, parse_stream, parse_file, parse_string, load_grammar, load_packaged_grammar and main entry points, including the grammar-pickle regeneration messages. The tail of the line starts the ctypes.macholib.dyld strings (dyld emulation: dyld_find, framework_find, the default framework and library fallback paths, and the DYLD_* environment variables).]
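The driver strings summarised above describe the parser's high-level entry points (parse_string, parse_file, parse_stream, load_grammar). A minimal sketch, assuming the lib2to3 package is still importable (it is deprecated and removed from the newest Python releases):

    # Parse a small snippet into a lib2to3 syntax tree.
    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver

    d = driver.Driver(pygram.python_grammar_no_print_statement,
                      convert=pytree.convert)
    tree = d.parse_string("x = 1 + 2\n")   # parse_string expects a trailing newline
    print(type(tree).__name__, repr(str(tree)))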
[Continuation of the ctypes.macholib.dyld strings: the framework_find docstring, the libSystem.dylib and System.framework self-test fixtures, the search-order comments taken from man dyld(1), and the duplicated bytes/str forms of the same strings. The tail of the line starts the ctypes.macholib.dylib strings ("Generic dylib path manipulation") with the DYLIB_RE pattern and the dylib_info docstring.]
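The dyld strings cover dyld_find and framework_find, which resolve dylib and framework names the way the macOS dynamic linker would, honouring the DYLD_* environment variables and the default fallback paths. A minimal sketch (macOS only; elsewhere the lookup raises ValueError because nothing is found):

    from ctypes.macholib.dyld import dyld_find, framework_find

    # Resolve a bare dylib name through the DYLD_* search paths and the
    # standard fallbacks (~/lib, /usr/local/lib, /usr/lib).
    print(dyld_find("libSystem.dylib"))   # typically /usr/lib/libSystem.dylib

    # Resolve a loosely specified framework name to its binary.
    print(framework_find("System"))       # .../System.framework/System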
[Continuation of the ctypes.macholib.dylib strings: the dylib_info docstring (the four accepted name forms and the mapping it returns) and its test fixtures (completely/invalid, P/Foo.dylib, P/Foo_debug.dylib, P/Foo.A.dylib, P/Foo_debug.A.dylib, P/Foo.A_debug.dylib). The tail of the line starts the email.encoders strings ("Encodings and related functions": encode_base64, encode_quopri, encode_7or8bit, encode_noop and their docstrings).]
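dylib_info splits a dylib filename into location, short name, version and suffix, or returns None for names that do not look like dylibs. A short sketch reproducing two of the fixtures listed above:

    from ctypes.macholib.dylib import dylib_info

    print(dylib_info("completely/invalid"))
    # None

    print(dylib_info("P/Foo.A_debug.dylib"))
    # {'location': 'P', 'name': 'Foo.A_debug.dylib', 'shortname': 'Foo',
    #  'version': 'A', 'suffix': 'debug'}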
[Remainder of the email.encoders strings (the duplicated bytes/str docstrings and the Content-Transfer-Encoding header name), followed by the start of the html.entities strings: the name2codepoint table mapping classic HTML entity names to Unicode code points.]
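The encoders strings describe encode_base64, encode_quopri, encode_7or8bit and encode_noop, each of which rewrites a message's payload and sets the matching Content-Transfer-Encoding header. A minimal sketch using encode_base64 on a generic binary part:

    from email import encoders
    from email.mime.base import MIMEBase

    part = MIMEBase("application", "octet-stream")
    part.set_payload(b"\x00\x01 raw bytes")
    encoders.encode_base64(part)   # base64-encode the payload, set the CTE header

    print(part["Content-Transfer-Encoding"])   # base64
    print(part.get_payload())                  # the base64 text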
[Remainder of the html.entities tables: the rest of the name2codepoint mapping, the full HTML5 named-character-reference table (html5), the per-entity Unicode comments (e.g. "latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1"), the comments describing codepoint2name and entitydefs, and the duplicated bytes/str forms of the entity names.]
urlyQuote;'u'♣'b'clubs;'u'clubs;'b'clubsuit;'u'clubsuit;'u'∷'b'Colon;'u'Colon;'b'colon;'u'colon;'u'⩴'b'Colone;'u'Colone;'b'colone;'u'colone;'b'coloneq;'u'coloneq;'b'comma;'u'comma;'b'commat;'u'commat;'u'∁'b'comp;'u'comp;'u'∘'b'compfn;'u'compfn;'b'complement;'u'complement;'u'ℂ'b'complexes;'u'complexes;'u'≅'b'cong;'u'cong;'u'⩭'b'congdot;'u'congdot;'u'≡'b'Congruent;'u'Congruent;'u'∯'b'Conint;'u'Conint;'u'∮'b'conint;'u'conint;'b'ContourIntegral;'u'ContourIntegral;'b'Copf;'u'Copf;'b'copf;'u'copf;'u'∐'b'coprod;'u'coprod;'b'Coproduct;'u'Coproduct;'b'©'u'©'b'COPY'u'COPY'b'COPY;'u'COPY;'b'copy;'u'copy;'u'℗'b'copysr;'u'copysr;'b'CounterClockwiseContourIntegral;'u'CounterClockwiseContourIntegral;'u'↵'b'crarr;'u'crarr;'u'⨯'b'Cross;'u'Cross;'u'✗'b'cross;'u'cross;'b'Cscr;'u'Cscr;'b'cscr;'u'cscr;'u'⫏'b'csub;'u'csub;'u'⫑'b'csube;'u'csube;'u'⫐'b'csup;'u'csup;'u'⫒'b'csupe;'u'csupe;'u'⋯'b'ctdot;'u'ctdot;'u'⤸'b'cudarrl;'u'cudarrl;'u'⤵'b'cudarrr;'u'cudarrr;'u'⋞'b'cuepr;'u'cuepr;'u'⋟'b'cuesc;'u'cuesc;'u'↶'b'cularr;'u'cularr;'u'⤽'b'cularrp;'u'cularrp;'u'⋓'b'Cup;'u'Cup;'u'∪'b'cup;'u'cup;'u'⩈'b'cupbrcap;'u'cupbrcap;'b'CupCap;'u'CupCap;'u'⩆'b'cupcap;'u'cupcap;'u'⩊'b'cupcup;'u'cupcup;'u'⊍'b'cupdot;'u'cupdot;'u'⩅'b'cupor;'u'cupor;'u'∪︀'b'cups;'u'cups;'u'↷'b'curarr;'u'curarr;'u'⤼'b'curarrm;'u'curarrm;'b'curlyeqprec;'u'curlyeqprec;'b'curlyeqsucc;'u'curlyeqsucc;'u'⋎'b'curlyvee;'u'curlyvee;'u'⋏'b'curlywedge;'u'curlywedge;'b'¤'u'¤'b'curren;'u'curren;'b'curvearrowleft;'u'curvearrowleft;'b'curvearrowright;'u'curvearrowright;'b'cuvee;'u'cuvee;'b'cuwed;'u'cuwed;'b'cwconint;'u'cwconint;'u'∱'b'cwint;'u'cwint;'u'⌭'b'cylcty;'u'cylcty;'b'Dagger;'u'Dagger;'b'dagger;'u'dagger;'u'ℸ'b'daleth;'u'daleth;'u'↡'b'Darr;'u'Darr;'u'⇓'b'dArr;'u'dArr;'u'↓'b'darr;'u'darr;'u'‐'b'dash;'u'dash;'u'⫤'b'Dashv;'u'Dashv;'u'⊣'b'dashv;'u'dashv;'u'⤏'b'dbkarow;'u'dbkarow;'u'˝'b'dblac;'u'dblac;'u'Ď'b'Dcaron;'u'Dcaron;'u'ď'b'dcaron;'u'dcaron;'u'Д'b'Dcy;'u'Dcy;'u'д'b'dcy;'u'dcy;'b'DD;'u'DD;'u'ⅆ'b'dd;'u'dd;'b'ddagger;'u'ddagger;'u'⇊'b'ddarr;'u'ddarr;'u'⤑'b'DDotrahd;'u'DDotrahd;'u'⩷'b'ddotseq;'u'ddotseq;'b'°'u'°'b'deg;'u'deg;'u'∇'b'Del;'u'Del;'u'Δ'b'Delta;'u'Delta;'u'δ'b'delta;'u'delta;'u'⦱'b'demptyv;'u'demptyv;'u'⥿'b'dfisht;'u'dfisht;'b'Dfr;'u'Dfr;'b'dfr;'u'dfr;'u'⥥'b'dHar;'u'dHar;'u'⇃'b'dharl;'u'dharl;'u'⇂'b'dharr;'u'dharr;'b'DiacriticalAcute;'u'DiacriticalAcute;'u'˙'b'DiacriticalDot;'u'DiacriticalDot;'b'DiacriticalDoubleAcute;'u'DiacriticalDoubleAcute;'b'`'u'`'b'DiacriticalGrave;'u'DiacriticalGrave;'b'DiacriticalTilde;'u'DiacriticalTilde;'u'⋄'b'diam;'u'diam;'b'Diamond;'u'Diamond;'b'diamond;'u'diamond;'u'♦'b'diamondsuit;'u'diamondsuit;'b'diams;'u'diams;'b'¨'u'¨'b'die;'u'die;'b'DifferentialD;'u'DifferentialD;'u'ϝ'b'digamma;'u'digamma;'u'⋲'b'disin;'u'disin;'b'÷'u'÷'b'div;'u'div;'b'divide;'u'divide;'u'⋇'b'divideontimes;'u'divideontimes;'b'divonx;'u'divonx;'u'Ђ'b'DJcy;'u'DJcy;'u'ђ'b'djcy;'u'djcy;'u'⌞'b'dlcorn;'u'dlcorn;'u'⌍'b'dlcrop;'u'dlcrop;'b'dollar;'u'dollar;'b'Dopf;'u'Dopf;'b'dopf;'u'dopf;'b'Dot;'u'Dot;'b'dot;'u'dot;'u'⃜'b'DotDot;'u'DotDot;'u'≐'b'doteq;'u'doteq;'u'≑'b'doteqdot;'u'doteqdot;'b'DotEqual;'u'DotEqual;'u'∸'b'dotminus;'u'dotminus;'u'∔'b'dotplus;'u'dotplus;'u'⊡'b'dotsquare;'u'dotsquare;'b'doublebarwedge;'u'doublebarwedge;'b'DoubleContourIntegral;'u'DoubleContourIntegral;'b'DoubleDot;'u'DoubleDot;'b'DoubleDownArrow;'u'DoubleDownArrow;'u'⇐'b'DoubleLeftArrow;'u'DoubleLeftArrow;'u'⇔'b'DoubleLeftRightArrow;'u'DoubleLeftRightArrow;'b'DoubleLeftTee;'u'DoubleLeftTee;'u'⟸'b'DoubleLongLeftArrow;'u'DoubleLongLeftArrow;'u'⟺'b'DoubleLongLeftRightArrow;'u'DoubleLon
gLeftRightArrow;'u'⟹'b'DoubleLongRightArrow;'u'DoubleLongRightArrow;'u'⇒'b'DoubleRightArrow;'u'DoubleRightArrow;'u'⊨'b'DoubleRightTee;'u'DoubleRightTee;'u'⇑'b'DoubleUpArrow;'u'DoubleUpArrow;'u'⇕'b'DoubleUpDownArrow;'u'DoubleUpDownArrow;'u'∥'b'DoubleVerticalBar;'u'DoubleVerticalBar;'b'DownArrow;'u'DownArrow;'b'Downarrow;'u'Downarrow;'b'downarrow;'u'downarrow;'u'⤓'b'DownArrowBar;'u'DownArrowBar;'u'⇵'b'DownArrowUpArrow;'u'DownArrowUpArrow;'u'̑'b'DownBreve;'u'DownBreve;'b'downdownarrows;'u'downdownarrows;'b'downharpoonleft;'u'downharpoonleft;'b'downharpoonright;'u'downharpoonright;'u'⥐'b'DownLeftRightVector;'u'DownLeftRightVector;'u'⥞'b'DownLeftTeeVector;'u'DownLeftTeeVector;'u'↽'b'DownLeftVector;'u'DownLeftVector;'u'⥖'b'DownLeftVectorBar;'u'DownLeftVectorBar;'u'⥟'b'DownRightTeeVector;'u'DownRightTeeVector;'u'⇁'b'DownRightVector;'u'DownRightVector;'u'⥗'b'DownRightVectorBar;'u'DownRightVectorBar;'u'⊤'b'DownTee;'u'DownTee;'u'↧'b'DownTeeArrow;'u'DownTeeArrow;'u'⤐'b'drbkarow;'u'drbkarow;'u'⌟'b'drcorn;'u'drcorn;'u'⌌'b'drcrop;'u'drcrop;'b'Dscr;'u'Dscr;'b'dscr;'u'dscr;'u'Ѕ'b'DScy;'u'DScy;'u'ѕ'b'dscy;'u'dscy;'u'⧶'b'dsol;'u'dsol;'u'Đ'b'Dstrok;'u'Dstrok;'u'đ'b'dstrok;'u'dstrok;'u'⋱'b'dtdot;'u'dtdot;'u'▿'b'dtri;'u'dtri;'b'dtrif;'u'dtrif;'b'duarr;'u'duarr;'u'⥯'b'duhar;'u'duhar;'u'⦦'b'dwangle;'u'dwangle;'u'Џ'b'DZcy;'u'DZcy;'u'џ'b'dzcy;'u'dzcy;'u'⟿'b'dzigrarr;'u'dzigrarr;'b'É'u'É'b'é'u'é'b'Eacute;'u'Eacute;'b'eacute;'u'eacute;'u'⩮'b'easter;'u'easter;'u'Ě'b'Ecaron;'u'Ecaron;'u'ě'b'ecaron;'u'ecaron;'u'≖'b'ecir;'u'ecir;'b'Ê'u'Ê'b'ê'u'ê'b'Ecirc;'u'Ecirc;'b'ecirc;'u'ecirc;'u'≕'b'ecolon;'u'ecolon;'u'Э'b'Ecy;'u'Ecy;'u'э'b'ecy;'u'ecy;'b'eDDot;'u'eDDot;'u'Ė'b'Edot;'u'Edot;'b'eDot;'u'eDot;'u'ė'b'edot;'u'edot;'u'ⅇ'b'ee;'u'ee;'u'≒'b'efDot;'u'efDot;'b'Efr;'u'Efr;'b'efr;'u'efr;'u'⪚'b'eg;'u'eg;'b'È'u'È'b'è'u'è'b'Egrave;'u'Egrave;'b'egrave;'u'egrave;'u'⪖'b'egs;'u'egs;'u'⪘'b'egsdot;'u'egsdot;'u'⪙'b'el;'u'el;'u'∈'b'Element;'u'Element;'u'⏧'b'elinters;'u'elinters;'u'ℓ'b'ell;'u'ell;'u'⪕'b'els;'u'els;'u'⪗'b'elsdot;'u'elsdot;'u'Ē'b'Emacr;'u'Emacr;'u'ē'b'emacr;'u'emacr;'u'∅'b'empty;'u'empty;'b'emptyset;'u'emptyset;'u'◻'b'EmptySmallSquare;'u'EmptySmallSquare;'b'emptyv;'u'emptyv;'u'▫'b'EmptyVerySmallSquare;'u'EmptyVerySmallSquare;'u' 'b'emsp13;'u'emsp13;'u' 'b'emsp14;'u'emsp14;'u' 'b'emsp;'u'emsp;'u'Ŋ'b'ENG;'u'ENG;'u'ŋ'b'eng;'u'eng;'u' 
'b'ensp;'u'ensp;'u'Ę'b'Eogon;'u'Eogon;'u'ę'b'eogon;'u'eogon;'b'Eopf;'u'Eopf;'b'eopf;'u'eopf;'u'⋕'b'epar;'u'epar;'u'⧣'b'eparsl;'u'eparsl;'u'⩱'b'eplus;'u'eplus;'u'ε'b'epsi;'u'epsi;'u'Ε'b'Epsilon;'u'Epsilon;'b'epsilon;'u'epsilon;'u'ϵ'b'epsiv;'u'epsiv;'b'eqcirc;'u'eqcirc;'b'eqcolon;'u'eqcolon;'u'≂'b'eqsim;'u'eqsim;'b'eqslantgtr;'u'eqslantgtr;'b'eqslantless;'u'eqslantless;'u'⩵'b'Equal;'u'Equal;'b'equals;'u'equals;'b'EqualTilde;'u'EqualTilde;'u'≟'b'equest;'u'equest;'u'⇌'b'Equilibrium;'u'Equilibrium;'b'equiv;'u'equiv;'u'⩸'b'equivDD;'u'equivDD;'u'⧥'b'eqvparsl;'u'eqvparsl;'u'⥱'b'erarr;'u'erarr;'u'≓'b'erDot;'u'erDot;'u'ℰ'b'Escr;'u'Escr;'u'ℯ'b'escr;'u'escr;'b'esdot;'u'esdot;'u'⩳'b'Esim;'u'Esim;'b'esim;'u'esim;'u'Η'b'Eta;'u'Eta;'u'η'b'eta;'u'eta;'b'Ð'u'Ð'b'ð'u'ð'b'ETH;'u'ETH;'b'eth;'u'eth;'b'Ë'u'Ë'b'ë'u'ë'b'Euml;'u'Euml;'b'euml;'u'euml;'b'euro;'u'euro;'b'excl;'u'excl;'u'∃'b'exist;'u'exist;'b'Exists;'u'Exists;'b'expectation;'u'expectation;'b'ExponentialE;'u'ExponentialE;'b'exponentiale;'u'exponentiale;'b'fallingdotseq;'u'fallingdotseq;'u'Ф'b'Fcy;'u'Fcy;'u'ф'b'fcy;'u'fcy;'u'♀'b'female;'u'female;'u'ffi'b'ffilig;'u'ffilig;'u'ff'b'fflig;'u'fflig;'u'ffl'b'ffllig;'u'ffllig;'b'Ffr;'u'Ffr;'b'ffr;'u'ffr;'u'fi'b'filig;'u'filig;'u'◼'b'FilledSmallSquare;'u'FilledSmallSquare;'b'FilledVerySmallSquare;'u'FilledVerySmallSquare;'b'fj'u'fj'b'fjlig;'u'fjlig;'u'♭'b'flat;'u'flat;'u'fl'b'fllig;'u'fllig;'u'▱'b'fltns;'u'fltns;'b'fnof;'u'fnof;'b'Fopf;'u'Fopf;'b'fopf;'u'fopf;'u'∀'b'ForAll;'u'ForAll;'b'forall;'u'forall;'u'⋔'b'fork;'u'fork;'u'⫙'b'forkv;'u'forkv;'u'ℱ'b'Fouriertrf;'u'Fouriertrf;'u'⨍'b'fpartint;'u'fpartint;'b'½'u'½'b'frac12;'u'frac12;'u'⅓'b'frac13;'u'frac13;'b'¼'u'¼'b'frac14;'u'frac14;'u'⅕'b'frac15;'u'frac15;'u'⅙'b'frac16;'u'frac16;'u'⅛'b'frac18;'u'frac18;'u'⅔'b'frac23;'u'frac23;'u'⅖'b'frac25;'u'frac25;'b'¾'u'¾'b'frac34;'u'frac34;'u'⅗'b'frac35;'u'frac35;'u'⅜'b'frac38;'u'frac38;'u'⅘'b'frac45;'u'frac45;'u'⅚'b'frac56;'u'frac56;'u'⅝'b'frac58;'u'frac58;'u'⅞'b'frac78;'u'frac78;'u'⁄'b'frasl;'u'frasl;'u'⌢'b'frown;'u'frown;'b'Fscr;'u'Fscr;'b'fscr;'u'fscr;'u'ǵ'b'gacute;'u'gacute;'u'Γ'b'Gamma;'u'Gamma;'u'γ'b'gamma;'u'gamma;'u'Ϝ'b'Gammad;'u'Gammad;'b'gammad;'u'gammad;'u'⪆'b'gap;'u'gap;'u'Ğ'b'Gbreve;'u'Gbreve;'u'ğ'b'gbreve;'u'gbreve;'u'Ģ'b'Gcedil;'u'Gcedil;'u'Ĝ'b'Gcirc;'u'Gcirc;'u'ĝ'b'gcirc;'u'gcirc;'u'Г'b'Gcy;'u'Gcy;'u'г'b'gcy;'u'gcy;'u'Ġ'b'Gdot;'u'Gdot;'u'ġ'b'gdot;'u'gdot;'u'≧'b'gE;'u'gE;'u'≥'b'ge;'u'ge;'u'⪌'b'gEl;'u'gEl;'u'⋛'b'gel;'u'gel;'b'geq;'u'geq;'b'geqq;'u'geqq;'u'⩾'b'geqslant;'u'geqslant;'b'ges;'u'ges;'u'⪩'b'gescc;'u'gescc;'u'⪀'b'gesdot;'u'gesdot;'u'⪂'b'gesdoto;'u'gesdoto;'u'⪄'b'gesdotol;'u'gesdotol;'u'⋛︀'b'gesl;'u'gesl;'u'⪔'b'gesles;'u'gesles;'b'Gfr;'u'Gfr;'b'gfr;'u'gfr;'u'⋙'b'Gg;'u'Gg;'u'≫'b'gg;'u'gg;'b'ggg;'u'ggg;'u'ℷ'b'gimel;'u'gimel;'u'Ѓ'b'GJcy;'u'GJcy;'u'ѓ'b'gjcy;'u'gjcy;'u'≷'b'gl;'u'gl;'u'⪥'b'gla;'u'gla;'u'⪒'b'glE;'u'glE;'u'⪤'b'glj;'u'glj;'u'⪊'b'gnap;'u'gnap;'b'gnapprox;'u'gnapprox;'u'≩'b'gnE;'u'gnE;'u'⪈'b'gne;'u'gne;'b'gneq;'u'gneq;'b'gneqq;'u'gneqq;'u'⋧'b'gnsim;'u'gnsim;'b'Gopf;'u'Gopf;'b'gopf;'u'gopf;'b'grave;'u'grave;'b'GreaterEqual;'u'GreaterEqual;'b'GreaterEqualLess;'u'GreaterEqualLess;'b'GreaterFullEqual;'u'GreaterFullEqual;'u'⪢'b'GreaterGreater;'u'GreaterGreater;'b'GreaterLess;'u'GreaterLess;'b'GreaterSlantEqual;'u'GreaterSlantEqual;'u'≳'b'GreaterTilde;'u'GreaterTilde;'b'Gscr;'u'Gscr;'u'ℊ'b'gscr;'u'gscr;'b'gsim;'u'gsim;'u'⪎'b'gsime;'u'gsime;'u'⪐'b'gsiml;'u'gsiml;'b'GT'u'GT'b'GT;'u'GT;'b'Gt;'u'Gt;'b'gt;'u'gt;'u'⪧'b'gtcc;'u'gtcc;'u'⩺'b'gtcir;'u'gtcir;'u'⋗'b'gtdot;'u'gtdot;'u'⦕'b'gtlPar;'u'gtlPar;'u'⩼'b'gtqu
est;'u'gtquest;'b'gtrapprox;'u'gtrapprox;'u'⥸'b'gtrarr;'u'gtrarr;'b'gtrdot;'u'gtrdot;'b'gtreqless;'u'gtreqless;'b'gtreqqless;'u'gtreqqless;'b'gtrless;'u'gtrless;'b'gtrsim;'u'gtrsim;'u'≩︀'b'gvertneqq;'u'gvertneqq;'b'gvnE;'u'gvnE;'b'Hacek;'u'Hacek;'u' 'b'hairsp;'u'hairsp;'b'half;'u'half;'u'ℋ'b'hamilt;'u'hamilt;'u'Ъ'b'HARDcy;'u'HARDcy;'u'ъ'b'hardcy;'u'hardcy;'b'hArr;'u'hArr;'u'↔'b'harr;'u'harr;'u'⥈'b'harrcir;'u'harrcir;'u'↭'b'harrw;'u'harrw;'b'Hat;'u'Hat;'u'ℏ'b'hbar;'u'hbar;'u'Ĥ'b'Hcirc;'u'Hcirc;'u'ĥ'b'hcirc;'u'hcirc;'u'♥'b'hearts;'u'hearts;'b'heartsuit;'u'heartsuit;'b'hellip;'u'hellip;'u'⊹'b'hercon;'u'hercon;'u'ℌ'b'Hfr;'u'Hfr;'b'hfr;'u'hfr;'b'HilbertSpace;'u'HilbertSpace;'u'⤥'b'hksearow;'u'hksearow;'u'⤦'b'hkswarow;'u'hkswarow;'u'⇿'b'hoarr;'u'hoarr;'u'∻'b'homtht;'u'homtht;'u'↩'b'hookleftarrow;'u'hookleftarrow;'u'↪'b'hookrightarrow;'u'hookrightarrow;'u'ℍ'b'Hopf;'u'Hopf;'b'hopf;'u'hopf;'u'―'b'horbar;'u'horbar;'b'HorizontalLine;'u'HorizontalLine;'b'Hscr;'u'Hscr;'b'hscr;'u'hscr;'b'hslash;'u'hslash;'u'Ħ'b'Hstrok;'u'Hstrok;'u'ħ'b'hstrok;'u'hstrok;'b'HumpDownHump;'u'HumpDownHump;'b'HumpEqual;'u'HumpEqual;'u'⁃'b'hybull;'u'hybull;'b'hyphen;'u'hyphen;'b'Í'u'Í'b'í'u'í'b'Iacute;'u'Iacute;'b'iacute;'u'iacute;'u'⁣'b'ic;'u'ic;'b'Î'u'Î'b'î'u'î'b'Icirc;'u'Icirc;'b'icirc;'u'icirc;'u'И'b'Icy;'u'Icy;'u'и'b'icy;'u'icy;'b'Idot;'u'Idot;'u'Е'b'IEcy;'u'IEcy;'u'е'b'iecy;'u'iecy;'b'¡'u'¡'b'iexcl;'u'iexcl;'b'iff;'u'iff;'u'ℑ'b'Ifr;'u'Ifr;'b'ifr;'u'ifr;'b'Ì'u'Ì'b'ì'u'ì'b'Igrave;'u'Igrave;'b'igrave;'u'igrave;'u'ⅈ'b'ii;'u'ii;'u'⨌'b'iiiint;'u'iiiint;'u'∭'b'iiint;'u'iiint;'u'⧜'b'iinfin;'u'iinfin;'u'℩'b'iiota;'u'iiota;'u'IJ'b'IJlig;'u'IJlig;'u'ij'b'ijlig;'u'ijlig;'b'Im;'u'Im;'u'Ī'b'Imacr;'u'Imacr;'u'ī'b'imacr;'u'imacr;'b'image;'u'image;'b'ImaginaryI;'u'ImaginaryI;'u'ℐ'b'imagline;'u'imagline;'b'imagpart;'u'imagpart;'u'ı'b'imath;'u'imath;'u'⊷'b'imof;'u'imof;'u'Ƶ'b'imped;'u'imped;'b'Implies;'u'Implies;'b'in;'u'in;'u'℅'b'incare;'u'incare;'u'∞'b'infin;'u'infin;'u'⧝'b'infintie;'u'infintie;'b'inodot;'u'inodot;'u'∬'b'Int;'u'Int;'u'∫'b'int;'u'int;'u'⊺'b'intcal;'u'intcal;'u'ℤ'b'integers;'u'integers;'b'Integral;'u'Integral;'b'intercal;'u'intercal;'b'Intersection;'u'Intersection;'u'⨗'b'intlarhk;'u'intlarhk;'u'⨼'b'intprod;'u'intprod;'b'InvisibleComma;'u'InvisibleComma;'u'⁢'b'InvisibleTimes;'u'InvisibleTimes;'u'Ё'b'IOcy;'u'IOcy;'u'ё'b'iocy;'u'iocy;'u'Į'b'Iogon;'u'Iogon;'u'į'b'iogon;'u'iogon;'b'Iopf;'u'Iopf;'b'iopf;'u'iopf;'u'Ι'b'Iota;'u'Iota;'u'ι'b'iota;'u'iota;'b'iprod;'u'iprod;'b'¿'u'¿'b'iquest;'u'iquest;'b'Iscr;'u'Iscr;'b'iscr;'u'iscr;'b'isin;'u'isin;'u'⋵'b'isindot;'u'isindot;'u'⋹'b'isinE;'u'isinE;'u'⋴'b'isins;'u'isins;'u'⋳'b'isinsv;'u'isinsv;'b'isinv;'u'isinv;'b'it;'u'it;'u'Ĩ'b'Itilde;'u'Itilde;'u'ĩ'b'itilde;'u'itilde;'u'І'b'Iukcy;'u'Iukcy;'u'і'b'iukcy;'u'iukcy;'b'Ï'u'Ï'b'ï'u'ï'b'Iuml;'u'Iuml;'b'iuml;'u'iuml;'u'Ĵ'b'Jcirc;'u'Jcirc;'u'ĵ'b'jcirc;'u'jcirc;'u'Й'b'Jcy;'u'Jcy;'u'й'b'jcy;'u'jcy;'b'Jfr;'u'Jfr;'b'jfr;'u'jfr;'u'ȷ'b'jmath;'u'jmath;'b'Jopf;'u'Jopf;'b'jopf;'u'jopf;'b'Jscr;'u'Jscr;'b'jscr;'u'jscr;'u'Ј'b'Jsercy;'u'Jsercy;'u'ј'b'jsercy;'u'jsercy;'u'Є'b'Jukcy;'u'Jukcy;'u'є'b'jukcy;'u'jukcy;'u'Κ'b'Kappa;'u'Kappa;'u'κ'b'kappa;'u'kappa;'u'ϰ'b'kappav;'u'kappav;'u'Ķ'b'Kcedil;'u'Kcedil;'u'ķ'b'kcedil;'u'kcedil;'b'Kcy;'u'Kcy;'u'к'b'kcy;'u'kcy;'b'Kfr;'u'Kfr;'b'kfr;'u'kfr;'u'ĸ'b'kgreen;'u'kgreen;'u'Х'b'KHcy;'u'KHcy;'u'х'b'khcy;'u'khcy;'u'Ќ'b'KJcy;'u'KJcy;'u'ќ'b'kjcy;'u'kjcy;'b'Kopf;'u'Kopf;'b'kopf;'u'kopf;'b'Kscr;'u'Kscr;'b'kscr;'u'kscr;'u'⇚'b'lAarr;'u'lAarr;'u'Ĺ'b'Lacute;'u'Lacute;'u'ĺ'b'lacute;'u'lacute;'u'⦴'b'laemptyv;'u'laemptyv;'u'ℒ'b'lagran;'
u'lagran;'u'Λ'b'Lambda;'u'Lambda;'u'λ'b'lambda;'u'lambda;'u'⟪'b'Lang;'u'Lang;'u'⟨'b'lang;'u'lang;'u'⦑'b'langd;'u'langd;'b'langle;'u'langle;'u'⪅'b'lap;'u'lap;'b'Laplacetrf;'u'Laplacetrf;'b'«'u'«'b'laquo;'u'laquo;'u'↞'b'Larr;'u'Larr;'b'lArr;'u'lArr;'u'←'b'larr;'u'larr;'u'⇤'b'larrb;'u'larrb;'u'⤟'b'larrbfs;'u'larrbfs;'u'⤝'b'larrfs;'u'larrfs;'b'larrhk;'u'larrhk;'u'↫'b'larrlp;'u'larrlp;'u'⤹'b'larrpl;'u'larrpl;'u'⥳'b'larrsim;'u'larrsim;'u'↢'b'larrtl;'u'larrtl;'u'⪫'b'lat;'u'lat;'u'⤛'b'lAtail;'u'lAtail;'u'⤙'b'latail;'u'latail;'u'⪭'b'late;'u'late;'u'⪭︀'b'lates;'u'lates;'u'⤎'b'lBarr;'u'lBarr;'u'⤌'b'lbarr;'u'lbarr;'u'❲'b'lbbrk;'u'lbbrk;'b'lbrace;'u'lbrace;'b'lbrack;'u'lbrack;'u'⦋'b'lbrke;'u'lbrke;'u'⦏'b'lbrksld;'u'lbrksld;'u'⦍'b'lbrkslu;'u'lbrkslu;'u'Ľ'b'Lcaron;'u'Lcaron;'u'ľ'b'lcaron;'u'lcaron;'u'Ļ'b'Lcedil;'u'Lcedil;'u'ļ'b'lcedil;'u'lcedil;'u'⌈'b'lceil;'u'lceil;'b'lcub;'u'lcub;'u'Л'b'Lcy;'u'Lcy;'u'л'b'lcy;'u'lcy;'u'⤶'b'ldca;'u'ldca;'b'ldquo;'u'ldquo;'b'ldquor;'u'ldquor;'u'⥧'b'ldrdhar;'u'ldrdhar;'u'⥋'b'ldrushar;'u'ldrushar;'u'↲'b'ldsh;'u'ldsh;'u'≦'b'lE;'u'lE;'u'≤'b'le;'u'le;'b'LeftAngleBracket;'u'LeftAngleBracket;'b'LeftArrow;'u'LeftArrow;'b'Leftarrow;'u'Leftarrow;'b'leftarrow;'u'leftarrow;'b'LeftArrowBar;'u'LeftArrowBar;'u'⇆'b'LeftArrowRightArrow;'u'LeftArrowRightArrow;'b'leftarrowtail;'u'leftarrowtail;'b'LeftCeiling;'u'LeftCeiling;'u'⟦'b'LeftDoubleBracket;'u'LeftDoubleBracket;'u'⥡'b'LeftDownTeeVector;'u'LeftDownTeeVector;'b'LeftDownVector;'u'LeftDownVector;'u'⥙'b'LeftDownVectorBar;'u'LeftDownVectorBar;'u'⌊'b'LeftFloor;'u'LeftFloor;'b'leftharpoondown;'u'leftharpoondown;'u'↼'b'leftharpoonup;'u'leftharpoonup;'u'⇇'b'leftleftarrows;'u'leftleftarrows;'b'LeftRightArrow;'u'LeftRightArrow;'b'Leftrightarrow;'u'Leftrightarrow;'b'leftrightarrow;'u'leftrightarrow;'b'leftrightarrows;'u'leftrightarrows;'u'⇋'b'leftrightharpoons;'u'leftrightharpoons;'b'leftrightsquigarrow;'u'leftrightsquigarrow;'u'⥎'b'LeftRightVector;'u'LeftRightVector;'b'LeftTee;'u'LeftTee;'u'↤'b'LeftTeeArrow;'u'LeftTeeArrow;'u'⥚'b'LeftTeeVector;'u'LeftTeeVector;'u'⋋'b'leftthreetimes;'u'leftthreetimes;'u'⊲'b'LeftTriangle;'u'LeftTriangle;'u'⧏'b'LeftTriangleBar;'u'LeftTriangleBar;'u'⊴'b'LeftTriangleEqual;'u'LeftTriangleEqual;'u'⥑'b'LeftUpDownVector;'u'LeftUpDownVector;'u'⥠'b'LeftUpTeeVector;'u'LeftUpTeeVector;'u'↿'b'LeftUpVector;'u'LeftUpVector;'u'⥘'b'LeftUpVectorBar;'u'LeftUpVectorBar;'b'LeftVector;'u'LeftVector;'u'⥒'b'LeftVectorBar;'u'LeftVectorBar;'u'⪋'b'lEg;'u'lEg;'u'⋚'b'leg;'u'leg;'b'leq;'u'leq;'b'leqq;'u'leqq;'u'⩽'b'leqslant;'u'leqslant;'b'les;'u'les;'u'⪨'b'lescc;'u'lescc;'u'⩿'b'lesdot;'u'lesdot;'u'⪁'b'lesdoto;'u'lesdoto;'u'⪃'b'lesdotor;'u'lesdotor;'u'⋚︀'b'lesg;'u'lesg;'u'⪓'b'lesges;'u'lesges;'b'lessapprox;'u'lessapprox;'u'⋖'b'lessdot;'u'lessdot;'b'lesseqgtr;'u'lesseqgtr;'b'lesseqqgtr;'u'lesseqqgtr;'b'LessEqualGreater;'u'LessEqualGreater;'b'LessFullEqual;'u'LessFullEqual;'u'≶'b'LessGreater;'u'LessGreater;'b'lessgtr;'u'lessgtr;'u'⪡'b'LessLess;'u'LessLess;'u'≲'b'lesssim;'u'lesssim;'b'LessSlantEqual;'u'LessSlantEqual;'b'LessTilde;'u'LessTilde;'u'⥼'b'lfisht;'u'lfisht;'b'lfloor;'u'lfloor;'b'Lfr;'u'Lfr;'b'lfr;'u'lfr;'b'lg;'u'lg;'u'⪑'b'lgE;'u'lgE;'u'⥢'b'lHar;'u'lHar;'b'lhard;'u'lhard;'b'lharu;'u'lharu;'u'⥪'b'lharul;'u'lharul;'u'▄'b'lhblk;'u'lhblk;'u'Љ'b'LJcy;'u'LJcy;'u'љ'b'ljcy;'u'ljcy;'u'⋘'b'Ll;'u'Ll;'u'≪'b'll;'u'll;'b'llarr;'u'llarr;'b'llcorner;'u'llcorner;'b'Lleftarrow;'u'Lleftarrow;'u'⥫'b'llhard;'u'llhard;'u'◺'b'lltri;'u'lltri;'u'Ŀ'b'Lmidot;'u'Lmidot;'u'ŀ'b'lmidot;'u'lmidot;'u'⎰'b'lmoust;'u'lmoust;'b'lmoustache;'u'lmoustache;'u'⪉'b'lnap;'u'lnap;'b
'lnapprox;'u'lnapprox;'u'≨'b'lnE;'u'lnE;'u'⪇'b'lne;'u'lne;'b'lneq;'u'lneq;'b'lneqq;'u'lneqq;'u'⋦'b'lnsim;'u'lnsim;'u'⟬'b'loang;'u'loang;'u'⇽'b'loarr;'u'loarr;'b'lobrk;'u'lobrk;'u'⟵'b'LongLeftArrow;'u'LongLeftArrow;'b'Longleftarrow;'u'Longleftarrow;'b'longleftarrow;'u'longleftarrow;'u'⟷'b'LongLeftRightArrow;'u'LongLeftRightArrow;'b'Longleftrightarrow;'u'Longleftrightarrow;'b'longleftrightarrow;'u'longleftrightarrow;'u'⟼'b'longmapsto;'u'longmapsto;'u'⟶'b'LongRightArrow;'u'LongRightArrow;'b'Longrightarrow;'u'Longrightarrow;'b'longrightarrow;'u'longrightarrow;'b'looparrowleft;'u'looparrowleft;'u'↬'b'looparrowright;'u'looparrowright;'u'⦅'b'lopar;'u'lopar;'b'Lopf;'u'Lopf;'b'lopf;'u'lopf;'u'⨭'b'loplus;'u'loplus;'u'⨴'b'lotimes;'u'lotimes;'u'∗'b'lowast;'u'lowast;'b'lowbar;'u'lowbar;'u'↙'b'LowerLeftArrow;'u'LowerLeftArrow;'u'↘'b'LowerRightArrow;'u'LowerRightArrow;'u'◊'b'loz;'u'loz;'b'lozenge;'u'lozenge;'b'lozf;'u'lozf;'b'lpar;'u'lpar;'u'⦓'b'lparlt;'u'lparlt;'b'lrarr;'u'lrarr;'b'lrcorner;'u'lrcorner;'b'lrhar;'u'lrhar;'u'⥭'b'lrhard;'u'lrhard;'u'‎'b'lrm;'u'lrm;'u'⊿'b'lrtri;'u'lrtri;'b'lsaquo;'u'lsaquo;'b'Lscr;'u'Lscr;'b'lscr;'u'lscr;'u'↰'b'Lsh;'u'Lsh;'b'lsh;'u'lsh;'b'lsim;'u'lsim;'u'⪍'b'lsime;'u'lsime;'u'⪏'b'lsimg;'u'lsimg;'b'lsqb;'u'lsqb;'b'lsquo;'u'lsquo;'b'lsquor;'u'lsquor;'b'Lstrok;'u'Lstrok;'u'ł'b'lstrok;'u'lstrok;'b'LT'u'LT'b'LT;'u'LT;'b'Lt;'u'Lt;'b'lt;'u'lt;'u'⪦'b'ltcc;'u'ltcc;'u'⩹'b'ltcir;'u'ltcir;'b'ltdot;'u'ltdot;'b'lthree;'u'lthree;'u'⋉'b'ltimes;'u'ltimes;'u'⥶'b'ltlarr;'u'ltlarr;'u'⩻'b'ltquest;'u'ltquest;'u'◃'b'ltri;'u'ltri;'b'ltrie;'u'ltrie;'b'ltrif;'u'ltrif;'u'⦖'b'ltrPar;'u'ltrPar;'u'⥊'b'lurdshar;'u'lurdshar;'u'⥦'b'luruhar;'u'luruhar;'u'≨︀'b'lvertneqq;'u'lvertneqq;'b'lvnE;'u'lvnE;'b'¯'u'¯'b'macr;'u'macr;'u'♂'b'male;'u'male;'u'✠'b'malt;'u'malt;'b'maltese;'u'maltese;'u'⤅'b'Map;'u'Map;'u'↦'b'map;'u'map;'b'mapsto;'u'mapsto;'b'mapstodown;'u'mapstodown;'b'mapstoleft;'u'mapstoleft;'u'↥'b'mapstoup;'u'mapstoup;'u'▮'b'marker;'u'marker;'u'⨩'b'mcomma;'u'mcomma;'u'М'b'Mcy;'u'Mcy;'u'м'b'mcy;'u'mcy;'b'mdash;'u'mdash;'u'∺'b'mDDot;'u'mDDot;'b'measuredangle;'u'measuredangle;'u' 
'b'MediumSpace;'u'MediumSpace;'u'ℳ'b'Mellintrf;'u'Mellintrf;'b'Mfr;'u'Mfr;'b'mfr;'u'mfr;'u'℧'b'mho;'u'mho;'b'µ'u'µ'b'micro;'u'micro;'u'∣'b'mid;'u'mid;'b'midast;'u'midast;'u'⫰'b'midcir;'u'midcir;'b'middot;'u'middot;'u'−'b'minus;'u'minus;'b'minusb;'u'minusb;'b'minusd;'u'minusd;'u'⨪'b'minusdu;'u'minusdu;'u'∓'b'MinusPlus;'u'MinusPlus;'u'⫛'b'mlcp;'u'mlcp;'b'mldr;'u'mldr;'b'mnplus;'u'mnplus;'u'⊧'b'models;'u'models;'b'Mopf;'u'Mopf;'b'mopf;'u'mopf;'b'mp;'u'mp;'b'Mscr;'u'Mscr;'b'mscr;'u'mscr;'b'mstpos;'u'mstpos;'u'Μ'b'Mu;'u'Mu;'u'μ'b'mu;'u'mu;'u'⊸'b'multimap;'u'multimap;'b'mumap;'u'mumap;'b'nabla;'u'nabla;'u'Ń'b'Nacute;'u'Nacute;'u'ń'b'nacute;'u'nacute;'u'∠⃒'b'nang;'u'nang;'u'≉'b'nap;'u'nap;'u'⩰̸'b'napE;'u'napE;'u'≋̸'b'napid;'u'napid;'u'ʼn'b'napos;'u'napos;'b'napprox;'u'napprox;'u'♮'b'natur;'u'natur;'b'natural;'u'natural;'u'ℕ'b'naturals;'u'naturals;'b'nbsp;'u'nbsp;'u'≎̸'b'nbump;'u'nbump;'u'≏̸'b'nbumpe;'u'nbumpe;'u'⩃'b'ncap;'u'ncap;'u'Ň'b'Ncaron;'u'Ncaron;'u'ň'b'ncaron;'u'ncaron;'u'Ņ'b'Ncedil;'u'Ncedil;'u'ņ'b'ncedil;'u'ncedil;'u'≇'b'ncong;'u'ncong;'u'⩭̸'b'ncongdot;'u'ncongdot;'u'⩂'b'ncup;'u'ncup;'u'Н'b'Ncy;'u'Ncy;'u'н'b'ncy;'u'ncy;'b'ndash;'u'ndash;'u'≠'b'ne;'u'ne;'u'⤤'b'nearhk;'u'nearhk;'u'⇗'b'neArr;'u'neArr;'u'↗'b'nearr;'u'nearr;'b'nearrow;'u'nearrow;'u'≐̸'b'nedot;'u'nedot;'u'​'b'NegativeMediumSpace;'u'NegativeMediumSpace;'b'NegativeThickSpace;'u'NegativeThickSpace;'b'NegativeThinSpace;'u'NegativeThinSpace;'b'NegativeVeryThinSpace;'u'NegativeVeryThinSpace;'u'≢'b'nequiv;'u'nequiv;'u'⤨'b'nesear;'u'nesear;'u'≂̸'b'nesim;'u'nesim;'b'NestedGreaterGreater;'u'NestedGreaterGreater;'b'NestedLessLess;'u'NestedLessLess;'b'NewLine;'u'NewLine;'u'∄'b'nexist;'u'nexist;'b'nexists;'u'nexists;'b'Nfr;'u'Nfr;'b'nfr;'u'nfr;'u'≧̸'b'ngE;'u'ngE;'u'≱'b'nge;'u'nge;'b'ngeq;'u'ngeq;'b'ngeqq;'u'ngeqq;'u'⩾̸'b'ngeqslant;'u'ngeqslant;'b'nges;'u'nges;'u'⋙̸'b'nGg;'u'nGg;'u'≵'b'ngsim;'u'ngsim;'u'≫⃒'b'nGt;'u'nGt;'u'≯'b'ngt;'u'ngt;'b'ngtr;'u'ngtr;'u'≫̸'b'nGtv;'u'nGtv;'u'⇎'b'nhArr;'u'nhArr;'u'↮'b'nharr;'u'nharr;'u'⫲'b'nhpar;'u'nhpar;'u'∋'b'ni;'u'ni;'u'⋼'b'nis;'u'nis;'u'⋺'b'nisd;'u'nisd;'b'niv;'u'niv;'u'Њ'b'NJcy;'u'NJcy;'u'њ'b'njcy;'u'njcy;'u'⇍'b'nlArr;'u'nlArr;'u'↚'b'nlarr;'u'nlarr;'u'‥'b'nldr;'u'nldr;'u'≦̸'b'nlE;'u'nlE;'u'≰'b'nle;'u'nle;'b'nLeftarrow;'u'nLeftarrow;'b'nleftarrow;'u'nleftarrow;'b'nLeftrightarrow;'u'nLeftrightarrow;'b'nleftrightarrow;'u'nleftrightarrow;'b'nleq;'u'nleq;'b'nleqq;'u'nleqq;'u'⩽̸'b'nleqslant;'u'nleqslant;'b'nles;'u'nles;'u'≮'b'nless;'u'nless;'u'⋘̸'b'nLl;'u'nLl;'u'≴'b'nlsim;'u'nlsim;'u'≪⃒'b'nLt;'u'nLt;'b'nlt;'u'nlt;'u'⋪'b'nltri;'u'nltri;'u'⋬'b'nltrie;'u'nltrie;'u'≪̸'b'nLtv;'u'nLtv;'u'∤'b'nmid;'u'nmid;'u'⁠'b'NoBreak;'u'NoBreak;'b'NonBreakingSpace;'u'NonBreakingSpace;'b'Nopf;'u'Nopf;'b'nopf;'u'nopf;'b'¬'u'¬'u'⫬'b'Not;'u'Not;'b'not;'u'not;'b'NotCongruent;'u'NotCongruent;'u'≭'b'NotCupCap;'u'NotCupCap;'u'∦'b'NotDoubleVerticalBar;'u'NotDoubleVerticalBar;'u'∉'b'NotElement;'u'NotElement;'b'NotEqual;'u'NotEqual;'b'NotEqualTilde;'u'NotEqualTilde;'b'NotExists;'u'NotExists;'b'NotGreater;'u'NotGreater;'b'NotGreaterEqual;'u'NotGreaterEqual;'b'NotGreaterFullEqual;'u'NotGreaterFullEqual;'b'NotGreaterGreater;'u'NotGreaterGreater;'u'≹'b'NotGreaterLess;'u'NotGreaterLess;'b'NotGreaterSlantEqual;'u'NotGreaterSlantEqual;'b'NotGreaterTilde;'u'NotGreaterTilde;'b'NotHumpDownHump;'u'NotHumpDownHump;'b'NotHumpEqual;'u'NotHumpEqual;'b'notin;'u'notin;'u'⋵̸'b'notindot;'u'notindot;'u'⋹̸'b'notinE;'u'notinE;'b'notinva;'u'notinva;'u'⋷'b'notinvb;'u'notinvb;'u'⋶'b'notinvc;'u'notinvc;'b'NotLeftTriangle;'u'NotLeftTriangle;'u'⧏̸'b'NotLeftTriangleBar
;'u'NotLeftTriangleBar;'b'NotLeftTriangleEqual;'u'NotLeftTriangleEqual;'b'NotLess;'u'NotLess;'b'NotLessEqual;'u'NotLessEqual;'u'≸'b'NotLessGreater;'u'NotLessGreater;'b'NotLessLess;'u'NotLessLess;'b'NotLessSlantEqual;'u'NotLessSlantEqual;'b'NotLessTilde;'u'NotLessTilde;'u'⪢̸'b'NotNestedGreaterGreater;'u'NotNestedGreaterGreater;'u'⪡̸'b'NotNestedLessLess;'u'NotNestedLessLess;'u'∌'b'notni;'u'notni;'b'notniva;'u'notniva;'u'⋾'b'notnivb;'u'notnivb;'u'⋽'b'notnivc;'u'notnivc;'u'⊀'b'NotPrecedes;'u'NotPrecedes;'u'⪯̸'b'NotPrecedesEqual;'u'NotPrecedesEqual;'u'⋠'b'NotPrecedesSlantEqual;'u'NotPrecedesSlantEqual;'b'NotReverseElement;'u'NotReverseElement;'u'⋫'b'NotRightTriangle;'u'NotRightTriangle;'u'⧐̸'b'NotRightTriangleBar;'u'NotRightTriangleBar;'u'⋭'b'NotRightTriangleEqual;'u'NotRightTriangleEqual;'u'⊏̸'b'NotSquareSubset;'u'NotSquareSubset;'u'⋢'b'NotSquareSubsetEqual;'u'NotSquareSubsetEqual;'u'⊐̸'b'NotSquareSuperset;'u'NotSquareSuperset;'u'⋣'b'NotSquareSupersetEqual;'u'NotSquareSupersetEqual;'u'⊂⃒'b'NotSubset;'u'NotSubset;'u'⊈'b'NotSubsetEqual;'u'NotSubsetEqual;'u'⊁'b'NotSucceeds;'u'NotSucceeds;'u'⪰̸'b'NotSucceedsEqual;'u'NotSucceedsEqual;'u'⋡'b'NotSucceedsSlantEqual;'u'NotSucceedsSlantEqual;'u'≿̸'b'NotSucceedsTilde;'u'NotSucceedsTilde;'u'⊃⃒'b'NotSuperset;'u'NotSuperset;'u'⊉'b'NotSupersetEqual;'u'NotSupersetEqual;'u'≁'b'NotTilde;'u'NotTilde;'u'≄'b'NotTildeEqual;'u'NotTildeEqual;'b'NotTildeFullEqual;'u'NotTildeFullEqual;'b'NotTildeTilde;'u'NotTildeTilde;'b'NotVerticalBar;'u'NotVerticalBar;'b'npar;'u'npar;'b'nparallel;'u'nparallel;'u'⫽⃥'b'nparsl;'u'nparsl;'u'∂̸'b'npart;'u'npart;'u'⨔'b'npolint;'u'npolint;'b'npr;'u'npr;'b'nprcue;'u'nprcue;'b'npre;'u'npre;'b'nprec;'u'nprec;'b'npreceq;'u'npreceq;'u'⇏'b'nrArr;'u'nrArr;'u'↛'b'nrarr;'u'nrarr;'u'⤳̸'b'nrarrc;'u'nrarrc;'u'↝̸'b'nrarrw;'u'nrarrw;'b'nRightarrow;'u'nRightarrow;'b'nrightarrow;'u'nrightarrow;'b'nrtri;'u'nrtri;'b'nrtrie;'u'nrtrie;'b'nsc;'u'nsc;'b'nsccue;'u'nsccue;'b'nsce;'u'nsce;'b'Nscr;'u'Nscr;'b'nscr;'u'nscr;'b'nshortmid;'u'nshortmid;'b'nshortparallel;'u'nshortparallel;'b'nsim;'u'nsim;'b'nsime;'u'nsime;'b'nsimeq;'u'nsimeq;'b'nsmid;'u'nsmid;'b'nspar;'u'nspar;'b'nsqsube;'u'nsqsube;'b'nsqsupe;'u'nsqsupe;'u'⊄'b'nsub;'u'nsub;'u'⫅̸'b'nsubE;'u'nsubE;'b'nsube;'u'nsube;'b'nsubset;'u'nsubset;'b'nsubseteq;'u'nsubseteq;'b'nsubseteqq;'u'nsubseteqq;'b'nsucc;'u'nsucc;'b'nsucceq;'u'nsucceq;'u'⊅'b'nsup;'u'nsup;'u'⫆̸'b'nsupE;'u'nsupE;'b'nsupe;'u'nsupe;'b'nsupset;'u'nsupset;'b'nsupseteq;'u'nsupseteq;'b'nsupseteqq;'u'nsupseteqq;'b'ntgl;'u'ntgl;'b'Ñ'u'Ñ'b'ñ'u'ñ'b'Ntilde;'u'Ntilde;'b'ntilde;'u'ntilde;'b'ntlg;'u'ntlg;'b'ntriangleleft;'u'ntriangleleft;'b'ntrianglelefteq;'u'ntrianglelefteq;'b'ntriangleright;'u'ntriangleright;'b'ntrianglerighteq;'u'ntrianglerighteq;'u'Ν'b'Nu;'u'Nu;'u'ν'b'nu;'u'nu;'b'num;'u'num;'u'№'b'numero;'u'numero;'u' 
'b'numsp;'u'numsp;'u'≍⃒'b'nvap;'u'nvap;'u'⊯'b'nVDash;'u'nVDash;'u'⊮'b'nVdash;'u'nVdash;'u'⊭'b'nvDash;'u'nvDash;'u'⊬'b'nvdash;'u'nvdash;'u'≥⃒'b'nvge;'u'nvge;'u'>⃒'b'nvgt;'u'nvgt;'u'⤄'b'nvHarr;'u'nvHarr;'u'⧞'b'nvinfin;'u'nvinfin;'u'⤂'b'nvlArr;'u'nvlArr;'u'≤⃒'b'nvle;'u'nvle;'u'<⃒'b'nvlt;'u'nvlt;'u'⊴⃒'b'nvltrie;'u'nvltrie;'u'⤃'b'nvrArr;'u'nvrArr;'u'⊵⃒'b'nvrtrie;'u'nvrtrie;'u'∼⃒'b'nvsim;'u'nvsim;'u'⤣'b'nwarhk;'u'nwarhk;'u'⇖'b'nwArr;'u'nwArr;'u'↖'b'nwarr;'u'nwarr;'b'nwarrow;'u'nwarrow;'u'⤧'b'nwnear;'u'nwnear;'b'Ó'u'Ó'b'ó'u'ó'b'Oacute;'u'Oacute;'b'oacute;'u'oacute;'b'oast;'u'oast;'b'ocir;'u'ocir;'b'Ô'u'Ô'b'ô'u'ô'b'Ocirc;'u'Ocirc;'b'ocirc;'u'ocirc;'u'О'b'Ocy;'u'Ocy;'u'о'b'ocy;'u'ocy;'b'odash;'u'odash;'u'Ő'b'Odblac;'u'Odblac;'u'ő'b'odblac;'u'odblac;'u'⨸'b'odiv;'u'odiv;'b'odot;'u'odot;'u'⦼'b'odsold;'u'odsold;'b'OElig;'u'OElig;'b'oelig;'u'oelig;'u'⦿'b'ofcir;'u'ofcir;'b'Ofr;'u'Ofr;'b'ofr;'u'ofr;'u'˛'b'ogon;'u'ogon;'b'Ò'u'Ò'b'ò'u'ò'b'Ograve;'u'Ograve;'b'ograve;'u'ograve;'u'⧁'b'ogt;'u'ogt;'u'⦵'b'ohbar;'u'ohbar;'u'Ω'b'ohm;'u'ohm;'b'oint;'u'oint;'b'olarr;'u'olarr;'u'⦾'b'olcir;'u'olcir;'u'⦻'b'olcross;'u'olcross;'u'‾'b'oline;'u'oline;'u'⧀'b'olt;'u'olt;'u'Ō'b'Omacr;'u'Omacr;'u'ō'b'omacr;'u'omacr;'b'Omega;'u'Omega;'u'ω'b'omega;'u'omega;'u'Ο'b'Omicron;'u'Omicron;'u'ο'b'omicron;'u'omicron;'u'⦶'b'omid;'u'omid;'b'ominus;'u'ominus;'b'Oopf;'u'Oopf;'b'oopf;'u'oopf;'u'⦷'b'opar;'u'opar;'b'OpenCurlyDoubleQuote;'u'OpenCurlyDoubleQuote;'b'OpenCurlyQuote;'u'OpenCurlyQuote;'u'⦹'b'operp;'u'operp;'b'oplus;'u'oplus;'u'⩔'b'Or;'u'Or;'u'∨'b'or;'u'or;'b'orarr;'u'orarr;'u'⩝'b'ord;'u'ord;'u'ℴ'b'order;'u'order;'b'orderof;'u'orderof;'b'ª'u'ª'b'ordf;'u'ordf;'b'º'u'º'b'ordm;'u'ordm;'u'⊶'b'origof;'u'origof;'u'⩖'b'oror;'u'oror;'u'⩗'b'orslope;'u'orslope;'u'⩛'b'orv;'u'orv;'b'oS;'u'oS;'b'Oscr;'u'Oscr;'b'oscr;'u'oscr;'b'Ø'u'Ø'b'ø'u'ø'b'Oslash;'u'Oslash;'b'oslash;'u'oslash;'u'⊘'b'osol;'u'osol;'b'Õ'u'Õ'b'õ'u'õ'b'Otilde;'u'Otilde;'b'otilde;'u'otilde;'u'⨷'b'Otimes;'u'Otimes;'b'otimes;'u'otimes;'u'⨶'b'otimesas;'u'otimesas;'b'Ö'u'Ö'b'ö'u'ö'b'Ouml;'u'Ouml;'b'ouml;'u'ouml;'u'⌽'b'ovbar;'u'ovbar;'b'OverBar;'u'OverBar;'u'⏞'b'OverBrace;'u'OverBrace;'u'⎴'b'OverBracket;'u'OverBracket;'u'⏜'b'OverParenthesis;'u'OverParenthesis;'b'par;'u'par;'b'¶'u'¶'b'para;'u'para;'b'parallel;'u'parallel;'u'⫳'b'parsim;'u'parsim;'u'⫽'b'parsl;'u'parsl;'u'∂'b'part;'u'part;'b'PartialD;'u'PartialD;'u'П'b'Pcy;'u'Pcy;'u'п'b'pcy;'u'pcy;'b'percnt;'u'percnt;'b'period;'u'period;'b'permil;'u'permil;'b'perp;'u'perp;'u'‱'b'pertenk;'u'pertenk;'b'Pfr;'u'Pfr;'b'pfr;'u'pfr;'u'Φ'b'Phi;'u'Phi;'b'phi;'u'phi;'u'ϕ'b'phiv;'u'phiv;'b'phmmat;'u'phmmat;'u'☎'b'phone;'u'phone;'u'Π'b'Pi;'u'Pi;'u'π'b'pi;'u'pi;'b'pitchfork;'u'pitchfork;'u'ϖ'b'piv;'u'piv;'b'planck;'u'planck;'u'ℎ'b'planckh;'u'planckh;'b'plankv;'u'plankv;'b'plus;'u'plus;'u'⨣'b'plusacir;'u'plusacir;'b'plusb;'u'plusb;'u'⨢'b'pluscir;'u'pluscir;'b'plusdo;'u'plusdo;'u'⨥'b'plusdu;'u'plusdu;'u'⩲'b'pluse;'u'pluse;'b'±'u'±'b'PlusMinus;'u'PlusMinus;'b'plusmn;'u'plusmn;'u'⨦'b'plussim;'u'plussim;'u'⨧'b'plustwo;'u'plustwo;'b'pm;'u'pm;'b'Poincareplane;'u'Poincareplane;'u'⨕'b'pointint;'u'pointint;'u'ℙ'b'Popf;'u'Popf;'b'popf;'u'popf;'b'£'u'£'b'pound;'u'pound;'u'⪻'b'Pr;'u'Pr;'u'≺'b'pr;'u'pr;'u'⪷'b'prap;'u'prap;'u'≼'b'prcue;'u'prcue;'u'⪳'b'prE;'u'prE;'u'⪯'b'pre;'u'pre;'b'prec;'u'prec;'b'precapprox;'u'precapprox;'b'preccurlyeq;'u'preccurlyeq;'b'Precedes;'u'Precedes;'b'PrecedesEqual;'u'PrecedesEqual;'b'PrecedesSlantEqual;'u'PrecedesSlantEqual;'u'≾'b'PrecedesTilde;'u'PrecedesTilde;'b'preceq;'u'preceq;'u'⪹'b'precnapprox;'u'precnapprox;'u'⪵'b'precneqq;'u'precneqq;'
u'⋨'b'precnsim;'u'precnsim;'b'precsim;'u'precsim;'u'″'b'Prime;'u'Prime;'u'′'b'prime;'u'prime;'b'primes;'u'primes;'b'prnap;'u'prnap;'b'prnE;'u'prnE;'b'prnsim;'u'prnsim;'u'∏'b'prod;'u'prod;'b'Product;'u'Product;'u'⌮'b'profalar;'u'profalar;'u'⌒'b'profline;'u'profline;'u'⌓'b'profsurf;'u'profsurf;'u'∝'b'prop;'u'prop;'b'Proportion;'u'Proportion;'b'Proportional;'u'Proportional;'b'propto;'u'propto;'b'prsim;'u'prsim;'u'⊰'b'prurel;'u'prurel;'b'Pscr;'u'Pscr;'b'pscr;'u'pscr;'u'Ψ'b'Psi;'u'Psi;'u'ψ'b'psi;'u'psi;'u' 'b'puncsp;'u'puncsp;'b'Qfr;'u'Qfr;'b'qfr;'u'qfr;'b'qint;'u'qint;'u'ℚ'b'Qopf;'u'Qopf;'b'qopf;'u'qopf;'u'⁗'b'qprime;'u'qprime;'b'Qscr;'u'Qscr;'b'qscr;'u'qscr;'b'quaternions;'u'quaternions;'u'⨖'b'quatint;'u'quatint;'b'quest;'u'quest;'b'questeq;'u'questeq;'b'QUOT'u'QUOT'b'QUOT;'u'QUOT;'b'quot;'u'quot;'u'⇛'b'rAarr;'u'rAarr;'u'∽̱'b'race;'u'race;'u'Ŕ'b'Racute;'u'Racute;'u'ŕ'b'racute;'u'racute;'u'√'b'radic;'u'radic;'u'⦳'b'raemptyv;'u'raemptyv;'u'⟫'b'Rang;'u'Rang;'u'⟩'b'rang;'u'rang;'u'⦒'b'rangd;'u'rangd;'u'⦥'b'range;'u'range;'b'rangle;'u'rangle;'b'»'u'»'b'raquo;'u'raquo;'u'↠'b'Rarr;'u'Rarr;'b'rArr;'u'rArr;'u'→'b'rarr;'u'rarr;'u'⥵'b'rarrap;'u'rarrap;'u'⇥'b'rarrb;'u'rarrb;'u'⤠'b'rarrbfs;'u'rarrbfs;'u'⤳'b'rarrc;'u'rarrc;'u'⤞'b'rarrfs;'u'rarrfs;'b'rarrhk;'u'rarrhk;'b'rarrlp;'u'rarrlp;'u'⥅'b'rarrpl;'u'rarrpl;'u'⥴'b'rarrsim;'u'rarrsim;'u'⤖'b'Rarrtl;'u'Rarrtl;'u'↣'b'rarrtl;'u'rarrtl;'u'↝'b'rarrw;'u'rarrw;'u'⤜'b'rAtail;'u'rAtail;'u'⤚'b'ratail;'u'ratail;'u'∶'b'ratio;'u'ratio;'b'rationals;'u'rationals;'b'RBarr;'u'RBarr;'b'rBarr;'u'rBarr;'b'rbarr;'u'rbarr;'u'❳'b'rbbrk;'u'rbbrk;'b'rbrace;'u'rbrace;'b'rbrack;'u'rbrack;'u'⦌'b'rbrke;'u'rbrke;'u'⦎'b'rbrksld;'u'rbrksld;'u'⦐'b'rbrkslu;'u'rbrkslu;'u'Ř'b'Rcaron;'u'Rcaron;'u'ř'b'rcaron;'u'rcaron;'u'Ŗ'b'Rcedil;'u'Rcedil;'u'ŗ'b'rcedil;'u'rcedil;'u'⌉'b'rceil;'u'rceil;'b'rcub;'u'rcub;'u'Р'b'Rcy;'u'Rcy;'u'р'b'rcy;'u'rcy;'u'⤷'b'rdca;'u'rdca;'u'⥩'b'rdldhar;'u'rdldhar;'b'rdquo;'u'rdquo;'b'rdquor;'u'rdquor;'u'↳'b'rdsh;'u'rdsh;'u'ℜ'b'Re;'u'Re;'b'real;'u'real;'u'ℛ'b'realine;'u'realine;'b'realpart;'u'realpart;'u'ℝ'b'reals;'u'reals;'u'▭'b'rect;'u'rect;'b'REG'u'REG'b'REG;'u'REG;'b'reg;'u'reg;'b'ReverseElement;'u'ReverseElement;'b'ReverseEquilibrium;'u'ReverseEquilibrium;'b'ReverseUpEquilibrium;'u'ReverseUpEquilibrium;'u'⥽'b'rfisht;'u'rfisht;'u'⌋'b'rfloor;'u'rfloor;'b'Rfr;'u'Rfr;'b'rfr;'u'rfr;'u'⥤'b'rHar;'u'rHar;'b'rhard;'u'rhard;'u'⇀'b'rharu;'u'rharu;'u'⥬'b'rharul;'u'rharul;'u'Ρ'b'Rho;'u'Rho;'u'ρ'b'rho;'u'rho;'u'ϱ'b'rhov;'u'rhov;'b'RightAngleBracket;'u'RightAngleBracket;'b'RightArrow;'u'RightArrow;'b'Rightarrow;'u'Rightarrow;'b'rightarrow;'u'rightarrow;'b'RightArrowBar;'u'RightArrowBar;'u'⇄'b'RightArrowLeftArrow;'u'RightArrowLeftArrow;'b'rightarrowtail;'u'rightarrowtail;'b'RightCeiling;'u'RightCeiling;'u'⟧'b'RightDoubleBracket;'u'RightDoubleBracket;'u'⥝'b'RightDownTeeVector;'u'RightDownTeeVector;'b'RightDownVector;'u'RightDownVector;'u'⥕'b'RightDownVectorBar;'u'RightDownVectorBar;'b'RightFloor;'u'RightFloor;'b'rightharpoondown;'u'rightharpoondown;'b'rightharpoonup;'u'rightharpoonup;'b'rightleftarrows;'u'rightleftarrows;'b'rightleftharpoons;'u'rightleftharpoons;'u'⇉'b'rightrightarrows;'u'rightrightarrows;'b'rightsquigarrow;'u'rightsquigarrow;'u'⊢'b'RightTee;'u'RightTee;'b'RightTeeArrow;'u'RightTeeArrow;'u'⥛'b'RightTeeVector;'u'RightTeeVector;'u'⋌'b'rightthreetimes;'u'rightthreetimes;'u'⊳'b'RightTriangle;'u'RightTriangle;'u'⧐'b'RightTriangleBar;'u'RightTriangleBar;'u'⊵'b'RightTriangleEqual;'u'RightTriangleEqual;'u'⥏'b'RightUpDownVector;'u'RightUpDownVector;'u'⥜'b'RightUpTeeVector;'u'Righ
tUpTeeVector;'u'↾'b'RightUpVector;'u'RightUpVector;'u'⥔'b'RightUpVectorBar;'u'RightUpVectorBar;'b'RightVector;'u'RightVector;'u'⥓'b'RightVectorBar;'u'RightVectorBar;'u'˚'b'ring;'u'ring;'b'risingdotseq;'u'risingdotseq;'b'rlarr;'u'rlarr;'b'rlhar;'u'rlhar;'u'‏'b'rlm;'u'rlm;'u'⎱'b'rmoust;'u'rmoust;'b'rmoustache;'u'rmoustache;'u'⫮'b'rnmid;'u'rnmid;'u'⟭'b'roang;'u'roang;'u'⇾'b'roarr;'u'roarr;'b'robrk;'u'robrk;'u'⦆'b'ropar;'u'ropar;'b'Ropf;'u'Ropf;'b'ropf;'u'ropf;'u'⨮'b'roplus;'u'roplus;'u'⨵'b'rotimes;'u'rotimes;'u'⥰'b'RoundImplies;'u'RoundImplies;'b'rpar;'u'rpar;'u'⦔'b'rpargt;'u'rpargt;'u'⨒'b'rppolint;'u'rppolint;'b'rrarr;'u'rrarr;'b'Rrightarrow;'u'Rrightarrow;'b'rsaquo;'u'rsaquo;'b'Rscr;'u'Rscr;'b'rscr;'u'rscr;'u'↱'b'Rsh;'u'Rsh;'b'rsh;'u'rsh;'b'rsqb;'u'rsqb;'b'rsquo;'u'rsquo;'b'rsquor;'u'rsquor;'b'rthree;'u'rthree;'u'⋊'b'rtimes;'u'rtimes;'u'▹'b'rtri;'u'rtri;'b'rtrie;'u'rtrie;'b'rtrif;'u'rtrif;'u'⧎'b'rtriltri;'u'rtriltri;'u'⧴'b'RuleDelayed;'u'RuleDelayed;'u'⥨'b'ruluhar;'u'ruluhar;'u'℞'b'rx;'u'rx;'u'Ś'b'Sacute;'u'Sacute;'u'ś'b'sacute;'u'sacute;'b'sbquo;'u'sbquo;'u'⪼'b'Sc;'u'Sc;'u'≻'b'sc;'u'sc;'u'⪸'b'scap;'u'scap;'b'Scaron;'u'Scaron;'b'scaron;'u'scaron;'u'≽'b'sccue;'u'sccue;'u'⪴'b'scE;'u'scE;'u'⪰'b'sce;'u'sce;'u'Ş'b'Scedil;'u'Scedil;'u'ş'b'scedil;'u'scedil;'u'Ŝ'b'Scirc;'u'Scirc;'u'ŝ'b'scirc;'u'scirc;'u'⪺'b'scnap;'u'scnap;'u'⪶'b'scnE;'u'scnE;'u'⋩'b'scnsim;'u'scnsim;'u'⨓'b'scpolint;'u'scpolint;'u'≿'b'scsim;'u'scsim;'u'С'b'Scy;'u'Scy;'u'с'b'scy;'u'scy;'u'⋅'b'sdot;'u'sdot;'b'sdotb;'u'sdotb;'u'⩦'b'sdote;'u'sdote;'b'searhk;'u'searhk;'u'⇘'b'seArr;'u'seArr;'b'searr;'u'searr;'b'searrow;'u'searrow;'b'§'u'§'b'sect;'u'sect;'b'semi;'u'semi;'u'⤩'b'seswar;'u'seswar;'b'setminus;'u'setminus;'b'setmn;'u'setmn;'u'✶'b'sext;'u'sext;'b'Sfr;'u'Sfr;'b'sfr;'u'sfr;'b'sfrown;'u'sfrown;'u'♯'b'sharp;'u'sharp;'u'Щ'b'SHCHcy;'u'SHCHcy;'u'щ'b'shchcy;'u'shchcy;'u'Ш'b'SHcy;'u'SHcy;'u'ш'b'shcy;'u'shcy;'b'ShortDownArrow;'u'ShortDownArrow;'b'ShortLeftArrow;'u'ShortLeftArrow;'b'shortmid;'u'shortmid;'b'shortparallel;'u'shortparallel;'b'ShortRightArrow;'u'ShortRightArrow;'u'↑'b'ShortUpArrow;'u'ShortUpArrow;'b'­'u'­'b'shy;'u'shy;'u'Σ'b'Sigma;'u'Sigma;'u'σ'b'sigma;'u'sigma;'u'ς'b'sigmaf;'u'sigmaf;'b'sigmav;'u'sigmav;'u'∼'b'sim;'u'sim;'u'⩪'b'simdot;'u'simdot;'u'≃'b'sime;'u'sime;'b'simeq;'u'simeq;'u'⪞'b'simg;'u'simg;'u'⪠'b'simgE;'u'simgE;'u'⪝'b'siml;'u'siml;'u'⪟'b'simlE;'u'simlE;'u'≆'b'simne;'u'simne;'u'⨤'b'simplus;'u'simplus;'u'⥲'b'simrarr;'u'simrarr;'b'slarr;'u'slarr;'b'SmallCircle;'u'SmallCircle;'b'smallsetminus;'u'smallsetminus;'u'⨳'b'smashp;'u'smashp;'u'⧤'b'smeparsl;'u'smeparsl;'b'smid;'u'smid;'u'⌣'b'smile;'u'smile;'u'⪪'b'smt;'u'smt;'u'⪬'b'smte;'u'smte;'u'⪬︀'b'smtes;'u'smtes;'u'Ь'b'SOFTcy;'u'SOFTcy;'u'ь'b'softcy;'u'softcy;'b'sol;'u'sol;'u'⧄'b'solb;'u'solb;'u'⌿'b'solbar;'u'solbar;'b'Sopf;'u'Sopf;'b'sopf;'u'sopf;'u'♠'b'spades;'u'spades;'b'spadesuit;'u'spadesuit;'b'spar;'u'spar;'u'⊓'b'sqcap;'u'sqcap;'u'⊓︀'b'sqcaps;'u'sqcaps;'u'⊔'b'sqcup;'u'sqcup;'u'⊔︀'b'sqcups;'u'sqcups;'b'Sqrt;'u'Sqrt;'u'⊏'b'sqsub;'u'sqsub;'u'⊑'b'sqsube;'u'sqsube;'b'sqsubset;'u'sqsubset;'b'sqsubseteq;'u'sqsubseteq;'u'⊐'b'sqsup;'u'sqsup;'u'⊒'b'sqsupe;'u'sqsupe;'b'sqsupset;'u'sqsupset;'b'sqsupseteq;'u'sqsupseteq;'u'□'b'squ;'u'squ;'b'Square;'u'Square;'b'square;'u'square;'b'SquareIntersection;'u'SquareIntersection;'b'SquareSubset;'u'SquareSubset;'b'SquareSubsetEqual;'u'SquareSubsetEqual;'b'SquareSuperset;'u'SquareSuperset;'b'SquareSupersetEqual;'u'SquareSupersetEqual;'b'SquareUnion;'u'SquareUnion;'b'squarf;'u'squarf;'b'squf;'u'squf;'b'srarr;'u'srarr;'b'Sscr;'u'Sscr;'b'sscr;'u'
sscr;'b'ssetmn;'u'ssetmn;'b'ssmile;'u'ssmile;'u'⋆'b'sstarf;'u'sstarf;'b'Star;'u'Star;'u'☆'b'star;'u'star;'b'starf;'u'starf;'b'straightepsilon;'u'straightepsilon;'b'straightphi;'u'straightphi;'b'strns;'u'strns;'u'⋐'b'Sub;'u'Sub;'u'⊂'b'sub;'u'sub;'u'⪽'b'subdot;'u'subdot;'u'⫅'b'subE;'u'subE;'u'⊆'b'sube;'u'sube;'u'⫃'b'subedot;'u'subedot;'u'⫁'b'submult;'u'submult;'u'⫋'b'subnE;'u'subnE;'u'⊊'b'subne;'u'subne;'u'⪿'b'subplus;'u'subplus;'u'⥹'b'subrarr;'u'subrarr;'b'Subset;'u'Subset;'b'subset;'u'subset;'b'subseteq;'u'subseteq;'b'subseteqq;'u'subseteqq;'b'SubsetEqual;'u'SubsetEqual;'b'subsetneq;'u'subsetneq;'b'subsetneqq;'u'subsetneqq;'u'⫇'b'subsim;'u'subsim;'u'⫕'b'subsub;'u'subsub;'u'⫓'b'subsup;'u'subsup;'b'succ;'u'succ;'b'succapprox;'u'succapprox;'b'succcurlyeq;'u'succcurlyeq;'b'Succeeds;'u'Succeeds;'b'SucceedsEqual;'u'SucceedsEqual;'b'SucceedsSlantEqual;'u'SucceedsSlantEqual;'b'SucceedsTilde;'u'SucceedsTilde;'b'succeq;'u'succeq;'b'succnapprox;'u'succnapprox;'b'succneqq;'u'succneqq;'b'succnsim;'u'succnsim;'b'succsim;'u'succsim;'b'SuchThat;'u'SuchThat;'u'∑'b'Sum;'u'Sum;'b'sum;'u'sum;'u'♪'b'sung;'u'sung;'b'¹'u'¹'b'sup1;'u'sup1;'b'²'u'²'b'sup2;'u'sup2;'b'³'u'³'b'sup3;'u'sup3;'u'⋑'b'Sup;'u'Sup;'u'⊃'b'sup;'u'sup;'u'⪾'b'supdot;'u'supdot;'u'⫘'b'supdsub;'u'supdsub;'u'⫆'b'supE;'u'supE;'u'⊇'b'supe;'u'supe;'u'⫄'b'supedot;'u'supedot;'b'Superset;'u'Superset;'b'SupersetEqual;'u'SupersetEqual;'u'⟉'b'suphsol;'u'suphsol;'u'⫗'b'suphsub;'u'suphsub;'u'⥻'b'suplarr;'u'suplarr;'u'⫂'b'supmult;'u'supmult;'u'⫌'b'supnE;'u'supnE;'u'⊋'b'supne;'u'supne;'u'⫀'b'supplus;'u'supplus;'b'Supset;'u'Supset;'b'supset;'u'supset;'b'supseteq;'u'supseteq;'b'supseteqq;'u'supseteqq;'b'supsetneq;'u'supsetneq;'b'supsetneqq;'u'supsetneqq;'u'⫈'b'supsim;'u'supsim;'u'⫔'b'supsub;'u'supsub;'u'⫖'b'supsup;'u'supsup;'b'swarhk;'u'swarhk;'u'⇙'b'swArr;'u'swArr;'b'swarr;'u'swarr;'b'swarrow;'u'swarrow;'u'⤪'b'swnwar;'u'swnwar;'b'ß'u'ß'b'szlig;'u'szlig;'b'Tab;'u'Tab;'u'⌖'b'target;'u'target;'u'Τ'b'Tau;'u'Tau;'u'τ'b'tau;'u'tau;'b'tbrk;'u'tbrk;'u'Ť'b'Tcaron;'u'Tcaron;'u'ť'b'tcaron;'u'tcaron;'u'Ţ'b'Tcedil;'u'Tcedil;'u'ţ'b'tcedil;'u'tcedil;'u'Т'b'Tcy;'u'Tcy;'u'т'b'tcy;'u'tcy;'u'⃛'b'tdot;'u'tdot;'u'⌕'b'telrec;'u'telrec;'b'Tfr;'u'Tfr;'b'tfr;'u'tfr;'u'∴'b'there4;'u'there4;'b'Therefore;'u'Therefore;'b'therefore;'u'therefore;'u'Θ'b'Theta;'u'Theta;'u'θ'b'theta;'u'theta;'u'ϑ'b'thetasym;'u'thetasym;'b'thetav;'u'thetav;'b'thickapprox;'u'thickapprox;'b'thicksim;'u'thicksim;'u'  'b'ThickSpace;'u'ThickSpace;'u' 
'b'thinsp;'u'thinsp;'b'ThinSpace;'u'ThinSpace;'b'thkap;'u'thkap;'b'thksim;'u'thksim;'b'Þ'u'Þ'b'þ'u'þ'b'THORN;'u'THORN;'b'thorn;'u'thorn;'b'Tilde;'u'Tilde;'b'tilde;'u'tilde;'b'TildeEqual;'u'TildeEqual;'b'TildeFullEqual;'u'TildeFullEqual;'b'TildeTilde;'u'TildeTilde;'b'×'u'×'b'times;'u'times;'b'timesb;'u'timesb;'u'⨱'b'timesbar;'u'timesbar;'u'⨰'b'timesd;'u'timesd;'b'tint;'u'tint;'b'toea;'u'toea;'b'top;'u'top;'u'⌶'b'topbot;'u'topbot;'u'⫱'b'topcir;'u'topcir;'b'Topf;'u'Topf;'b'topf;'u'topf;'u'⫚'b'topfork;'u'topfork;'b'tosa;'u'tosa;'u'‴'b'tprime;'u'tprime;'b'TRADE;'u'TRADE;'b'trade;'u'trade;'u'▵'b'triangle;'u'triangle;'b'triangledown;'u'triangledown;'b'triangleleft;'u'triangleleft;'b'trianglelefteq;'u'trianglelefteq;'u'≜'b'triangleq;'u'triangleq;'b'triangleright;'u'triangleright;'b'trianglerighteq;'u'trianglerighteq;'u'◬'b'tridot;'u'tridot;'b'trie;'u'trie;'u'⨺'b'triminus;'u'triminus;'b'TripleDot;'u'TripleDot;'u'⨹'b'triplus;'u'triplus;'u'⧍'b'trisb;'u'trisb;'u'⨻'b'tritime;'u'tritime;'u'⏢'b'trpezium;'u'trpezium;'b'Tscr;'u'Tscr;'b'tscr;'u'tscr;'u'Ц'b'TScy;'u'TScy;'u'ц'b'tscy;'u'tscy;'u'Ћ'b'TSHcy;'u'TSHcy;'u'ћ'b'tshcy;'u'tshcy;'u'Ŧ'b'Tstrok;'u'Tstrok;'u'ŧ'b'tstrok;'u'tstrok;'b'twixt;'u'twixt;'b'twoheadleftarrow;'u'twoheadleftarrow;'b'twoheadrightarrow;'u'twoheadrightarrow;'b'Ú'u'Ú'b'ú'u'ú'b'Uacute;'u'Uacute;'b'uacute;'u'uacute;'u'↟'b'Uarr;'u'Uarr;'b'uArr;'u'uArr;'b'uarr;'u'uarr;'u'⥉'b'Uarrocir;'u'Uarrocir;'u'Ў'b'Ubrcy;'u'Ubrcy;'u'ў'b'ubrcy;'u'ubrcy;'u'Ŭ'b'Ubreve;'u'Ubreve;'u'ŭ'b'ubreve;'u'ubreve;'b'Û'u'Û'b'û'u'û'b'Ucirc;'u'Ucirc;'b'ucirc;'u'ucirc;'u'У'b'Ucy;'u'Ucy;'u'у'b'ucy;'u'ucy;'u'⇅'b'udarr;'u'udarr;'u'Ű'b'Udblac;'u'Udblac;'u'ű'b'udblac;'u'udblac;'u'⥮'b'udhar;'u'udhar;'u'⥾'b'ufisht;'u'ufisht;'b'Ufr;'u'Ufr;'b'ufr;'u'ufr;'b'Ù'u'Ù'b'ù'u'ù'b'Ugrave;'u'Ugrave;'b'ugrave;'u'ugrave;'u'⥣'b'uHar;'u'uHar;'b'uharl;'u'uharl;'b'uharr;'u'uharr;'u'▀'b'uhblk;'u'uhblk;'u'⌜'b'ulcorn;'u'ulcorn;'b'ulcorner;'u'ulcorner;'u'⌏'b'ulcrop;'u'ulcrop;'u'◸'b'ultri;'u'ultri;'u'Ū'b'Umacr;'u'Umacr;'u'ū'b'umacr;'u'umacr;'b'uml;'u'uml;'b'UnderBar;'u'UnderBar;'u'⏟'b'UnderBrace;'u'UnderBrace;'b'UnderBracket;'u'UnderBracket;'u'⏝'b'UnderParenthesis;'u'UnderParenthesis;'b'Union;'u'Union;'u'⊎'b'UnionPlus;'u'UnionPlus;'u'Ų'b'Uogon;'u'Uogon;'u'ų'b'uogon;'u'uogon;'b'Uopf;'u'Uopf;'b'uopf;'u'uopf;'b'UpArrow;'u'UpArrow;'b'Uparrow;'u'Uparrow;'b'uparrow;'u'uparrow;'u'⤒'b'UpArrowBar;'u'UpArrowBar;'b'UpArrowDownArrow;'u'UpArrowDownArrow;'u'↕'b'UpDownArrow;'u'UpDownArrow;'b'Updownarrow;'u'Updownarrow;'b'updownarrow;'u'updownarrow;'b'UpEquilibrium;'u'UpEquilibrium;'b'upharpoonleft;'u'upharpoonleft;'b'upharpoonright;'u'upharpoonright;'b'uplus;'u'uplus;'b'UpperLeftArrow;'u'UpperLeftArrow;'b'UpperRightArrow;'u'UpperRightArrow;'u'ϒ'b'Upsi;'u'Upsi;'u'υ'b'upsi;'u'upsi;'b'upsih;'u'upsih;'u'Υ'b'Upsilon;'u'Upsilon;'b'upsilon;'u'upsilon;'b'UpTee;'u'UpTee;'b'UpTeeArrow;'u'UpTeeArrow;'u'⇈'b'upuparrows;'u'upuparrows;'u'⌝'b'urcorn;'u'urcorn;'b'urcorner;'u'urcorner;'u'⌎'b'urcrop;'u'urcrop;'u'Ů'b'Uring;'u'Uring;'u'ů'b'uring;'u'uring;'u'◹'b'urtri;'u'urtri;'b'Uscr;'u'Uscr;'b'uscr;'u'uscr;'u'⋰'b'utdot;'u'utdot;'u'Ũ'b'Utilde;'u'Utilde;'u'ũ'b'utilde;'u'utilde;'b'utri;'u'utri;'b'utrif;'u'utrif;'b'uuarr;'u'uuarr;'b'Ü'u'Ü'b'ü'u'ü'b'Uuml;'u'Uuml;'b'uuml;'u'uuml;'u'⦧'b'uwangle;'u'uwangle;'u'⦜'b'vangrt;'u'vangrt;'b'varepsilon;'u'varepsilon;'b'varkappa;'u'varkappa;'b'varnothing;'u'varnothing;'b'varphi;'u'varphi;'b'varpi;'u'varpi;'b'varpropto;'u'varpropto;'b'vArr;'u'vArr;'b'varr;'u'varr;'b'varrho;'u'varrho;'b'varsigma;'u'varsigma;'u'⊊︀'b'varsubsetneq;'u'varsubsetneq;'u'⫋︀'b'varsub
setneqq;'u'varsubsetneqq;'u'⊋︀'b'varsupsetneq;'u'varsupsetneq;'u'⫌︀'b'varsupsetneqq;'u'varsupsetneqq;'b'vartheta;'u'vartheta;'b'vartriangleleft;'u'vartriangleleft;'b'vartriangleright;'u'vartriangleright;'u'⫫'b'Vbar;'u'Vbar;'u'⫨'b'vBar;'u'vBar;'u'⫩'b'vBarv;'u'vBarv;'u'В'b'Vcy;'u'Vcy;'u'в'b'vcy;'u'vcy;'u'⊫'b'VDash;'u'VDash;'u'⊩'b'Vdash;'u'Vdash;'b'vDash;'u'vDash;'b'vdash;'u'vdash;'u'⫦'b'Vdashl;'u'Vdashl;'b'Vee;'u'Vee;'b'vee;'u'vee;'u'⊻'b'veebar;'u'veebar;'u'≚'b'veeeq;'u'veeeq;'u'⋮'b'vellip;'u'vellip;'u'‖'b'Verbar;'u'Verbar;'b'verbar;'u'verbar;'b'Vert;'u'Vert;'b'vert;'u'vert;'b'VerticalBar;'u'VerticalBar;'b'VerticalLine;'u'VerticalLine;'u'❘'b'VerticalSeparator;'u'VerticalSeparator;'u'≀'b'VerticalTilde;'u'VerticalTilde;'b'VeryThinSpace;'u'VeryThinSpace;'b'Vfr;'u'Vfr;'b'vfr;'u'vfr;'b'vltri;'u'vltri;'b'vnsub;'u'vnsub;'b'vnsup;'u'vnsup;'b'Vopf;'u'Vopf;'b'vopf;'u'vopf;'b'vprop;'u'vprop;'b'vrtri;'u'vrtri;'b'Vscr;'u'Vscr;'b'vscr;'u'vscr;'b'vsubnE;'u'vsubnE;'b'vsubne;'u'vsubne;'b'vsupnE;'u'vsupnE;'b'vsupne;'u'vsupne;'u'⊪'b'Vvdash;'u'Vvdash;'u'⦚'b'vzigzag;'u'vzigzag;'u'Ŵ'b'Wcirc;'u'Wcirc;'u'ŵ'b'wcirc;'u'wcirc;'u'⩟'b'wedbar;'u'wedbar;'b'Wedge;'u'Wedge;'b'wedge;'u'wedge;'u'≙'b'wedgeq;'u'wedgeq;'u'℘'b'weierp;'u'weierp;'b'Wfr;'u'Wfr;'b'wfr;'u'wfr;'b'Wopf;'u'Wopf;'b'wopf;'u'wopf;'b'wp;'u'wp;'b'wr;'u'wr;'b'wreath;'u'wreath;'b'Wscr;'u'Wscr;'b'wscr;'u'wscr;'b'xcap;'u'xcap;'b'xcirc;'u'xcirc;'b'xcup;'u'xcup;'b'xdtri;'u'xdtri;'b'Xfr;'u'Xfr;'b'xfr;'u'xfr;'b'xhArr;'u'xhArr;'b'xharr;'u'xharr;'u'Ξ'b'Xi;'u'Xi;'u'ξ'b'xi;'u'xi;'b'xlArr;'u'xlArr;'b'xlarr;'u'xlarr;'b'xmap;'u'xmap;'u'⋻'b'xnis;'u'xnis;'b'xodot;'u'xodot;'b'Xopf;'u'Xopf;'b'xopf;'u'xopf;'b'xoplus;'u'xoplus;'b'xotime;'u'xotime;'b'xrArr;'u'xrArr;'b'xrarr;'u'xrarr;'b'Xscr;'u'Xscr;'b'xscr;'u'xscr;'b'xsqcup;'u'xsqcup;'b'xuplus;'u'xuplus;'b'xutri;'u'xutri;'b'xvee;'u'xvee;'b'xwedge;'u'xwedge;'b'Ý'u'Ý'b'ý'u'ý'b'Yacute;'u'Yacute;'b'yacute;'u'yacute;'u'Я'b'YAcy;'u'YAcy;'u'я'b'yacy;'u'yacy;'u'Ŷ'b'Ycirc;'u'Ycirc;'u'ŷ'b'ycirc;'u'ycirc;'u'Ы'b'Ycy;'u'Ycy;'u'ы'b'ycy;'u'ycy;'b'¥'u'¥'b'yen;'u'yen;'b'Yfr;'u'Yfr;'b'yfr;'u'yfr;'u'Ї'b'YIcy;'u'YIcy;'u'ї'b'yicy;'u'yicy;'b'Yopf;'u'Yopf;'b'yopf;'u'yopf;'b'Yscr;'u'Yscr;'b'yscr;'u'yscr;'u'Ю'b'YUcy;'u'YUcy;'u'ю'b'yucy;'u'yucy;'u'ÿ'b'Yuml;'u'Yuml;'b'yuml;'u'yuml;'u'Ź'b'Zacute;'u'Zacute;'u'ź'b'zacute;'u'zacute;'b'Zcaron;'u'Zcaron;'b'zcaron;'u'zcaron;'u'З'b'Zcy;'u'Zcy;'u'з'b'zcy;'u'zcy;'u'Ż'b'Zdot;'u'Zdot;'u'ż'b'zdot;'u'zdot;'u'ℨ'b'zeetrf;'u'zeetrf;'b'ZeroWidthSpace;'u'ZeroWidthSpace;'u'Ζ'b'Zeta;'u'Zeta;'u'ζ'b'zeta;'u'zeta;'b'Zfr;'u'Zfr;'b'zfr;'u'zfr;'u'Ж'b'ZHcy;'u'ZHcy;'u'ж'b'zhcy;'u'zhcy;'u'⇝'b'zigrarr;'u'zigrarr;'b'Zopf;'u'Zopf;'b'zopf;'u'zopf;'b'Zscr;'u'Zscr;'b'zscr;'u'zscr;'u'‍'b'zwj;'u'zwj;'u'‌'b'zwnj;'u'zwnj;'u'entities'MappingProxyTypeDynamicClassAttributeEnumMetaFlagIntFlagunique_is_descriptor + Returns True if obj is a descriptor, False otherwise. + _is_dunder + Returns True if a __dunder__ name, False otherwise. + _is_sunder + Returns True if a _sunder_ name, False otherwise. + _make_class_unpicklable + Make the given class un-picklable. + _break_on_call_reduce%r cannot be pickled_auto_null + Instances are replaced with an appropriate value in Enum class suites. + _EnumDict + Track enum member order and ensure member names are not reused. + + EnumMeta will use the names found in self._member_names as the + enumeration member names. + _member_names_last_values_ignore_auto_called + Changes anything not dundered or not a descriptor. + + If an enum member name is used twice, an error is raised; duplicate + values are not checked for. 
+ + Single underscore (sunder) names are reserved. + _order__create_pseudo_member__generate_next_value__missing__ignore__names_ are reserved for future Enum use_generate_next_value_ must be defined before members_generate_next_valuealready_ignore_ cannot specify already set names: %r__order__Attempted to reuse key: %r%r already defined as: %r + Metaclass for Enum + metacls_check_for_existing_membersenum_dict_get_mixins_member_typefirst_enumclassdict_find_new_save_newuse_argsenum_membersinvalid_namesInvalid enum member name: {0}An enumeration.enum_class_member_names__member_map__member_type_dynamic_attributes_value2member_map___getnewargs_ex__member_nameenum_member_name_canonical_memberclass_methodobj_methodenum_method__new_member__member order does not match _order_ + classes/types should always be True. + qualname + Either returns an existing member, or creates a new enum class. + + This method is used both when an enum class is given a value to match + to an enumeration member (i.e. Color(3)) and for the functional API + (i.e. Color = Enum('Color', names='RED GREEN BLUE')). + + When used for the functional API: + + `value` will be the name of the new class. + + `names` should be either a string of white-space/comma delimited names + (values will start at `start`), or an iterator/mapping of name, value pairs. + + `module` should be set to the module this class is being created in; + if it is not set, an attempt to find that module will be made, but if + it fails the class will not be picklable. + + `qualname` should be set to the actual location this class can be found + at in its module; by default it is set to the global scope. If this is + not correct, unpickling will fail in some circumstances. + + `type`, if set, will be mixed in as the first base class. + _create_unsupported operand type(s) for 'in': '%s' and '%s'%s: cannot delete Enum member. + Return the enum member matching `name` + + We use __getattr__ instead of descriptors or inserting into the enum + class' __dict__ in order to support `name` and `value` being both + properties for enum members (which live in the class' __dict__) and + enum members themselves. + + Returns members in definition order. + + Returns a mapping of member name->value. + + This mapping lists all enum members, including aliases. Note that this + is a read-only view of the internal mapping. + + Returns members in reverse definition order. + + Block attempts to reassign Enum members. + + A simple assignment to the class namespace only changes one of the + several possible ways to get an Enum member from the Enum class, + resulting in an inconsistent Enumeration. + member_mapCannot reassign members. + Convenience method to create a new Enum class. + + `names` can be: + + * A string containing member names, separated either with spaces or + commas. Values are incremented by 1 from `start`. + * An iterable of member names. Values are incremented by 1 from `start`. + * An iterable of (member name, value) pairs. + * A mapping of member name -> value pairs. + original_nameslast_valuesmember_value_convert_ + Create a new Enum subclass that replaces a collection of global constants + _reduce_ex_by_name_convert is deprecated and will be removed in 3.9, use _convert_ instead."_convert is deprecated and will be removed in 3.9, use ""_convert_ instead."%s: cannot extend enumeration %r + Returns the type for creating enum members, and the first inherited + enum class. 
+ + bases: the tuple of bases that was given to __new__ + _find_data_typedata_typescandidate%r: too many data types: %rnew enumerations should be created as `EnumName([mixin_type, ...] [data_type,] enum_type)`"new enumerations should be created as ""`EnumName([mixin_type, ...] [data_type,] enum_type)`"Cannot extend enumerations + Returns the __new__ to be used for creating the enum members. + + classdict: the class dictionary given to __new__ + member_type: the data type whose __new__ will be used by default + first_enum: enumeration to check for an overriding __new__ + possible + Generic enumeration. + + Derive from this class to define new enumerations. + %r is not a valid %sve_excerror in %s._missing_: returned %r instead of None or a valid member + Generate the next value when not given. + + name: the name of the member + start: the initial start value or None + count: the number of existing members + last_value: the last value assigned or None + <%s.%s: %r> + Returns all members and all public methods + added_behavior + Returns format using actual value type unless __str__ has been overridden. + str_overriddenThe name of the Enum member.The value of the Enum member.Enum where members are also (and must be) ints + Support for flags + _high_bithigh_bitInvalid Flag value: %r + Returns member (possibly creating it) if one can be found for value. + possible_member + Create a composite member iff value contains only members. + pseudo_member_decomposeextra_flags + Returns True if self has at least the same flags set as other. + uncovered%s.%rinverted + Support for integer-based Flags + new_memberneed_to_createbitflag_value + returns index of highest bit, or -1 if value is zero or negative + enumeration + Class decorator for enumerations ensuring unique member values. + duplicates%s -> %salias_detailsduplicate values found in %r: %s + Extract all members from the value. + not_coverednegativeflags_to_check_power_of_two# check if members already defined as auto()# descriptor overwriting an enum?# enum overwriting a descriptor?# Dummy value for Enum as EnumMeta explicitly checks for it, but of course# until EnumMeta finishes running the first time the Enum class doesn't exist.# This is also why there are checks in EnumMeta like `if Enum is not None`# check that previous enum members do not exist# create the namespace dict# inherit previous flags and _generate_next_value_ function# an Enum class is final once enumeration items have been defined; it# cannot be mixed with other types (int, float, etc.) if it has an# inherited __new__ unless a new __new__ is defined (or the resulting# class will fail).# remove any keys listed in _ignore_# save enum items into separate mapping so they don't get baked into# the new class# adjust the sunders# check for illegal enum names (any others?)# create a default docstring if one has not been provided# create our new Enum type# names in definition order# name->value map# save DynamicClassAttribute attributes from super classes so we know# if we can take the shortcut of storing members in the class dict# Reverse value->name map for hashable values.# If a custom type is mixed into the Enum, and it does not know how# to pickle itself, pickle.dumps will succeed but pickle.loads will# fail. Rather than have the error show up later and possibly far# from the source, sabotage the pickle protocol for this class so# that pickle.dumps also fails.# However, if the new class implements its own __reduce_ex__, do not# sabotage -- it's on them to make sure it works correctly. 
We use# __reduce_ex__ instead of any of the others as it is preferred by# pickle over __reduce__, and it handles all pickle protocols.# instantiate them, checking for duplicates as we go# we instantiate first instead of checking for duplicates first in case# a custom __new__ is doing something funky with the values -- such as# auto-numbering ;)# special case for tuple enums# wrap it one more time# If another member with the same value was already defined, the# new member becomes an alias to the existing one.# Aliases don't appear in member names (only in __members__).# performance boost for any member that would not shadow# a DynamicClassAttribute# now add to _member_map_# This may fail if value is not hashable. We can't add the value# to the map, and by-value lookups for this value will be# linear.# double check that repr and friends are not the mixin's or various# things break (such as pickle)# however, if the method is defined in the Enum itself, don't replace# it# replace any other __new__ with our own (as long as Enum is not None,# anyway) -- again, this is to support pickle# if the user defined their own __new__, save it before it gets# clobbered in case they subclass later# py3 support for definition order (helps keep py2/py3 code in sync)# simple value lookup# otherwise, functional API: we're creating a new Enum type# nicer error message when someone tries to delete an attribute# (see issue19025).# special processing needed for names?# Here, names is either an iterable of (name, value) or a mapping.# TODO: replace the frame hack if a blessed way to know the calling# module is ever developed# convert all constants from source (or module) that pass filter() to# a new Enum called name, and export the enum and its members back to# module;# also, replace the __reduce_ex__ method so unpickling works in# previous Python versions# _value2member_map_ is populated in the same order every time# for a consistent reverse mapping of number to name when there# are multiple names for the same number.# sort by value# unless some values aren't comparable, in which case sort by name# ensure final parent class is an Enum derivative, find any concrete# data type, and check that Enum has no members# now find the correct __new__, checking to see of one was defined# by the user; also check earlier enum classes in case a __new__ was# saved as __new_member__# should __new__ be saved as __new_member__ later?# check all possibles for __new_member__ before falling back to# __new__# if a non-object.__new__ is used then whatever value/tuple was# assigned to the enum member name will be passed to __new__ and to the# new enum member's __init__# all enum instances are actually created during class construction# without calling this method; this method is called by the metaclass'# __call__ (i.e. 
Color(3) ), and by pickle# For lookups like Color(Color.RED)# by-value search for a matching enum member# see if it's in the reverse mapping (for hashable values)# Not found, no need to do long O(n) search# not there, now do long search -- O(n) behavior# still not found -- try _missing_ hook# ensure all variables that could hold an exception are destroyed# mixed-in Enums should use the mixed-in type's __format__, otherwise# we can get strange results with the Enum name showing up instead of# the value# pure Enum branch, or branch with __str__ explicitly overridden# mix-in branch# DynamicClassAttribute is used to provide access to the `name` and# `value` properties of enum members while keeping some measure of# protection from modification, while still allowing for an enumeration# to have members named `name` and `value`. This works because enumeration# members are not set directly on the enum class -- __getattr__ is# used to look them up.# verify all bits are accounted for# construct a singleton enum pseudo-member# use setdefault in case another thread already created a composite# with this value# get unaccounted for bits# timer = 10# timer -= 1# construct singleton pseudo-members# _decompose is only called if the value is not named# issue29167: wrap accesses to _value2member_map_ in a list to avoid race# conditions between iterating over it and having more pseudo-# members added to it# only check for named flags# check for named flags and powers-of-two flags# we have the breakdown, don't need the value member itselfb'EnumMeta'u'EnumMeta'b'Enum'u'Enum'b'IntEnum'u'IntEnum'b'Flag'u'Flag'b'IntFlag'u'IntFlag'b'unique'u'unique'b' + Returns True if obj is a descriptor, False otherwise. + 'u' + Returns True if obj is a descriptor, False otherwise. + 'b'__get__'u'__get__'b'__set__'u'__set__'b'__delete__'u'__delete__'b' + Returns True if a __dunder__ name, False otherwise. + 'u' + Returns True if a __dunder__ name, False otherwise. + 'b' + Returns True if a _sunder_ name, False otherwise. + 'u' + Returns True if a _sunder_ name, False otherwise. + 'b' + Make the given class un-picklable. + 'u' + Make the given class un-picklable. + 'b'%r cannot be pickled'u'%r cannot be pickled'b' + Instances are replaced with an appropriate value in Enum class suites. + 'u' + Instances are replaced with an appropriate value in Enum class suites. + 'b' + Track enum member order and ensure member names are not reused. + + EnumMeta will use the names found in self._member_names as the + enumeration member names. + 'u' + Track enum member order and ensure member names are not reused. + + EnumMeta will use the names found in self._member_names as the + enumeration member names. + 'b' + Changes anything not dundered or not a descriptor. + + If an enum member name is used twice, an error is raised; duplicate + values are not checked for. + + Single underscore (sunder) names are reserved. + 'u' + Changes anything not dundered or not a descriptor. + + If an enum member name is used twice, an error is raised; duplicate + values are not checked for. + + Single underscore (sunder) names are reserved. 
+ 'b'_order_'u'_order_'b'_create_pseudo_member_'u'_create_pseudo_member_'b'_generate_next_value_'u'_generate_next_value_'b'_missing_'u'_missing_'b'_ignore_'u'_ignore_'b'_names_ are reserved for future Enum use'u'_names_ are reserved for future Enum use'b'_generate_next_value_ must be defined before members'u'_generate_next_value_ must be defined before members'b'_generate_next_value'u'_generate_next_value'b'_ignore_ cannot specify already set names: %r'u'_ignore_ cannot specify already set names: %r'b'__order__'u'__order__'b'Attempted to reuse key: %r'u'Attempted to reuse key: %r'b'%r already defined as: %r'u'%r already defined as: %r'b' + Metaclass for Enum + 'u' + Metaclass for Enum + 'b'mro'u'mro'b'Invalid enum member name: {0}'u'Invalid enum member name: {0}'b'An enumeration.'u'An enumeration.'b'__getnewargs_ex__'u'__getnewargs_ex__'b'_value_'u'_value_'b'__str__'u'__str__'b'__format__'u'__format__'b'member order does not match _order_'u'member order does not match _order_'b' + classes/types should always be True. + 'u' + classes/types should always be True. + 'b' + Either returns an existing member, or creates a new enum class. + + This method is used both when an enum class is given a value to match + to an enumeration member (i.e. Color(3)) and for the functional API + (i.e. Color = Enum('Color', names='RED GREEN BLUE')). + + When used for the functional API: + + `value` will be the name of the new class. + + `names` should be either a string of white-space/comma delimited names + (values will start at `start`), or an iterator/mapping of name, value pairs. + + `module` should be set to the module this class is being created in; + if it is not set, an attempt to find that module will be made, but if + it fails the class will not be picklable. + + `qualname` should be set to the actual location this class can be found + at in its module; by default it is set to the global scope. If this is + not correct, unpickling will fail in some circumstances. + + `type`, if set, will be mixed in as the first base class. + 'u' + Either returns an existing member, or creates a new enum class. + + This method is used both when an enum class is given a value to match + to an enumeration member (i.e. Color(3)) and for the functional API + (i.e. Color = Enum('Color', names='RED GREEN BLUE')). + + When used for the functional API: + + `value` will be the name of the new class. + + `names` should be either a string of white-space/comma delimited names + (values will start at `start`), or an iterator/mapping of name, value pairs. + + `module` should be set to the module this class is being created in; + if it is not set, an attempt to find that module will be made, but if + it fails the class will not be picklable. + + `qualname` should be set to the actual location this class can be found + at in its module; by default it is set to the global scope. If this is + not correct, unpickling will fail in some circumstances. + + `type`, if set, will be mixed in as the first base class. + 'b'unsupported operand type(s) for 'in': '%s' and '%s''u'unsupported operand type(s) for 'in': '%s' and '%s''b'%s: cannot delete Enum member.'u'%s: cannot delete Enum member.'b'__members__'u'__members__'b' + Return the enum member matching `name` + + We use __getattr__ instead of descriptors or inserting into the enum + class' __dict__ in order to support `name` and `value` being both + properties for enum members (which live in the class' __dict__) and + enum members themselves. 
+ 'u' + Return the enum member matching `name` + + We use __getattr__ instead of descriptors or inserting into the enum + class' __dict__ in order to support `name` and `value` being both + properties for enum members (which live in the class' __dict__) and + enum members themselves. + 'b' + Returns members in definition order. + 'u' + Returns members in definition order. + 'b' + Returns a mapping of member name->value. + + This mapping lists all enum members, including aliases. Note that this + is a read-only view of the internal mapping. + 'u' + Returns a mapping of member name->value. + + This mapping lists all enum members, including aliases. Note that this + is a read-only view of the internal mapping. + 'b''u''b' + Returns members in reverse definition order. + 'u' + Returns members in reverse definition order. + 'b' + Block attempts to reassign Enum members. + + A simple assignment to the class namespace only changes one of the + several possible ways to get an Enum member from the Enum class, + resulting in an inconsistent Enumeration. + 'u' + Block attempts to reassign Enum members. + + A simple assignment to the class namespace only changes one of the + several possible ways to get an Enum member from the Enum class, + resulting in an inconsistent Enumeration. + 'b'_member_map_'u'_member_map_'b'Cannot reassign members.'u'Cannot reassign members.'b' + Convenience method to create a new Enum class. + + `names` can be: + + * A string containing member names, separated either with spaces or + commas. Values are incremented by 1 from `start`. + * An iterable of member names. Values are incremented by 1 from `start`. + * An iterable of (member name, value) pairs. + * A mapping of member name -> value pairs. + 'u' + Convenience method to create a new Enum class. + + `names` can be: + + * A string containing member names, separated either with spaces or + commas. Values are incremented by 1 from `start`. + * An iterable of member names. Values are incremented by 1 from `start`. + * An iterable of (member name, value) pairs. + * A mapping of member name -> value pairs. + 'b' + Create a new Enum subclass that replaces a collection of global constants + 'u' + Create a new Enum subclass that replaces a collection of global constants + 'b'_convert is deprecated and will be removed in 3.9, use _convert_ instead.'u'_convert is deprecated and will be removed in 3.9, use _convert_ instead.'b'%s: cannot extend enumeration %r'u'%s: cannot extend enumeration %r'b' + Returns the type for creating enum members, and the first inherited + enum class. + + bases: the tuple of bases that was given to __new__ + 'u' + Returns the type for creating enum members, and the first inherited + enum class. + + bases: the tuple of bases that was given to __new__ + 'b'%r: too many data types: %r'u'%r: too many data types: %r'b'new enumerations should be created as `EnumName([mixin_type, ...] [data_type,] enum_type)`'u'new enumerations should be created as `EnumName([mixin_type, ...] [data_type,] enum_type)`'b'Cannot extend enumerations'u'Cannot extend enumerations'b' + Returns the __new__ to be used for creating the enum members. + + classdict: the class dictionary given to __new__ + member_type: the data type whose __new__ will be used by default + first_enum: enumeration to check for an overriding __new__ + 'u' + Returns the __new__ to be used for creating the enum members. 
+ + classdict: the class dictionary given to __new__ + member_type: the data type whose __new__ will be used by default + first_enum: enumeration to check for an overriding __new__ + 'b'__new_member__'u'__new_member__'b' + Generic enumeration. + + Derive from this class to define new enumerations. + 'u' + Generic enumeration. + + Derive from this class to define new enumerations. + 'b'%r is not a valid %s'u'%r is not a valid %s'b'error in %s._missing_: returned %r instead of None or a valid member'u'error in %s._missing_: returned %r instead of None or a valid member'b' + Generate the next value when not given. + + name: the name of the member + start: the initial start value or None + count: the number of existing members + last_value: the last value assigned or None + 'u' + Generate the next value when not given. + + name: the name of the member + start: the initial start value or None + count: the number of existing members + last_value: the last value assigned or None + 'b'<%s.%s: %r>'u'<%s.%s: %r>'b' + Returns all members and all public methods + 'u' + Returns all members and all public methods + 'b' + Returns format using actual value type unless __str__ has been overridden. + 'u' + Returns format using actual value type unless __str__ has been overridden. + 'b'The name of the Enum member.'u'The name of the Enum member.'b'The value of the Enum member.'u'The value of the Enum member.'b'Enum where members are also (and must be) ints'u'Enum where members are also (and must be) ints'b' + Support for flags + 'u' + Support for flags + 'b'Invalid Flag value: %r'u'Invalid Flag value: %r'b' + Returns member (possibly creating it) if one can be found for value. + 'u' + Returns member (possibly creating it) if one can be found for value. + 'b' + Create a composite member iff value contains only members. + 'u' + Create a composite member iff value contains only members. + 'b' + Returns True if self has at least the same flags set as other. + 'u' + Returns True if self has at least the same flags set as other. + 'b'%s.%r'u'%s.%r'b' + Support for integer-based Flags + 'u' + Support for integer-based Flags + 'b' + returns index of highest bit, or -1 if value is zero or negative + 'u' + returns index of highest bit, or -1 if value is zero or negative + 'b' + Class decorator for enumerations ensuring unique member values. + 'u' + Class decorator for enumerations ensuring unique member values. + 'b'%s -> %s'u'%s -> %s'b'duplicate values found in %r: %s'u'duplicate values found in %r: %s'b' + Extract all members from the value. + 'u' + Extract all members from the value. + 'u'enum'E2BIGEACCESEADDRINUSEEAGAINEALREADYEAUTH86EBADARCHEBADEXEC88EBADMACHO94EBADMSGEBADRPCEBUSY89ECANCELEDECHILD6154EDEADLKEDESTADDRREQ83EDEVERREDOM69EDQUOTEFAULTEFBIG79EFTYPEEHOSTDOWNEIDRM92EILSEQEINPROGRESSEINTREINVALEIOEISCONNEISDIRELOOPEMFILEEMLINKEMSGSIZE95EMULTIHOPENAMETOOLONG81ENEEDAUTHENETDOWNENETRESETENFILEENOATTR55ENOBUFS96ENODATAENODEVENOENTENOEXECENOLCK97ENOLINKENOMEM91ENOMSGENOPOLICYENOPROTOOPTENOSPC98ENOSRENOSTRENOSYSENOTBLK57ENOTCONNENOTDIR66ENOTEMPTYENOTRECOVERABLEENOTSOCKENOTSUPENOTTYENXIOEOPNOTSUPP84EOVERFLOW105EOWNERDEADEPERMEPFNOSUPPORT67EPROCLIMEPROCUNAVAIL75EPROGMISMATCH74EPROGUNAVAILEPROTOEPROTONOSUPPORTEPROTOTYPE82EPWROFFERANGEEREMOTEEROFS73ERPCMISMATCH87ESHLIBVERSESHUTDOWNESOCKTNOSUPPORTESPIPEESRCHESTALEETIMEETOOMANYREFSETXTBSYEUSERSEWOULDBLOCKEXDEVu'This module makes available standard errno system symbols. 
+ +The value of each symbol is the corresponding integer value, +e.g., on most systems, errno.ENOENT equals the integer 2. + +The dictionary errno.errorcode maps numeric codes to symbol names, +e.g., errno.errorcode[2] could be the string 'ENOENT'. + +Symbols that are not relevant to the underlying system are not defined. + +To map error codes to error messages, use the function os.strerror(), +e.g. os.strerror(2) could return 'No such file or directory'.'errorcodeException classes raised by urllib. + +The base exception class is URLError, which inherits from OSError. It +doesn't define any behavior of its own, but is the base class for all +exceptions defined in this package. + +HTTPError is an exception class that is also a valid HTTP response +instance. It behaves this way because HTTP protocol errors are valid +responses, with a status code, headers, and a body. In some contexts, +an application may want to handle an exception like a regular +response. +urllib.responseaddinfourlRaised when HTTP error occurs, but also acts like non-error return__super_inithdrsHTTP Error %s: %sException raised when downloaded size does not match content-length.# URLError is a sub-type of OSError, but it doesn't share any of# the implementation. need to override __init__ and __str__.# It sets self.args for compatibility with other OSError# subclasses, but args doesn't have the typical format with errno in# slot 0 and strerror in slot 1. This may be better than nothing.# The addinfourl classes depend on fp being a valid file# object. In some cases, the HTTPError may not have a valid# file object. If this happens, the simplest workaround is to# not initialize the base classes.# since URLError specifies a .reason attribute, HTTPError should also# provide this attribute. See issue13211 for discussion.b'Exception classes raised by urllib. + +The base exception class is URLError, which inherits from OSError. It +doesn't define any behavior of its own, but is the base class for all +exceptions defined in this package. + +HTTPError is an exception class that is also a valid HTTP response +instance. It behaves this way because HTTP protocol errors are valid +responses, with a status code, headers, and a body. In some contexts, +an application may want to handle an exception like a regular +response. +'u'Exception classes raised by urllib. + +The base exception class is URLError, which inherits from OSError. It +doesn't define any behavior of its own, but is the base class for all +exceptions defined in this package. + +HTTPError is an exception class that is also a valid HTTP response +instance. It behaves this way because HTTP protocol errors are valid +responses, with a status code, headers, and a body. In some contexts, +an application may want to handle an exception like a regular +response. 
+'b''u''b'Raised when HTTP error occurs, but also acts like non-error return'u'Raised when HTTP error occurs, but also acts like non-error return'b'HTTP Error %s: %s'u'HTTP Error %s: %s'b''u''b'Exception raised when downloaded size does not match content-length.'u'Exception raised when downloaded size does not match content-length.'email package exception classes.MessageErrorBase class for errors in the email package.MessageParseErrorBase class for message parsing errors.HeaderParseErrorError while parsing headers.BoundaryErrorCouldn't find terminating boundary.MultipartConversionErrorConversion to a multipart is prohibited.An illegal charset was given.MessageDefectBase class for a message defect.NoBoundaryInMultipartDefectA message claimed to be a multipart but had no boundary parameter.StartBoundaryNotFoundDefectThe claimed start boundary was never found.CloseBoundaryNotFoundDefectA start boundary was found, but not the corresponding close boundary.FirstHeaderLineIsContinuationDefectA message had a continuation line as its first header line.MisplacedEnvelopeHeaderDefectA 'Unix-from' header was found in the middle of a header block.MissingHeaderBodySeparatorDefectFound line with no leading whitespace and no colon before blank line.MalformedHeaderDefectMultipartInvariantViolationDefectA message claimed to be a multipart but no subparts were found.InvalidMultipartContentTransferEncodingDefectAn invalid content transfer encoding was set on the multipart itself.Header contained bytes that could not be decodedbase64 encoded sequence had an incorrect lengthbase64 encoded sequence had characters not in base64 alphabetbase64 encoded sequence had invalid length (1 mod 4)HeaderDefectBase class for a header defect.InvalidHeaderDefectHeader is not valid, message gives details.HeaderMissingRequiredValueA header that must have a value had noneNonPrintableDefectASCII characters outside the ascii-printable range foundnon_printablesthe following ASCII non-printables found in header: {}"the following ASCII non-printables found in header: ""{}"ObsoleteHeaderDefectHeader uses syntax declared obsolete by RFC 5322NonASCIILocalPartDefectlocal_part contains non-ASCII characters# These are parsing defects which the parser was able to work around.# XXX: backward compatibility, just in case (it was never emitted).# These errors are specific to header parsing.# This defect only occurs during unicode parsing, not when# parsing messages decoded from binary.b'email package exception classes.'u'email package exception classes.'b'Base class for errors in the email package.'u'Base class for errors in the email package.'b'Base class for message parsing errors.'u'Base class for message parsing errors.'b'Error while parsing headers.'u'Error while parsing headers.'b'Couldn't find terminating boundary.'u'Couldn't find terminating boundary.'b'Conversion to a multipart is prohibited.'u'Conversion to a multipart is prohibited.'b'An illegal charset was given.'u'An illegal charset was given.'b'Base class for a message defect.'u'Base class for a message defect.'b'A message claimed to be a multipart but had no boundary parameter.'u'A message claimed to be a multipart but had no boundary parameter.'b'The claimed start boundary was never found.'u'The claimed start boundary was never found.'b'A start boundary was found, but not the corresponding close boundary.'u'A start boundary was found, but not the corresponding close boundary.'b'A message had a continuation line as its first header line.'u'A message had a continuation line as its 
first header line.'b'A 'Unix-from' header was found in the middle of a header block.'u'A 'Unix-from' header was found in the middle of a header block.'b'Found line with no leading whitespace and no colon before blank line.'u'Found line with no leading whitespace and no colon before blank line.'b'A message claimed to be a multipart but no subparts were found.'u'A message claimed to be a multipart but no subparts were found.'b'An invalid content transfer encoding was set on the multipart itself.'u'An invalid content transfer encoding was set on the multipart itself.'b'Header contained bytes that could not be decoded'u'Header contained bytes that could not be decoded'b'base64 encoded sequence had an incorrect length'u'base64 encoded sequence had an incorrect length'b'base64 encoded sequence had characters not in base64 alphabet'u'base64 encoded sequence had characters not in base64 alphabet'b'base64 encoded sequence had invalid length (1 mod 4)'u'base64 encoded sequence had invalid length (1 mod 4)'b'Base class for a header defect.'u'Base class for a header defect.'b'Header is not valid, message gives details.'u'Header is not valid, message gives details.'b'A header that must have a value had none'u'A header that must have a value had none'b'ASCII characters outside the ascii-printable range found'u'ASCII characters outside the ascii-printable range found'b'the following ASCII non-printables found in header: {}'u'the following ASCII non-printables found in header: {}'b'Header uses syntax declared obsolete by RFC 5322'u'Header uses syntax declared obsolete by RFC 5322'b'local_part contains non-ASCII characters'u'local_part contains non-ASCII characters'u'email.errors'distutils.errors + +Provides exceptions used by the Distutils modules. Note that Distutils +modules may raise standard exceptions; in particular, SystemExit is +usually raised for errors that are obviously the end-user's fault +(eg. bad command-line arguments). + +This module is safe to use in "from ... import *" mode; it only exports +symbols whose names start with "Distutils" and end with "Error".DistutilsErrorThe root of all Distutils evil.Unable to load an expected module, or to find an expected class + within some module (in particular, command modules and classes).DistutilsClassErrorSome command class (or possibly distribution class, if anyone + feels a need to subclass Distribution) is found not to be holding + up its end of the bargain, ie. implementing some part of the + "command "interface.DistutilsGetoptErrorThe option table provided to 'fancy_getopt()' is bogus.DistutilsArgErrorRaised by fancy_getopt in response to getopt.error -- ie. an + error in the command line usage.Any problems in the filesystem: expected file not found, etc. + Typically this is for problems that we detect before OSError + could be raised.DistutilsOptionErrorSyntactic/semantic errors in command options, such as use of + mutually conflicting options, or inconsistent options, + badly-spelled values, etc. No distinction is made between option + values originating in the setup script, the command line, config + files, or what-have-you -- but if we *know* something originated in + the setup script, we'll raise DistutilsSetupError instead.DistutilsSetupErrorFor errors that can be definitely blamed on the setup script, + such as invalid keyword arguments to 'setup()'.We don't know how to do something on the current platform (but + we do know how to do it on some platform) -- eg. 
trying to compile + C files on a platform not supported by a CCompiler subclass.DistutilsExecErrorAny problems executing an external program (such as the C + compiler, when compiling C files).Internal inconsistencies or impossibilities (obviously, this + should never be seen if the code is working!).DistutilsTemplateErrorSyntax error in a file list template.DistutilsByteCompileErrorByte compile error.CCompilerErrorSome compile/link operation failed.PreprocessErrorFailure to preprocess one or more C/C++ files.Failure to compile one or more C/C++ source files.LibErrorFailure to create a static library from one or more C/C++ object + files.Failure to link one or more C/C++ object files into an executable + or shared library file.Attempt to process an unknown file type.# Exception classes used by the CCompiler implementation classesb'distutils.errors + +Provides exceptions used by the Distutils modules. Note that Distutils +modules may raise standard exceptions; in particular, SystemExit is +usually raised for errors that are obviously the end-user's fault +(eg. bad command-line arguments). + +This module is safe to use in "from ... import *" mode; it only exports +symbols whose names start with "Distutils" and end with "Error".'u'distutils.errors + +Provides exceptions used by the Distutils modules. Note that Distutils +modules may raise standard exceptions; in particular, SystemExit is +usually raised for errors that are obviously the end-user's fault +(eg. bad command-line arguments). + +This module is safe to use in "from ... import *" mode; it only exports +symbols whose names start with "Distutils" and end with "Error".'b'The root of all Distutils evil.'u'The root of all Distutils evil.'b'Unable to load an expected module, or to find an expected class + within some module (in particular, command modules and classes).'u'Unable to load an expected module, or to find an expected class + within some module (in particular, command modules and classes).'b'Some command class (or possibly distribution class, if anyone + feels a need to subclass Distribution) is found not to be holding + up its end of the bargain, ie. implementing some part of the + "command "interface.'u'Some command class (or possibly distribution class, if anyone + feels a need to subclass Distribution) is found not to be holding + up its end of the bargain, ie. implementing some part of the + "command "interface.'b'The option table provided to 'fancy_getopt()' is bogus.'u'The option table provided to 'fancy_getopt()' is bogus.'b'Raised by fancy_getopt in response to getopt.error -- ie. an + error in the command line usage.'u'Raised by fancy_getopt in response to getopt.error -- ie. an + error in the command line usage.'b'Any problems in the filesystem: expected file not found, etc. + Typically this is for problems that we detect before OSError + could be raised.'u'Any problems in the filesystem: expected file not found, etc. + Typically this is for problems that we detect before OSError + could be raised.'b'Syntactic/semantic errors in command options, such as use of + mutually conflicting options, or inconsistent options, + badly-spelled values, etc. 
No distinction is made between option + values originating in the setup script, the command line, config + files, or what-have-you -- but if we *know* something originated in + the setup script, we'll raise DistutilsSetupError instead.'u'Syntactic/semantic errors in command options, such as use of + mutually conflicting options, or inconsistent options, + badly-spelled values, etc. No distinction is made between option + values originating in the setup script, the command line, config + files, or what-have-you -- but if we *know* something originated in + the setup script, we'll raise DistutilsSetupError instead.'b'For errors that can be definitely blamed on the setup script, + such as invalid keyword arguments to 'setup()'.'u'For errors that can be definitely blamed on the setup script, + such as invalid keyword arguments to 'setup()'.'b'We don't know how to do something on the current platform (but + we do know how to do it on some platform) -- eg. trying to compile + C files on a platform not supported by a CCompiler subclass.'u'We don't know how to do something on the current platform (but + we do know how to do it on some platform) -- eg. trying to compile + C files on a platform not supported by a CCompiler subclass.'b'Any problems executing an external program (such as the C + compiler, when compiling C files).'u'Any problems executing an external program (such as the C + compiler, when compiling C files).'b'Internal inconsistencies or impossibilities (obviously, this + should never be seen if the code is working!).'u'Internal inconsistencies or impossibilities (obviously, this + should never be seen if the code is working!).'b'Syntax error in a file list template.'u'Syntax error in a file list template.'b'Byte compile error.'u'Byte compile error.'b'Some compile/link operation failed.'u'Some compile/link operation failed.'b'Failure to preprocess one or more C/C++ files.'u'Failure to preprocess one or more C/C++ files.'b'Failure to compile one or more C/C++ source files.'u'Failure to compile one or more C/C++ source files.'b'Failure to create a static library from one or more C/C++ object + files.'u'Failure to create a static library from one or more C/C++ object + files.'b'Failure to link one or more C/C++ object files into an executable + or shared library file.'u'Failure to link one or more C/C++ object files into an executable + or shared library file.'b'Attempt to process an unknown file type.'u'Attempt to process an unknown file type.'u'distutils.errors'Event loop and event loop policy.AbstractEventLoopPolicyget_event_loop_policyset_event_loop_policyget_child_watcherset_child_watcherObject returned by callback registration methods._args_reprException in callback Object returned by timed callback registration methods.when=Return a scheduled callback time. + + The time is an absolute timestamp, using the same time + reference as loop.time(). + Abstract server returned by create_server().Stop serving. This leaves existing connections open.Get the event loop the Server object is attached to.Return True if the server is accepting connections.Start accepting connections. + + This method is idempotent, so it can be called when + the server is already being serving. + Start accepting connections until the coroutine is cancelled. + + The server is closed when the coroutine is cancelled. + Coroutine to wait until service is closed.Abstract event loop.Run the event loop until stop() is called.Run the event loop until a Future is done. 
+ + Return the Future's result, or raise its exception. + Stop the event loop as soon as reasonable. + + Exactly how soon that is may depend on the implementation, but + no more I/O callbacks should be scheduled. + Return whether the event loop is currently running.Close the loop. + + The loop should not be running. + + This is idempotent and irreversible. + + No other methods should be called after this one. + A coroutine which creates a TCP server bound to host and port. + + The return value is a Server object which can be used to stop + the service. + + If host is an empty string or None all interfaces are assumed + and a list of multiple sockets will be returned (most likely + one for IPv4 and another one for IPv6). The host parameter can also be + a sequence (e.g. list) of hosts to bind to. + + family can be set to either AF_INET or AF_INET6 to force the + socket to use IPv4 or IPv6. If not set it will be determined + from host (defaults to AF_UNSPEC). + + flags is a bitmask for getaddrinfo(). + + sock can optionally be specified in order to use a preexisting + socket object. + + backlog is the maximum number of queued connections passed to + listen() (defaults to 100). + + ssl can be set to an SSLContext to enable SSL over the + accepted connections. + + reuse_address tells the kernel to reuse a local socket in + TIME_WAIT state, without waiting for its natural timeout to + expire. If not specified will automatically be set to True on + UNIX. + + reuse_port tells the kernel to allow this endpoint to be bound to + the same port as other existing endpoints are bound to, so long as + they all set this flag when being created. This option is not + supported on Windows. + + ssl_handshake_timeout is the time in seconds that an SSL server + will wait for completion of the SSL handshake before aborting the + connection. Default is 60s. + + start_serving set to True (default) causes the created server + to start accepting connections immediately. When set to False, + the user should await Server.start_serving() or Server.serve_forever() + to make the server to start accepting connections. + Send a file through a transport. + + Return an amount of sent bytes. + Upgrade a transport to TLS. + + Return a new transport that *protocol* should start using + immediately. + create_unix_connectioncreate_unix_serverA coroutine which creates a UNIX Domain Socket server. + + The return value is a Server object, which can be used to stop + the service. + + path is a str, representing a file systsem path to bind the + server socket to. + + sock can optionally be specified in order to use a preexisting + socket object. + + backlog is the maximum number of queued connections passed to + listen() (defaults to 100). + + ssl can be set to an SSLContext to enable SSL over the + accepted connections. + + ssl_handshake_timeout is the time in seconds that an SSL server + will wait for the SSL handshake to complete (defaults to 60s). + + start_serving set to True (default) causes the created server + to start accepting connections immediately. When set to False, + the user should await Server.start_serving() or Server.serve_forever() + to make the server to start accepting connections. + A coroutine which creates a datagram endpoint. + + This method will try to establish the endpoint in the background. + When successful, the coroutine returns a (transport, protocol) pair. + + protocol_factory must be a callable returning a protocol instance. 
+ + socket family AF_INET, socket.AF_INET6 or socket.AF_UNIX depending on + host (or family if specified), socket type SOCK_DGRAM. + + reuse_address tells the kernel to reuse a local socket in + TIME_WAIT state, without waiting for its natural timeout to + expire. If not specified it will automatically be set to True on + UNIX. + + reuse_port tells the kernel to allow this endpoint to be bound to + the same port as other existing endpoints are bound to, so long as + they all set this flag when being created. This option is not + supported on Windows and some UNIX's. If the + :py:data:`~socket.SO_REUSEPORT` constant is not defined then this + capability is unsupported. + + allow_broadcast tells the kernel to allow this endpoint to send + messages to the broadcast address. + + sock can optionally be specified in order to use a preexisting + socket object. + Register read pipe in event loop. Set the pipe to non-blocking mode. + + protocol_factory should instantiate object with Protocol interface. + pipe is a file-like object. + Return pair (transport, protocol), where transport supports the + ReadTransport interface.Register write pipe in event loop. + + protocol_factory should instantiate object with BaseProtocol interface. + Pipe is file-like object already switched to nonblocking. + Return pair (transport, protocol), where transport support + WriteTransport interface.add_readerremove_readeradd_writerremove_writersock_recvsock_recv_intosock_acceptadd_signal_handlerremove_signal_handlerAbstract policy for accessing the event loop.Get the event loop for the current context. + + Returns an event loop object implementing the BaseEventLoop interface, + or raises an exception in case no event loop has been set for the + current context and the current policy does not specify to create one. + + It should never return None.Set the event loop for the current context to loop.Create and return a new event loop object according to this + policy's rules. If there's need to set this loop as the event loop for + the current context, set_event_loop must be called explicitly.Get the watcher for child processes.Set the watcher for child processes.BaseDefaultEventLoopPolicyDefault policy implementation for accessing the event loop. + + In this policy, each thread has its own event loop. However, we + only automatically create an event loop by default for the main + thread; other threads by default have no event loop. + + Other policies may have different rules (e.g. a single global + event loop, or automatically creating an event loop per thread, or + using some other notion of context to which an event loop is + associated). + _loop_factory_Local_set_calledGet the event loop for the current context. + + Returns an instance of EventLoop or raises an exception. + _MainThreadThere is no current event loop in thread %r.Set the event loop.Create a new event loop. + + You must call set_event_loop() to make this the current event + loop. + _RunningLooploop_pid_running_loopReturn the running event loop. Raise a RuntimeError if there is none. + + This function is thread-specific. + no running event loopReturn the running event loop or None. + + This is a low-level function intended to be used by event loops. + This function is thread-specific. + running_loopSet the running event loop. + + This is a low-level function intended to be used by event loops. + This function is thread-specific. + _init_event_loop_policyDefaultEventLoopPolicyGet the current event loop policy.policySet the current event loop policy. 
+ + If policy is None, the default policy is restored.Return an asyncio event loop. + + When called from a coroutine or a callback (e.g. scheduled with call_soon + or similar API), this function will always return the running event loop. + + If there is no running event loop set, the function will return + the result of `get_event_loop_policy().get_event_loop()` call. + current_loopEquivalent to calling get_event_loop_policy().set_event_loop(loop).Equivalent to calling get_event_loop_policy().new_event_loop().Equivalent to calling get_event_loop_policy().get_child_watcher().Equivalent to calling + get_event_loop_policy().set_child_watcher(watcher)._py__get_running_loop_py__set_running_loop_py_get_running_loop_py_get_event_loop_c__get_running_loop_c__set_running_loop_c_get_running_loop_c_get_event_loop# Keep a representation in debug mode to keep callback and# parameters. For example, to log the warning# "Executing took 2.5 second"# Running and stopping the event loop.# Methods scheduling callbacks. All these return Handles.# Method scheduling a coroutine object: create a task.# Methods for interacting with threads.# Network I/O methods returning Futures.# Pipes and subprocesses.# The reason to accept file-like object instead of just file descriptor# is: we need to own pipe and close it at transport finishing# Can got complicated errors if pass f.fileno(),# close fd in pipe transport then close f and vise versa.# Ready-based callback registration methods.# The add_*() methods return None.# The remove_*() methods return True if something was removed,# False if there was nothing to delete.# Completion based I/O methods returning Futures.# Signal handling.# Task factory.# Error handlers.# Debug flag management.# Child processes handling (Unix only).# Event loop policy. The policy itself is always global, even if the# policy's rules say that there is an event loop per thread (or other# notion of context). The default policy is installed by the first# call to get_event_loop_policy().# Lock for protecting the on-the-fly creation of the event loop policy.# A TLS for the running event loop, used by _get_running_loop.# NOTE: this function is implemented in C (see _asynciomodule.c)# pragma: no branch# Alias pure-Python implementations for testing purposes.# get_event_loop() is one of the most frequently called# functions in asyncio. 
Pure Python implementation is# about 4 times slower than C-accelerated.# Alias C implementations for testing purposes.b'Event loop and event loop policy.'u'Event loop and event loop policy.'b'AbstractEventLoopPolicy'u'AbstractEventLoopPolicy'b'AbstractEventLoop'u'AbstractEventLoop'b'AbstractServer'u'AbstractServer'b'Handle'u'Handle'b'TimerHandle'u'TimerHandle'b'get_event_loop_policy'u'get_event_loop_policy'b'set_event_loop_policy'u'set_event_loop_policy'b'get_event_loop'u'get_event_loop'b'set_event_loop'u'set_event_loop'b'new_event_loop'u'new_event_loop'b'get_child_watcher'u'get_child_watcher'b'set_child_watcher'u'set_child_watcher'b'_set_running_loop'u'_set_running_loop'b'get_running_loop'u'get_running_loop'b'_get_running_loop'u'_get_running_loop'b'Object returned by callback registration methods.'u'Object returned by callback registration methods.'b'_callback'u'_callback'b'_args'u'_args'b'_cancelled'u'_cancelled'b'_loop'u'_loop'b'_repr'u'_repr'b'_context'u'_context'b'Exception in callback 'u'Exception in callback 'b'Object returned by timed callback registration methods.'u'Object returned by timed callback registration methods.'b'_scheduled'u'_scheduled'b'_when'u'_when'b'when='u'when='b'Return a scheduled callback time. + + The time is an absolute timestamp, using the same time + reference as loop.time(). + 'u'Return a scheduled callback time. + + The time is an absolute timestamp, using the same time + reference as loop.time(). + 'b'Abstract server returned by create_server().'u'Abstract server returned by create_server().'b'Stop serving. This leaves existing connections open.'u'Stop serving. This leaves existing connections open.'b'Get the event loop the Server object is attached to.'u'Get the event loop the Server object is attached to.'b'Return True if the server is accepting connections.'u'Return True if the server is accepting connections.'b'Start accepting connections. + + This method is idempotent, so it can be called when + the server is already being serving. + 'u'Start accepting connections. + + This method is idempotent, so it can be called when + the server is already being serving. + 'b'Start accepting connections until the coroutine is cancelled. + + The server is closed when the coroutine is cancelled. + 'u'Start accepting connections until the coroutine is cancelled. + + The server is closed when the coroutine is cancelled. + 'b'Coroutine to wait until service is closed.'u'Coroutine to wait until service is closed.'b'Abstract event loop.'u'Abstract event loop.'b'Run the event loop until stop() is called.'u'Run the event loop until stop() is called.'b'Run the event loop until a Future is done. + + Return the Future's result, or raise its exception. + 'u'Run the event loop until a Future is done. + + Return the Future's result, or raise its exception. + 'b'Stop the event loop as soon as reasonable. + + Exactly how soon that is may depend on the implementation, but + no more I/O callbacks should be scheduled. + 'u'Stop the event loop as soon as reasonable. + + Exactly how soon that is may depend on the implementation, but + no more I/O callbacks should be scheduled. + 'b'Return whether the event loop is currently running.'u'Return whether the event loop is currently running.'b'Close the loop. + + The loop should not be running. + + This is idempotent and irreversible. + + No other methods should be called after this one. + 'u'Close the loop. + + The loop should not be running. + + This is idempotent and irreversible. + + No other methods should be called after this one. 
+ 'b'A coroutine which creates a TCP server bound to host and port. + + The return value is a Server object which can be used to stop + the service. + + If host is an empty string or None all interfaces are assumed + and a list of multiple sockets will be returned (most likely + one for IPv4 and another one for IPv6). The host parameter can also be + a sequence (e.g. list) of hosts to bind to. + + family can be set to either AF_INET or AF_INET6 to force the + socket to use IPv4 or IPv6. If not set it will be determined + from host (defaults to AF_UNSPEC). + + flags is a bitmask for getaddrinfo(). + + sock can optionally be specified in order to use a preexisting + socket object. + + backlog is the maximum number of queued connections passed to + listen() (defaults to 100). + + ssl can be set to an SSLContext to enable SSL over the + accepted connections. + + reuse_address tells the kernel to reuse a local socket in + TIME_WAIT state, without waiting for its natural timeout to + expire. If not specified will automatically be set to True on + UNIX. + + reuse_port tells the kernel to allow this endpoint to be bound to + the same port as other existing endpoints are bound to, so long as + they all set this flag when being created. This option is not + supported on Windows. + + ssl_handshake_timeout is the time in seconds that an SSL server + will wait for completion of the SSL handshake before aborting the + connection. Default is 60s. + + start_serving set to True (default) causes the created server + to start accepting connections immediately. When set to False, + the user should await Server.start_serving() or Server.serve_forever() + to make the server to start accepting connections. + 'u'A coroutine which creates a TCP server bound to host and port. + + The return value is a Server object which can be used to stop + the service. + + If host is an empty string or None all interfaces are assumed + and a list of multiple sockets will be returned (most likely + one for IPv4 and another one for IPv6). The host parameter can also be + a sequence (e.g. list) of hosts to bind to. + + family can be set to either AF_INET or AF_INET6 to force the + socket to use IPv4 or IPv6. If not set it will be determined + from host (defaults to AF_UNSPEC). + + flags is a bitmask for getaddrinfo(). + + sock can optionally be specified in order to use a preexisting + socket object. + + backlog is the maximum number of queued connections passed to + listen() (defaults to 100). + + ssl can be set to an SSLContext to enable SSL over the + accepted connections. + + reuse_address tells the kernel to reuse a local socket in + TIME_WAIT state, without waiting for its natural timeout to + expire. If not specified will automatically be set to True on + UNIX. + + reuse_port tells the kernel to allow this endpoint to be bound to + the same port as other existing endpoints are bound to, so long as + they all set this flag when being created. This option is not + supported on Windows. + + ssl_handshake_timeout is the time in seconds that an SSL server + will wait for completion of the SSL handshake before aborting the + connection. Default is 60s. + + start_serving set to True (default) causes the created server + to start accepting connections immediately. When set to False, + the user should await Server.start_serving() or Server.serve_forever() + to make the server to start accepting connections. + 'b'Send a file through a transport. + + Return an amount of sent bytes. + 'u'Send a file through a transport. 
+ + Return an amount of sent bytes. + 'b'Upgrade a transport to TLS. + + Return a new transport that *protocol* should start using + immediately. + 'u'Upgrade a transport to TLS. + + Return a new transport that *protocol* should start using + immediately. + 'b'A coroutine which creates a UNIX Domain Socket server. + + The return value is a Server object, which can be used to stop + the service. + + path is a str, representing a file systsem path to bind the + server socket to. + + sock can optionally be specified in order to use a preexisting + socket object. + + backlog is the maximum number of queued connections passed to + listen() (defaults to 100). + + ssl can be set to an SSLContext to enable SSL over the + accepted connections. + + ssl_handshake_timeout is the time in seconds that an SSL server + will wait for the SSL handshake to complete (defaults to 60s). + + start_serving set to True (default) causes the created server + to start accepting connections immediately. When set to False, + the user should await Server.start_serving() or Server.serve_forever() + to make the server to start accepting connections. + 'u'A coroutine which creates a UNIX Domain Socket server. + + The return value is a Server object, which can be used to stop + the service. + + path is a str, representing a file systsem path to bind the + server socket to. + + sock can optionally be specified in order to use a preexisting + socket object. + + backlog is the maximum number of queued connections passed to + listen() (defaults to 100). + + ssl can be set to an SSLContext to enable SSL over the + accepted connections. + + ssl_handshake_timeout is the time in seconds that an SSL server + will wait for the SSL handshake to complete (defaults to 60s). + + start_serving set to True (default) causes the created server + to start accepting connections immediately. When set to False, + the user should await Server.start_serving() or Server.serve_forever() + to make the server to start accepting connections. + 'b'A coroutine which creates a datagram endpoint. + + This method will try to establish the endpoint in the background. + When successful, the coroutine returns a (transport, protocol) pair. + + protocol_factory must be a callable returning a protocol instance. + + socket family AF_INET, socket.AF_INET6 or socket.AF_UNIX depending on + host (or family if specified), socket type SOCK_DGRAM. + + reuse_address tells the kernel to reuse a local socket in + TIME_WAIT state, without waiting for its natural timeout to + expire. If not specified it will automatically be set to True on + UNIX. + + reuse_port tells the kernel to allow this endpoint to be bound to + the same port as other existing endpoints are bound to, so long as + they all set this flag when being created. This option is not + supported on Windows and some UNIX's. If the + :py:data:`~socket.SO_REUSEPORT` constant is not defined then this + capability is unsupported. + + allow_broadcast tells the kernel to allow this endpoint to send + messages to the broadcast address. + + sock can optionally be specified in order to use a preexisting + socket object. + 'u'A coroutine which creates a datagram endpoint. + + This method will try to establish the endpoint in the background. + When successful, the coroutine returns a (transport, protocol) pair. + + protocol_factory must be a callable returning a protocol instance. + + socket family AF_INET, socket.AF_INET6 or socket.AF_UNIX depending on + host (or family if specified), socket type SOCK_DGRAM. 
+ + reuse_address tells the kernel to reuse a local socket in + TIME_WAIT state, without waiting for its natural timeout to + expire. If not specified it will automatically be set to True on + UNIX. + + reuse_port tells the kernel to allow this endpoint to be bound to + the same port as other existing endpoints are bound to, so long as + they all set this flag when being created. This option is not + supported on Windows and some UNIX's. If the + :py:data:`~socket.SO_REUSEPORT` constant is not defined then this + capability is unsupported. + + allow_broadcast tells the kernel to allow this endpoint to send + messages to the broadcast address. + + sock can optionally be specified in order to use a preexisting + socket object. + 'b'Register read pipe in event loop. Set the pipe to non-blocking mode. + + protocol_factory should instantiate object with Protocol interface. + pipe is a file-like object. + Return pair (transport, protocol), where transport supports the + ReadTransport interface.'u'Register read pipe in event loop. Set the pipe to non-blocking mode. + + protocol_factory should instantiate object with Protocol interface. + pipe is a file-like object. + Return pair (transport, protocol), where transport supports the + ReadTransport interface.'b'Register write pipe in event loop. + + protocol_factory should instantiate object with BaseProtocol interface. + Pipe is file-like object already switched to nonblocking. + Return pair (transport, protocol), where transport support + WriteTransport interface.'u'Register write pipe in event loop. + + protocol_factory should instantiate object with BaseProtocol interface. + Pipe is file-like object already switched to nonblocking. + Return pair (transport, protocol), where transport support + WriteTransport interface.'b'Abstract policy for accessing the event loop.'u'Abstract policy for accessing the event loop.'b'Get the event loop for the current context. + + Returns an event loop object implementing the BaseEventLoop interface, + or raises an exception in case no event loop has been set for the + current context and the current policy does not specify to create one. + + It should never return None.'u'Get the event loop for the current context. + + Returns an event loop object implementing the BaseEventLoop interface, + or raises an exception in case no event loop has been set for the + current context and the current policy does not specify to create one. + + It should never return None.'b'Set the event loop for the current context to loop.'u'Set the event loop for the current context to loop.'b'Create and return a new event loop object according to this + policy's rules. If there's need to set this loop as the event loop for + the current context, set_event_loop must be called explicitly.'u'Create and return a new event loop object according to this + policy's rules. If there's need to set this loop as the event loop for + the current context, set_event_loop must be called explicitly.'b'Get the watcher for child processes.'u'Get the watcher for child processes.'b'Set the watcher for child processes.'u'Set the watcher for child processes.'b'Default policy implementation for accessing the event loop. + + In this policy, each thread has its own event loop. However, we + only automatically create an event loop by default for the main + thread; other threads by default have no event loop. + + Other policies may have different rules (e.g. 
a single global + event loop, or automatically creating an event loop per thread, or + using some other notion of context to which an event loop is + associated). + 'u'Default policy implementation for accessing the event loop. + + In this policy, each thread has its own event loop. However, we + only automatically create an event loop by default for the main + thread; other threads by default have no event loop. + + Other policies may have different rules (e.g. a single global + event loop, or automatically creating an event loop per thread, or + using some other notion of context to which an event loop is + associated). + 'b'Get the event loop for the current context. + + Returns an instance of EventLoop or raises an exception. + 'u'Get the event loop for the current context. + + Returns an instance of EventLoop or raises an exception. + 'b'There is no current event loop in thread %r.'u'There is no current event loop in thread %r.'b'Set the event loop.'u'Set the event loop.'b'Create a new event loop. + + You must call set_event_loop() to make this the current event + loop. + 'u'Create a new event loop. + + You must call set_event_loop() to make this the current event + loop. + 'b'Return the running event loop. Raise a RuntimeError if there is none. + + This function is thread-specific. + 'u'Return the running event loop. Raise a RuntimeError if there is none. + + This function is thread-specific. + 'b'no running event loop'u'no running event loop'b'Return the running event loop or None. + + This is a low-level function intended to be used by event loops. + This function is thread-specific. + 'u'Return the running event loop or None. + + This is a low-level function intended to be used by event loops. + This function is thread-specific. + 'b'Set the running event loop. + + This is a low-level function intended to be used by event loops. + This function is thread-specific. + 'u'Set the running event loop. + + This is a low-level function intended to be used by event loops. + This function is thread-specific. + 'b'Get the current event loop policy.'u'Get the current event loop policy.'b'Set the current event loop policy. + + If policy is None, the default policy is restored.'u'Set the current event loop policy. + + If policy is None, the default policy is restored.'b'Return an asyncio event loop. + + When called from a coroutine or a callback (e.g. scheduled with call_soon + or similar API), this function will always return the running event loop. + + If there is no running event loop set, the function will return + the result of `get_event_loop_policy().get_event_loop()` call. + 'u'Return an asyncio event loop. + + When called from a coroutine or a callback (e.g. scheduled with call_soon + or similar API), this function will always return the running event loop. + + If there is no running event loop set, the function will return + the result of `get_event_loop_policy().get_event_loop()` call. 
+ 'b'Equivalent to calling get_event_loop_policy().set_event_loop(loop).'u'Equivalent to calling get_event_loop_policy().set_event_loop(loop).'b'Equivalent to calling get_event_loop_policy().new_event_loop().'u'Equivalent to calling get_event_loop_policy().new_event_loop().'b'Equivalent to calling get_event_loop_policy().get_child_watcher().'u'Equivalent to calling get_event_loop_policy().get_child_watcher().'b'Equivalent to calling + get_event_loop_policy().set_child_watcher(watcher).'u'Equivalent to calling + get_event_loop_policy().set_child_watcher(watcher).'u'asyncio.events'u'events'asyncio exceptions.IncompleteReadErrorLimitOverrunErrorThe Future or Task was cancelled.Sendfile syscall is not available. + + Raised if OS does not support sendfile syscall for given socket or + file type. + + Incomplete read error. Attributes: + + - partial: read bytes string before the end of stream was reached + - expected: total number of expected bytes (or None if unknown) + undefinedr_expected bytes read on a total of ' bytes read on a total of ' expected bytesReached the buffer limit while looking for a separator. + + Attributes: + - consumed: total number of to be consumed bytes. + b'asyncio exceptions.'u'asyncio exceptions.'b'InvalidStateError'u'InvalidStateError'b'IncompleteReadError'u'IncompleteReadError'b'LimitOverrunError'u'LimitOverrunError'b'SendfileNotAvailableError'u'SendfileNotAvailableError'b'The Future or Task was cancelled.'u'The Future or Task was cancelled.'b'Sendfile syscall is not available. + + Raised if OS does not support sendfile syscall for given socket or + file type. + 'u'Sendfile syscall is not available. + + Raised if OS does not support sendfile syscall for given socket or + file type. + 'b' + Incomplete read error. Attributes: + + - partial: read bytes string before the end of stream was reached + - expected: total number of expected bytes (or None if unknown) + 'u' + Incomplete read error. Attributes: + + - partial: read bytes string before the end of stream was reached + - expected: total number of expected bytes (or None if unknown) + 'b'undefined'u'undefined'b' bytes read on a total of 'u' bytes read on a total of 'b' expected bytes'u' expected bytes'b'Reached the buffer limit while looking for a separator. + + Attributes: + - consumed: total number of to be consumed bytes. + 'u'Reached the buffer limit while looking for a separator. + + Attributes: + - consumed: total number of to be consumed bytes. 
[String-pool data omitted: the tail of the asyncio.exceptions strings, the xml.parsers.expat wrapper strings (re-export of the pyexpat submodules), and the distutils.fancy_getopt module: FancyGetopt docstrings (option table, aliases and negative aliases, getopt(), generate_help(), wrap_text(), OptionDummy), its error-message templates and its inline comments, duplicated in bytes and str form.]
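The fancy_getopt strings above describe distutils' FancyGetopt wrapper. A small sketch of the interface those docstrings document (distutils is deprecated and removed in recent Python versions, so this is purely for reference; the option names are made up):

    from distutils.fancy_getopt import FancyGetopt

    # (long name, short name, help text); a trailing '=' marks an option that takes a value.
    parser = FancyGetopt([
        ('verbose', 'v', 'run verbosely'),
        ('output=', 'o', 'write results to FILE'),
    ])
    args, opts = parser.getopt(['-v', '--output', 'out.txt', 'rest'])
    print(args)           # ['rest']
    print(opts.verbose)   # 1
    print(opts.output)    # 'out.txt'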
[String-pool data omitted: faulthandler attribute names followed by the email.feedparser module: the FeedParser/BytesFeedParser and BufferedSubFile docstrings, the header and MIME-boundary regular expressions, the content-type literals, and the module's inline comments, duplicated in bytes and str form.]
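The feedparser strings above are the docstrings of email.feedparser; a short sketch of the incremental interface they describe (the message content is invented):

    from email.feedparser import FeedParser

    parser = FeedParser()
    # Data can be pushed in arbitrary chunks, e.g. as it arrives from a socket.
    parser.feed('Subject: hello\r\n')
    parser.feed('\r\n')
    parser.feed('body text\r\n')
    msg = parser.close()        # returns the root Message object
    print(msg['Subject'])       # 'hello'
    print(msg.get_payload())    # the body text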
[String-pool data omitted: the distutils.file_util strings (copy_file, move_file and write_file docstrings, their error-message templates and inline comments), followed by the opening of the distutils.filelist module docstring, duplicated in bytes and str form.]
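For reference, the copy_file helper whose docstring appears above is used roughly like this (standard-library behaviour only; the file names are illustrative):

    from distutils.file_util import copy_file

    # copy_file returns (dest_name, copied); copied is 0 when update=1 and the
    # destination is already at least as new as the source.
    dest, copied = copy_file('main.py', 'main.py.bak', update=1)
    if copied:
        print('copied to', dest)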
[String-pool data omitted: the distutils.filelist strings (FileList docstrings, the MANIFEST template commands include / exclude / global-include / global-exclude / recursive-include / recursive-exclude / graft / prune and their warning messages, include_pattern/exclude_pattern, findall and glob_to_re), duplicated in bytes and str form.]
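The filelist strings above belong to the machinery that processes MANIFEST.in templates; a minimal sketch of driving it directly (directory and pattern names are invented):

    from distutils.filelist import FileList

    fl = FileList()
    fl.findall('.')                                     # populate the candidate file list
    fl.process_template_line('recursive-include src *.py')
    fl.process_template_line('global-exclude *.pyc')
    print(fl.files)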
[String-pool data omitted: the lib2to3.fixer_util strings (node-construction helpers, the special-context patterns, attr_chain, find_binding, touch_import and related docstrings and comments), duplicated in bytes and str form, followed by the opening of the fnmatch module docstring.]
[String-pool data omitted: the fnmatch strings (fnmatch, fnmatchcase, filter and translate docstrings plus translated-pattern fragments) and the first part of the multiprocessing.forkserver strings (ForkServer methods, ensure_running, connect_to_new_process), duplicated in bytes and str form.]
The calling process can read + the child process's pid and (eventually) its returncode from status_r. + The calling process should write to data_w the pickled preparation and + process data. + 'u'Request forkserver to create a child process. + + Returns a pair of fds (status_r, data_w). The calling process can read + the child process's pid and (eventually) its returncode from status_r. + The calling process should write to data_w the pickled preparation and + process data. + 'b'too many fds'u'too many fds'b'Make sure that a fork server is running. + + This can be called from any process. Note that usually a child + process will just reuse the forkserver started by its parent, so + ensure_running() will do nothing. + 'u'Make sure that a fork server is running. + + This can be called from any process. Note that usually a child + process will just reuse the forkserver started by its parent, so + ensure_running() will do nothing. + 'b'from multiprocessing.forkserver import main; 'u'from multiprocessing.forkserver import main; 'b'main(%d, %d, %r, **%r)'u'main(%d, %d, %r, **%r)'b'main_path'u'main_path'b'sys_path'u'sys_path'b'Run forkserver.'u'Run forkserver.'b'Not at EOF?'u'Not at EOF?'b'Child {0:n} status is {1:n}'u'Child {0:n} status is {1:n}'b'forkserver: waitpid returned unexpected pid %d'u'forkserver: waitpid returned unexpected pid %d'b'Too many ({0:n}) fds to send'u'Too many ({0:n}) fds to send'b'unexpected EOF'u'unexpected EOF'b'should not get here'u'should not get here'u'multiprocessing.forkserver'partialmethodfunc_repr at _format_args_and_kwargsFormat function arguments and keyword arguments. + + Special case for a single parameter: ('hello',) is formatted as ('hello'). + Replacement for traceback.extract_stack() that only does the + necessary work for asyncio debug mode. + StackSummaryextractwalk_stacklookup_lines# use reprlib to limit the length of the output# Limit the amount of work to a reasonable amount, as extract_stack()# can be called for each coroutine and future in debug mode.b' at 'u' at 'b'Format function arguments and keyword arguments. + + Special case for a single parameter: ('hello',) is formatted as ('hello'). + 'u'Format function arguments and keyword arguments. + + Special case for a single parameter: ('hello',) is formatted as ('hello'). + 'b'Replacement for traceback.extract_stack() that only does the + necessary work for asyncio debug mode. + 'u'Replacement for traceback.extract_stack() that only does the + necessary work for asyncio debug mode. + 'u'asyncio.format_helpers'u'format_helpers' +Generic framework path manipulation +(?x) +(?P^.*)(?:^|/) +(?P + (?P\w+).framework/ + (?:Versions/(?P[^/]+)/)? + (?P=shortname) + (?:_(?P[^_]+))? 
+)$ +STRICT_FRAMEWORK_RE + A framework name can take one of the following four forms: + Location/Name.framework/Versions/SomeVersion/Name_Suffix + Location/Name.framework/Versions/SomeVersion/Name + Location/Name.framework/Name_Suffix + Location/Name.framework/Name + + returns None if not found, or a mapping equivalent to: + dict( + location='Location', + name='Name.framework/Versions/SomeVersion/Name_Suffix', + shortname='Name', + version='SomeVersion', + suffix='Suffix', + ) + + Note that SomeVersion and Suffix are optional and may be None + if not present + is_frameworktest_framework_infocompletely/invalid/_debugP/F.frameworkP/F.framework/_debugP/F.framework/FF.framework/FP/F.framework/F_debugF.framework/F_debugP/F.framework/VersionsP/F.framework/Versions/AP/F.framework/Versions/A/FF.framework/Versions/A/FP/F.framework/Versions/A/F_debugF.framework/Versions/A/F_debugb' +Generic framework path manipulation +'u' +Generic framework path manipulation +'b'(?x) +(?P^.*)(?:^|/) +(?P + (?P\w+).framework/ + (?:Versions/(?P[^/]+)/)? + (?P=shortname) + (?:_(?P[^_]+))? +)$ +'u'(?x) +(?P^.*)(?:^|/) +(?P + (?P\w+).framework/ + (?:Versions/(?P[^/]+)/)? + (?P=shortname) + (?:_(?P[^_]+))? +)$ +'b' + A framework name can take one of the following four forms: + Location/Name.framework/Versions/SomeVersion/Name_Suffix + Location/Name.framework/Versions/SomeVersion/Name + Location/Name.framework/Name_Suffix + Location/Name.framework/Name + + returns None if not found, or a mapping equivalent to: + dict( + location='Location', + name='Name.framework/Versions/SomeVersion/Name_Suffix', + shortname='Name', + version='SomeVersion', + suffix='Suffix', + ) + + Note that SomeVersion and Suffix are optional and may be None + if not present + 'u' + A framework name can take one of the following four forms: + Location/Name.framework/Versions/SomeVersion/Name_Suffix + Location/Name.framework/Versions/SomeVersion/Name + Location/Name.framework/Name_Suffix + Location/Name.framework/Name + + returns None if not found, or a mapping equivalent to: + dict( + location='Location', + name='Name.framework/Versions/SomeVersion/Name_Suffix', + shortname='Name', + version='SomeVersion', + suffix='Suffix', + ) + + Note that SomeVersion and Suffix are optional and may be None + if not present + 'b'completely/invalid/_debug'u'completely/invalid/_debug'b'P/F.framework'u'P/F.framework'b'P/F.framework/_debug'u'P/F.framework/_debug'b'P/F.framework/F'u'P/F.framework/F'b'F.framework/F'u'F.framework/F'b'P/F.framework/F_debug'u'P/F.framework/F_debug'b'F.framework/F_debug'u'F.framework/F_debug'b'P/F.framework/Versions'u'P/F.framework/Versions'b'P/F.framework/Versions/A'u'P/F.framework/Versions/A'b'P/F.framework/Versions/A/F'u'P/F.framework/Versions/A/F'b'F.framework/Versions/A/F'u'F.framework/Versions/A/F'b'P/F.framework/Versions/A/F_debug'u'P/F.framework/Versions/A/F_debug'b'F.framework/Versions/A/F_debug'u'F.framework/Versions/A/F_debug'u'ctypes.macholib.framework'u'macholib.framework'u'framework'An FTP client class and some helper functions. + +Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. Reynolds + +Example: + +>>> from ftplib import FTP +>>> ftp = FTP('ftp.python.org') # connect to host, default port +>>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@ +'230 Guest login ok, access restrictions apply.' +>>> ftp.retrlines('LIST') # list directory contents +total 9 +drwxr-xr-x 8 root wheel 1024 Jan 3 1994 . +drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .. 
+drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin +drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc +d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming +drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib +drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub +drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr +-rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg +'226 Transfer complete.' +>>> ftp.quit() +'221 Goodbye.' +>>> + +A nice test that reveals some of the network dialogue would be: +python ftplib.py -d localhost -l -p -l +FTPerror_replyerror_temperror_permerror_protoall_errorsFTP_PORTMAXLINEB_CRLFAn FTP client class. + + To create a connection, call the class using these arguments: + host, user, passwd, acct, timeout + + The first four arguments are all strings, and have default value ''. + timeout must be numeric and defaults to None if not passed, + meaning that no timeout will be set on any ftp socket(s) + If a timeout is passed, then this is now the default timeout for all ftp + socket operations for this instance. + + Then use self.connect() with optional host and port argument. + + To download a file, use ftp.retrlines('RETR ' + filename), + or ftp.retrbinary() with slightly different arguments. + To upload a file, use ftp.storlines() or ftp.storbinary(), + which have an open file as argument (see their definitions + below for details). + The download/upload functions first issue appropriate TYPE + and PORT or PASV commands. + debuggingmaxlinewelcomepassiveservertrust_server_pasv_ipv4_addressuserpasswdacctloginConnect to host. Arguments are: + - host: hostname to connect to (string, default previous host) + - port: port to connect to (integer, default previous port) + - timeout: the timeout to set against the ftp socket(s) + - source_address: a 2-tuple (host, port) for the socket to bind + to as its source address before connecting. + ftplib.connectgetrespgetwelcomeGet the welcome message from the server. + (this is read and squirreled away by connect())*welcome*Set the debugging level. + The required argument level means: + 0: no debugging output (default) + 1: print commands and responses but not body text etc. + 2: also print raw lines read and sent before stripping CR/LFset_pasvUse passive or active mode for data transfers. + With a false argument, use the normal PORT mode, + With a true argument, use the PASV command.pass PASS putlinean illegal newline character should not be containedftplib.sendcmd*put*putcmd*cmd*got more than %d bytes*get*getmultilinenextline*resp*lastrespvoidrespExpect a response beginning with '2'.abortAbort a file transfer. Uses out-of-band data. + This does not follow the procedure from the RFC to send Telnet + IP and Synch; that doesn't seem to work with the servers I've + tried. Instead, just send the ABOR command as OOB data.ABOR*put urgent*sendcmdSend a command and return the response.voidcmdSend a command and expect a response beginning with '2'.sendportSend a PORT command with the current host and the given + port number. + hbytespbytesPORT sendeprtSend an EPRT command with the current host and the given port number.unsupported address familyEPRT makeportCreate a new socket and send a PORT command for it.makepasvInternal: Does the PASV or EPSV handshake -> (address, port)parse227PASVuntrusted_hostparse229EPSVntransfercmdInitiate a transfer over the data connection. + + If the transfer is active, send a port command and the + transfer command, and accept the connection. If the server is + passive, send a pasv command, connect to it, and start the + transfer command. 
Either way, return the socket for the + connection and the expected size of the transfer. The + expected size may be None if it could not be determined. + + Optional `rest' argument can be a string that is sent as the + argument to a REST command. This is essentially a server + marker used to tell the server to skip over any data up to the + given marker. + REST %sparse150transfercmdLike ntransfercmd() but returns only the socket.Login, default anonymous.anonymousanonymous@USER ACCT retrbinaryRetrieve data in binary mode. A new port is created for you. + + Args: + cmd: A RETR command. + callback: A single parameter callable to be called on each + block of data read. + blocksize: The maximum number of bytes to read from the + socket at one time. [default: 8192] + rest: Passed to transfercmd(). [default: None] + + Returns: + The response code. + TYPE IretrlinesRetrieve data in line mode. A new port is created for you. + + Args: + cmd: A RETR, LIST, or NLST command. + callback: An optional single parameter callable that is called + for each line with the trailing CRLF stripped. + [default: print_line()] + + Returns: + The response code. + print_lineTYPE A*retr*storbinaryStore a file in binary mode. A new port is created for you. + + Args: + cmd: A STOR command. + fp: A file-like object with a read(num_bytes) method. + blocksize: The maximum data size to read from fp and send over + the connection at once. [default: 8192] + callback: An optional single parameter callable that is called on + each block of data after it is sent. [default: None] + rest: Passed to transfercmd(). [default: None] + + Returns: + The response code. + storlinesStore a file in line mode. A new port is created for you. + + Args: + cmd: A STOR command. + fp: A file-like object with a readline() method. + callback: An optional single parameter callable that is called on + each line after it is sent. [default: None] + + Returns: + The response code. + passwordSend new account name.nlstReturn a list of files in a given directory (default the current).NLSTList a directory in long form. + By default list current directory to stdout. + Optional last argument is callback function; all + non-empty arguments before it are concatenated to the + LIST command. (This *should* only be used for a pathname.)LISTmlsdfactsList a directory in a standardized format by using MLSD + command (RFC-3659). If path is omitted the current directory + is assumed. "facts" is a list of strings representing the type + of information desired (e.g. ["type", "size", "perm"]). + + Return a generator object yielding a tuple of two elements + for every file found in path. + First element is the file name, the second one is a dictionary + including a variable number of "facts" depending on the server + and whether "facts" argument has been provided. + OPTS MLST MLSD %sMLSDfacts_foundfactfromnametonameRename a file.RNFR RNTO Delete a file.DELE cwdChange to a directory.CDUPCWD Retrieve the size of a file.SIZE mkdMake a directory, return its full pathname.MKD parse257rmdRemove a directory.RMD pwdReturn current working directory.PWDQuit, and close the connection.Close the connection without assuming anything about it.SSLSocketFTP_TLSA FTP subclass which adds TLS support to FTP as described + in RFC-4217. + + Connect as usual to port 21 implicitly securing the FTP control + connection before authenticating. + + Securing the data connection requires user to explicitly ask + for it by calling prot_p() method. 
+ + Usage example: + >>> from ftplib import FTP_TLS + >>> ftps = FTP_TLS('ftp.python.org') + >>> ftps.login() # login anonymously previously securing control channel + '230 Guest login ok, access restrictions apply.' + >>> ftps.prot_p() # switch to secure data connection + '200 Protection level set to P' + >>> ftps.retrlines('LIST') # list directory content securely + total 9 + drwxr-xr-x 8 root wheel 1024 Jan 3 1994 . + drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .. + drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin + drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc + d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming + drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib + drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub + drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr + -rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg + '226 Transfer complete.' + >>> ftps.quit() + '221 Goodbye.' + >>> + ssl_versionkeyfilecertfilecontext and keyfile arguments are mutually exclusive"context and keyfile arguments are mutually ""exclusive"context and certfile arguments are mutually exclusive"context and certfile arguments are mutually "keyfile and certfile are deprecated, use a custom context instead"keyfile and certfile are deprecated, use a ""custom context instead"_create_stdlib_context_prot_psecureSet up secure control connection by using TLS/SSL.Already using TLSAUTH TLSAUTH SSLcccSwitch back to a clear-text control connection.not using TLSCCCprot_pSet up secure data connection.PBSZ 0PROT Pprot_cSet up clear text data connection.PROT C_150_reParse the '150' response for a RETR request. + Returns the expected transfer size or None; size is not guaranteed to + be present in the 150 message. + 150 .* \((\d+) bytes\)_227_reParse the '227' response for a PASV request. + Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)' + Return ('host.addr.as.numbers', port#) tuple.(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)Parse the '229' response for an EPSV request. + Raises error_proto if it does not contain '(|||port|)' + Return ('host.addr.as.numbers', port#) tuple.Parse the '257' response for a MKD or PWD request. + This is a response to a MKD or PWD request: a directory name. + Returns the directoryname in the 257 reply. "Default retrlines callback to print a line.ftpcpsourcenametargetnameCopy file from one FTP-instance to another.TYPE sourcehostsourceportSTOR treply125RETR sreplyTest program. + Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ... 
+ + -d dir + -l list + -p password + netrcrcfile-rftpuseridnetrcobjauthenticatorsNo account -- using anonymous login.Could not open account file -- using anonymous login."Could not open account file"" -- using anonymous login."CWD-p# Changes and improvements suggested by Steve Majewski.# Modified by Jack to work on the mac.# Modified by Siebren to support docstrings and PASV.# Modified by Phil Schwartz to add storbinary and storlines callbacks.# Modified by Giampaolo Rodola' to add TLS support.# Magic number from # Process data out of band# The standard FTP server control port# The sizehint parameter passed to readline() calls# Exception raised when an error or invalid response is received# unexpected [123]xx reply# 4xx errors# 5xx errors# response does not begin with [1-5]# All exceptions (hopefully) that may be raised here and that aren't# (always) programming errors on our side# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)# The class itself# Disables https://bugs.python.org/issue43285 security if set to True.# Initialization method (called by class instantiation).# Initialize host to localhost, port to standard ftp port# Optional arguments are host (for connect()),# and user, passwd, acct (for login())# Context management protocol: try to quit() if active# Internal: "sanitize" a string for printing# Internal: send one line to the server, appending CRLF# Internal: send one command to the server (through putline())# Internal: return one line from the server, stripping CRLF.# Raise EOFError if the connection is closed# Internal: get a response from the server, which may possibly# consist of multiple lines. Return a single string with no# trailing CRLF. If the response consists of multiple lines,# these are separated by '\n' characters in the string# Internal: get a response from the server.# Raise various errors if the response indicates an error# Get proper port# Get proper host# Some servers apparently send a 200 reply to# a LIST or STOR command, before the 150 reply# (and way before the 226 reply). This seems to# be in violation of the protocol (which only allows# 1xx or error messages for LIST), so we just discard# this response.# See above.# this is conditional in case we received a 125# If there is no anonymous ftp password specified# then we'll just use anonymous@# We don't send any other thing because:# - We want to remain anonymous# - We want to stop SPAM# - We don't want to let ftp sites to discriminate by the user,# host or country.# shutdown ssl layer# does nothing, but could return error# The SIZE command is defined in RFC-3659# fix around non-compliant implementations such as IIS shipped# with Windows server 2003# PROT defines whether or not the data channel is to be protected.# Though RFC-2228 defines four possible protection levels,# RFC-4217 only recommends two, Clear and Private.# Clear (PROT C) means that no security is to be used on the# data-channel, Private (PROT P) means that the data-channel# should be protected by TLS.# PBSZ command MUST still be issued, but must have a parameter of# '0' to indicate that no buffering is taking place and the data# connection should not be encapsulated.# --- Overridden FTP methods# overridden as we can't pass MSG_OOB flag to sendall()# should contain '(|||port|)'# Not compliant to RFC 959, but UNIX ftpd does this# RFC 959: the user must "listen" [...] 
BEFORE sending the# transfer request.# So: STOR before RETR, because here the target is a "user".# RFC 959# get name of alternate ~/.netrc file:# no account for hostb'An FTP client class and some helper functions. + +Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. Reynolds + +Example: + +>>> from ftplib import FTP +>>> ftp = FTP('ftp.python.org') # connect to host, default port +>>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@ +'230 Guest login ok, access restrictions apply.' +>>> ftp.retrlines('LIST') # list directory contents +total 9 +drwxr-xr-x 8 root wheel 1024 Jan 3 1994 . +drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .. +drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin +drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc +d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming +drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib +drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub +drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr +-rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg +'226 Transfer complete.' +>>> ftp.quit() +'221 Goodbye.' +>>> + +A nice test that reveals some of the network dialogue would be: +python ftplib.py -d localhost -l -p -l +'u'An FTP client class and some helper functions. + +Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. Reynolds + +Example: + +>>> from ftplib import FTP +>>> ftp = FTP('ftp.python.org') # connect to host, default port +>>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@ +'230 Guest login ok, access restrictions apply.' +>>> ftp.retrlines('LIST') # list directory contents +total 9 +drwxr-xr-x 8 root wheel 1024 Jan 3 1994 . +drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .. +drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin +drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc +d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming +drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib +drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub +drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr +-rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg +'226 Transfer complete.' +>>> ftp.quit() +'221 Goodbye.' +>>> + +A nice test that reveals some of the network dialogue would be: +python ftplib.py -d localhost -l -p -l +'b'FTP'u'FTP'b'error_reply'u'error_reply'b'error_temp'u'error_temp'b'error_perm'u'error_perm'b'error_proto'u'error_proto'b'all_errors'u'all_errors'b'An FTP client class. + + To create a connection, call the class using these arguments: + host, user, passwd, acct, timeout + + The first four arguments are all strings, and have default value ''. + timeout must be numeric and defaults to None if not passed, + meaning that no timeout will be set on any ftp socket(s) + If a timeout is passed, then this is now the default timeout for all ftp + socket operations for this instance. + + Then use self.connect() with optional host and port argument. + + To download a file, use ftp.retrlines('RETR ' + filename), + or ftp.retrbinary() with slightly different arguments. + To upload a file, use ftp.storlines() or ftp.storbinary(), + which have an open file as argument (see their definitions + below for details). + The download/upload functions first issue appropriate TYPE + and PORT or PASV commands. + 'u'An FTP client class. + + To create a connection, call the class using these arguments: + host, user, passwd, acct, timeout + + The first four arguments are all strings, and have default value ''. 
+ timeout must be numeric and defaults to None if not passed, + meaning that no timeout will be set on any ftp socket(s) + If a timeout is passed, then this is now the default timeout for all ftp + socket operations for this instance. + + Then use self.connect() with optional host and port argument. + + To download a file, use ftp.retrlines('RETR ' + filename), + or ftp.retrbinary() with slightly different arguments. + To upload a file, use ftp.storlines() or ftp.storbinary(), + which have an open file as argument (see their definitions + below for details). + The download/upload functions first issue appropriate TYPE + and PORT or PASV commands. + 'b'Connect to host. Arguments are: + - host: hostname to connect to (string, default previous host) + - port: port to connect to (integer, default previous port) + - timeout: the timeout to set against the ftp socket(s) + - source_address: a 2-tuple (host, port) for the socket to bind + to as its source address before connecting. + 'u'Connect to host. Arguments are: + - host: hostname to connect to (string, default previous host) + - port: port to connect to (integer, default previous port) + - timeout: the timeout to set against the ftp socket(s) + - source_address: a 2-tuple (host, port) for the socket to bind + to as its source address before connecting. + 'b'ftplib.connect'u'ftplib.connect'b'Get the welcome message from the server. + (this is read and squirreled away by connect())'u'Get the welcome message from the server. + (this is read and squirreled away by connect())'b'*welcome*'u'*welcome*'b'Set the debugging level. + The required argument level means: + 0: no debugging output (default) + 1: print commands and responses but not body text etc. + 2: also print raw lines read and sent before stripping CR/LF'u'Set the debugging level. + The required argument level means: + 0: no debugging output (default) + 1: print commands and responses but not body text etc. + 2: also print raw lines read and sent before stripping CR/LF'b'Use passive or active mode for data transfers. + With a false argument, use the normal PORT mode, + With a true argument, use the PASV command.'u'Use passive or active mode for data transfers. + With a false argument, use the normal PORT mode, + With a true argument, use the PASV command.'b'pass 'u'pass 'b'PASS 'u'PASS 'b'an illegal newline character should not be contained'u'an illegal newline character should not be contained'b'ftplib.sendcmd'u'ftplib.sendcmd'b'*put*'u'*put*'b'*cmd*'u'*cmd*'b'got more than %d bytes'u'got more than %d bytes'b'*get*'u'*get*'b'*resp*'u'*resp*'b'Expect a response beginning with '2'.'u'Expect a response beginning with '2'.'b'Abort a file transfer. Uses out-of-band data. + This does not follow the procedure from the RFC to send Telnet + IP and Synch; that doesn't seem to work with the servers I've + tried. Instead, just send the ABOR command as OOB data.'u'Abort a file transfer. Uses out-of-band data. + This does not follow the procedure from the RFC to send Telnet + IP and Synch; that doesn't seem to work with the servers I've + tried. Instead, just send the ABOR command as OOB data.'b'ABOR'b'*put urgent*'u'*put urgent*'b'426'u'426'b'225'u'225'b'226'u'226'b'Send a command and return the response.'u'Send a command and return the response.'b'Send a command and expect a response beginning with '2'.'u'Send a command and expect a response beginning with '2'.'b'Send a PORT command with the current host and the given + port number. 
+ 'u'Send a PORT command with the current host and the given + port number. + 'b'PORT 'u'PORT 'b'Send an EPRT command with the current host and the given port number.'u'Send an EPRT command with the current host and the given port number.'b'unsupported address family'u'unsupported address family'b'EPRT 'u'EPRT 'b'Create a new socket and send a PORT command for it.'u'Create a new socket and send a PORT command for it.'b'Internal: Does the PASV or EPSV handshake -> (address, port)'u'Internal: Does the PASV or EPSV handshake -> (address, port)'b'PASV'u'PASV'b'EPSV'u'EPSV'b'Initiate a transfer over the data connection. + + If the transfer is active, send a port command and the + transfer command, and accept the connection. If the server is + passive, send a pasv command, connect to it, and start the + transfer command. Either way, return the socket for the + connection and the expected size of the transfer. The + expected size may be None if it could not be determined. + + Optional `rest' argument can be a string that is sent as the + argument to a REST command. This is essentially a server + marker used to tell the server to skip over any data up to the + given marker. + 'u'Initiate a transfer over the data connection. + + If the transfer is active, send a port command and the + transfer command, and accept the connection. If the server is + passive, send a pasv command, connect to it, and start the + transfer command. Either way, return the socket for the + connection and the expected size of the transfer. The + expected size may be None if it could not be determined. + + Optional `rest' argument can be a string that is sent as the + argument to a REST command. This is essentially a server + marker used to tell the server to skip over any data up to the + given marker. + 'b'REST %s'u'REST %s'b'150'u'150'b'Like ntransfercmd() but returns only the socket.'u'Like ntransfercmd() but returns only the socket.'b'Login, default anonymous.'u'Login, default anonymous.'b'anonymous'u'anonymous'b'anonymous@'u'anonymous@'b'USER 'u'USER 'b'ACCT 'u'ACCT 'b'Retrieve data in binary mode. A new port is created for you. + + Args: + cmd: A RETR command. + callback: A single parameter callable to be called on each + block of data read. + blocksize: The maximum number of bytes to read from the + socket at one time. [default: 8192] + rest: Passed to transfercmd(). [default: None] + + Returns: + The response code. + 'u'Retrieve data in binary mode. A new port is created for you. + + Args: + cmd: A RETR command. + callback: A single parameter callable to be called on each + block of data read. + blocksize: The maximum number of bytes to read from the + socket at one time. [default: 8192] + rest: Passed to transfercmd(). [default: None] + + Returns: + The response code. + 'b'TYPE I'u'TYPE I'b'Retrieve data in line mode. A new port is created for you. + + Args: + cmd: A RETR, LIST, or NLST command. + callback: An optional single parameter callable that is called + for each line with the trailing CRLF stripped. + [default: print_line()] + + Returns: + The response code. + 'u'Retrieve data in line mode. A new port is created for you. + + Args: + cmd: A RETR, LIST, or NLST command. + callback: An optional single parameter callable that is called + for each line with the trailing CRLF stripped. + [default: print_line()] + + Returns: + The response code. + 'b'TYPE A'u'TYPE A'b'*retr*'u'*retr*'b'Store a file in binary mode. A new port is created for you. + + Args: + cmd: A STOR command. 
+ fp: A file-like object with a read(num_bytes) method. + blocksize: The maximum data size to read from fp and send over + the connection at once. [default: 8192] + callback: An optional single parameter callable that is called on + each block of data after it is sent. [default: None] + rest: Passed to transfercmd(). [default: None] + + Returns: + The response code. + 'u'Store a file in binary mode. A new port is created for you. + + Args: + cmd: A STOR command. + fp: A file-like object with a read(num_bytes) method. + blocksize: The maximum data size to read from fp and send over + the connection at once. [default: 8192] + callback: An optional single parameter callable that is called on + each block of data after it is sent. [default: None] + rest: Passed to transfercmd(). [default: None] + + Returns: + The response code. + 'b'Store a file in line mode. A new port is created for you. + + Args: + cmd: A STOR command. + fp: A file-like object with a readline() method. + callback: An optional single parameter callable that is called on + each line after it is sent. [default: None] + + Returns: + The response code. + 'u'Store a file in line mode. A new port is created for you. + + Args: + cmd: A STOR command. + fp: A file-like object with a readline() method. + callback: An optional single parameter callable that is called on + each line after it is sent. [default: None] + + Returns: + The response code. + 'b'Send new account name.'u'Send new account name.'b'Return a list of files in a given directory (default the current).'u'Return a list of files in a given directory (default the current).'b'NLST'u'NLST'b'List a directory in long form. + By default list current directory to stdout. + Optional last argument is callback function; all + non-empty arguments before it are concatenated to the + LIST command. (This *should* only be used for a pathname.)'u'List a directory in long form. + By default list current directory to stdout. + Optional last argument is callback function; all + non-empty arguments before it are concatenated to the + LIST command. (This *should* only be used for a pathname.)'b'LIST'u'LIST'b'List a directory in a standardized format by using MLSD + command (RFC-3659). If path is omitted the current directory + is assumed. "facts" is a list of strings representing the type + of information desired (e.g. ["type", "size", "perm"]). + + Return a generator object yielding a tuple of two elements + for every file found in path. + First element is the file name, the second one is a dictionary + including a variable number of "facts" depending on the server + and whether "facts" argument has been provided. + 'u'List a directory in a standardized format by using MLSD + command (RFC-3659). If path is omitted the current directory + is assumed. "facts" is a list of strings representing the type + of information desired (e.g. ["type", "size", "perm"]). + + Return a generator object yielding a tuple of two elements + for every file found in path. + First element is the file name, the second one is a dictionary + including a variable number of "facts" depending on the server + and whether "facts" argument has been provided. 
+ 'b'OPTS MLST 'u'OPTS MLST 'b'MLSD %s'u'MLSD %s'b'MLSD'u'MLSD'b'Rename a file.'u'Rename a file.'b'RNFR 'u'RNFR 'b'RNTO 'u'RNTO 'b'Delete a file.'u'Delete a file.'b'DELE 'u'DELE 'b'250'u'250'b'200'u'200'b'Change to a directory.'u'Change to a directory.'b'CDUP'u'CDUP'b'CWD 'u'CWD 'b'Retrieve the size of a file.'u'Retrieve the size of a file.'b'SIZE 'u'SIZE 'b'213'u'213'b'Make a directory, return its full pathname.'u'Make a directory, return its full pathname.'b'MKD 'u'MKD 'b'257'u'257'b'Remove a directory.'u'Remove a directory.'b'RMD 'u'RMD 'b'Return current working directory.'u'Return current working directory.'b'PWD'u'PWD'b'Quit, and close the connection.'u'Quit, and close the connection.'b'Close the connection without assuming anything about it.'u'Close the connection without assuming anything about it.'b'A FTP subclass which adds TLS support to FTP as described + in RFC-4217. + + Connect as usual to port 21 implicitly securing the FTP control + connection before authenticating. + + Securing the data connection requires user to explicitly ask + for it by calling prot_p() method. + + Usage example: + >>> from ftplib import FTP_TLS + >>> ftps = FTP_TLS('ftp.python.org') + >>> ftps.login() # login anonymously previously securing control channel + '230 Guest login ok, access restrictions apply.' + >>> ftps.prot_p() # switch to secure data connection + '200 Protection level set to P' + >>> ftps.retrlines('LIST') # list directory content securely + total 9 + drwxr-xr-x 8 root wheel 1024 Jan 3 1994 . + drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .. + drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin + drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc + d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming + drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib + drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub + drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr + -rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg + '226 Transfer complete.' + >>> ftps.quit() + '221 Goodbye.' + >>> + 'u'A FTP subclass which adds TLS support to FTP as described + in RFC-4217. + + Connect as usual to port 21 implicitly securing the FTP control + connection before authenticating. + + Securing the data connection requires user to explicitly ask + for it by calling prot_p() method. + + Usage example: + >>> from ftplib import FTP_TLS + >>> ftps = FTP_TLS('ftp.python.org') + >>> ftps.login() # login anonymously previously securing control channel + '230 Guest login ok, access restrictions apply.' + >>> ftps.prot_p() # switch to secure data connection + '200 Protection level set to P' + >>> ftps.retrlines('LIST') # list directory content securely + total 9 + drwxr-xr-x 8 root wheel 1024 Jan 3 1994 . + drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .. + drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin + drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc + d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming + drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib + drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub + drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr + -rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg + '226 Transfer complete.' + >>> ftps.quit() + '221 Goodbye.' 
+ >>> + 'b'context and keyfile arguments are mutually exclusive'u'context and keyfile arguments are mutually exclusive'b'context and certfile arguments are mutually exclusive'u'context and certfile arguments are mutually exclusive'b'keyfile and certfile are deprecated, use a custom context instead'u'keyfile and certfile are deprecated, use a custom context instead'b'Set up secure control connection by using TLS/SSL.'u'Set up secure control connection by using TLS/SSL.'b'Already using TLS'u'Already using TLS'b'AUTH TLS'u'AUTH TLS'b'AUTH SSL'u'AUTH SSL'b'Switch back to a clear-text control connection.'u'Switch back to a clear-text control connection.'b'not using TLS'u'not using TLS'b'CCC'u'CCC'b'Set up secure data connection.'u'Set up secure data connection.'b'PBSZ 0'u'PBSZ 0'b'PROT P'u'PROT P'b'Set up clear text data connection.'u'Set up clear text data connection.'b'PROT C'u'PROT C'b'FTP_TLS'u'FTP_TLS'b'Parse the '150' response for a RETR request. + Returns the expected transfer size or None; size is not guaranteed to + be present in the 150 message. + 'u'Parse the '150' response for a RETR request. + Returns the expected transfer size or None; size is not guaranteed to + be present in the 150 message. + 'b'150 .* \((\d+) bytes\)'u'150 .* \((\d+) bytes\)'b'Parse the '227' response for a PASV request. + Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)' + Return ('host.addr.as.numbers', port#) tuple.'u'Parse the '227' response for a PASV request. + Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)' + Return ('host.addr.as.numbers', port#) tuple.'b'227'u'227'b'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)'u'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)'b'Parse the '229' response for an EPSV request. + Raises error_proto if it does not contain '(|||port|)' + Return ('host.addr.as.numbers', port#) tuple.'u'Parse the '229' response for an EPSV request. + Raises error_proto if it does not contain '(|||port|)' + Return ('host.addr.as.numbers', port#) tuple.'b'229'u'229'b'Parse the '257' response for a MKD or PWD request. + This is a response to a MKD or PWD request: a directory name. + Returns the directoryname in the 257 reply.'u'Parse the '257' response for a MKD or PWD request. + This is a response to a MKD or PWD request: a directory name. + Returns the directoryname in the 257 reply.'b' "'u' "'b'Default retrlines callback to print a line.'u'Default retrlines callback to print a line.'b'Copy file from one FTP-instance to another.'u'Copy file from one FTP-instance to another.'b'TYPE 'u'TYPE 'b'STOR 'u'STOR 'b'125'u'125'b'RETR 'u'RETR 'b'Test program. + Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ... + + -d dir + -l list + -p password + 'u'Test program. + Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ... 
+ + -d dir + -l list + -p password + 'b'-r'u'-r'b'No account -- using anonymous login.'u'No account -- using anonymous login.'b'Could not open account file -- using anonymous login.'u'Could not open account file -- using anonymous login.'b'CWD'u'CWD'b'-p'u'ftplib'functools.py - Tools for working with functions and callable objects +update_wrapperWRAPPER_ASSIGNMENTSWRAPPER_UPDATESsingledispatchsingledispatchmethodcached_propertywrappedassignedupdatedUpdate a wrapper function to look like the wrapped function + + wrapper is the function to be updated + wrapped is the original function + assigned is a tuple naming the attributes assigned directly + from the wrapped function to the wrapper function (defaults to + functools.WRAPPER_ASSIGNMENTS) + updated is a tuple naming the attributes of the wrapper that + are updated with the corresponding attribute from the wrapped + function (defaults to functools.WRAPPER_UPDATES) + Decorator factory to apply update_wrapper() to a wrapper function + + Returns a decorator that invokes update_wrapper() with the decorated + function as the wrapper argument and the arguments to wraps() as the + remaining arguments. Default arguments are as for update_wrapper(). + This is a convenience function to simplify applying partial() to + update_wrapper(). + _gt_from_ltReturn a > b. Computed by @total_ordering from (not a < b) and (a != b).op_result_le_from_ltReturn a <= b. Computed by @total_ordering from (a < b) or (a == b)._ge_from_ltReturn a >= b. Computed by @total_ordering from (not a < b)._ge_from_leReturn a >= b. Computed by @total_ordering from (not a <= b) or (a == b)._lt_from_leReturn a < b. Computed by @total_ordering from (a <= b) and (a != b)._gt_from_leReturn a > b. Computed by @total_ordering from (not a <= b)._lt_from_gtReturn a < b. Computed by @total_ordering from (not a > b) and (a != b)._ge_from_gtReturn a >= b. Computed by @total_ordering from (a > b) or (a == b)._le_from_gtReturn a <= b. Computed by @total_ordering from (not a > b)._le_from_geReturn a <= b. Computed by @total_ordering from (not a >= b) or (a == b)._gt_from_geReturn a > b. Computed by @total_ordering from (a >= b) and (a != b)._lt_from_geReturn a < b. Computed by @total_ordering from (not a >= b).Class decorator that fills in missing ordering methodsrootsmust define at least one ordering operation: < > <= >=opfuncmycmpConvert a cmp= function into a key= function_initial_missinginitial + reduce(function, sequence[, initial]) -> value + + Apply a function of two arguments cumulatively to the items of a sequence, + from left to right, so as to reduce the sequence to a single value. + For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates + ((((1+2)+3)+4)+5). If initial is present, it is placed before the items + of the sequence in the calculation, and serves as a default when the + sequence is empty. + reduce() of empty sequence with no initial valueNew function with partial application of the given arguments + and keywords. + the first argument must be callablefunctools.argument to __setstate__ must be a tupleexpected 4 items in state, got invalid partial stateMethod descriptor with partial application of the given arguments + and keywords. + + Supports wrapping existing descriptors and handles non-descriptor + callables as instance methods. 
+ descriptor '__init__' of partialmethod needs an argument"descriptor '__init__' of partialmethod "type 'partialmethod' takes at least one argument, got %d"type 'partialmethod' takes at least one argument, ""got %d"{!r} is not callable or a descriptor($self, func, /, *args, **keywords){module}.{cls}({func}, {args}, {keywords})format_string_make_unbound_methodcls_or_self_partialmethodnew_func_unwrap_partialCacheInfomissescurrsize_CacheInfo_HashedSeq This class guarantees that hash() will be called no more than once + per element. This is important because the lru_cache() will hash + the key multiple times on a cache miss. + + hashvalue_make_keykwd_markfasttypesMake a cache key from optionally typed positional and keyword arguments + + The key is constructed in a way that is flat as possible rather than + as a nested structure that would take more memory. + + If there is only a single argument and its data type is known to cache + its hash value, then that argument is returned without a wrapper. This + saves space and improves lookup speed. + + Least-recently-used cache decorator. + + If *maxsize* is set to None, the LRU features are disabled and the cache + can grow without bound. + + If *typed* is True, arguments of different types will be cached separately. + For example, f(3.0) and f(3) will be treated as distinct calls with + distinct results. + + Arguments to the cached function must be hashable. + + View the cache statistics named tuple (hits, misses, maxsize, currsize) + with f.cache_info(). Clear the cache and statistics with f.cache_clear(). + Access the underlying function with f.__wrapped__. + + See: http://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU) + + user_functionExpected first argument to be an integer, a callable, or Nonedecorating_functionsentinelmake_keyPREVNEXTKEYRESULTfullcache_getcache_len_keyoldrootoldkeyoldresultReport cache statisticsClear the cache and cache statistics_c3_mergeMerges MROs in *sequences* to a single MRO using the C3 algorithm. + + Adapted from http://www.python.org/download/releases/2.3/mro/. + + Inconsistent hierarchy_c3_mroabcsComputes the method resolution order using extended C3 linearization. + + If no *abcs* are given, the algorithm works exactly like the built-in C3 + linearization used for method resolution. + + If given, *abcs* is a list of abstract base classes that should be inserted + into the resulting MRO. Unrelated ABCs are ignored and don't end up in the + result. The algorithm inserts ABCs where their functionality is introduced, + i.e. issubclass(cls, abc) returns True for the class itself but returns + False for all its direct base classes. Implicit ABCs for a given class + (either registered or inferred from the presence of a special method like + __len__) are inserted directly after the last ABC explicitly listed in the + MRO of said class. If two implicit ABCs end up next to each other in the + resulting MRO, their ordering depends on the order of types in *abcs*. + + explicit_basesabstract_basesother_basesexplicit_c3_mrosabstract_c3_mrosother_c3_mros_compose_mroCalculates the method resolution order for a given class *cls*. + + Includes relevant abstract base classes (with their respective bases) from + the *types* iterable. Uses a modified C3 linearization algorithm. + + is_relatedis_strict_basetype_setsubcls_find_implReturns the best matching implementation from *registry* for type *cls*. 
+ + Where there is no registered implementation for a specific type, its method + resolution order is used to find a more generic implementation. + + Note: if *registry* does not contain an implementation for the base + *object* type, this function may return None. + + Ambiguous dispatch: {} or {}Single-dispatch generic function decorator. + + Transforms a function into a generic function, which can have different + behaviours depending upon the type of its first argument. The decorated + function acts as the default implementation, and additional + implementations can be registered using the register() attribute of the + generic function. + WeakKeyDictionarydispatch_cachecache_tokengeneric_func.dispatch(cls) -> + + Runs the dispatch algorithm to return the best available implementation + for the given *cls* registered on *generic_func*. + + current_tokengeneric_func.register(cls, func) -> func + + Registers a new implementation for the given *cls* on a *generic_func*. + + annInvalid first argument to `register()`: . Use either `@register(some_class)` or plain `@register` on an annotated function.". ""Use either `@register(some_class)` or plain `@register` ""on an annotated function."typingget_type_hintsargnameInvalid annotation for . is not a class. requires at least 1 positional argument' requires at least ''1 positional argument'singledispatch functionSingle-dispatch generic method descriptor. + + Supports wrapping existing descriptors and handles non-descriptor + callables as instance methods. + is not callable or a descriptordispatchergeneric_method.register(cls, func) -> func + + Registers a new implementation for the given *cls* on a *generic_method*. + _NOT_FOUND__set_name__Cannot assign the same cached_property to two different names ("Cannot assign the same cached_property to two different names " and ).Cannot use cached_property instance without calling __set_name__ on it.No '__dict__' attribute on instance to cache "instance to cache " property.The '__dict__' attribute on instance does not support item assignment for caching " instance ""does not support item assignment for caching "# Python module wrapper for _functools C module# to allow utilities written in Python to be added# to the functools module.# Written by Nick Coghlan ,# Raymond Hettinger ,# and Łukasz Langa .# Copyright (C) 2006-2013 Python Software Foundation.# See C source code for _functools credits/copyright# import types, weakref # Deferred to single_dispatch()### update_wrapper() and wraps() decorator# update_wrapper() and wraps() are tools to help write# wrapper functions that can handle naive introspection# Issue #17482: set __wrapped__ last so we don't inadvertently copy it# from the wrapped function when updating __dict__# Return the wrapper so this can be used as a decorator via partial()### total_ordering class decorator# The total ordering functions all invoke the root magic method directly# rather than using the corresponding operator. 
This avoids possible# infinite recursion that could occur when the operator dispatch logic# detects a NotImplemented result and then calls a reflected method.# Find user-defined comparisons (not those inherited from object).# prefer __lt__ to __le__ to __gt__ to __ge__### cmp_to_key() function converter### reduce() sequence to a single item### partial() argument application# Purely functional, no descriptor behaviour# just in case it's a subclass# XXX does it need to be *exactly* dict?# Descriptor version# func could be a descriptor like classmethod which isn't callable,# so we can't inherit from partial (it verifies func is callable)# flattening is mandatory in order to place cls/self before all# other arguments# it's also more efficient since only one function will be called# Assume __get__ returning something new indicates the# creation of an appropriate callable# If the underlying descriptor didn't do anything, treat this# like an instance method# Helper functions### LRU Cache function decorator# All of code below relies on kwds preserving the order input by the user.# Formerly, we sorted() the kwds before looping. The new way is *much*# faster; however, it means that f(x=1, y=2) will now be treated as a# distinct call from f(y=2, x=1) which will be cached separately.# Users should only access the lru_cache through its public API:# cache_info, cache_clear, and f.__wrapped__# The internals of the lru_cache are encapsulated for thread safety and# to allow the implementation to change (including a possible C version).# Negative maxsize is treated as 0# The user_function was passed in directly via the maxsize argument# Constants shared by all lru cache instances:# unique object used to signal cache misses# build a key from the function arguments# names for the link fields# bound method to lookup a key or return None# get cache size without calling len()# because linkedlist updates aren't threadsafe# root of the circular doubly linked list# initialize by pointing to self# No caching -- just a statistics update# Simple caching without ordering or size limit# Size limited caching that tracks accesses by recency# Move the link to the front of the circular queue# Getting here means that this same key was added to the# cache while the lock was released. Since the link# update is already done, we need only return the# computed result and update the count of misses.# Use the old root to store the new key and result.# Empty the oldest link and make it the new root.# Keep a reference to the old key and old result to# prevent their ref counts from going to zero during the# update. That will prevent potentially arbitrary object# clean-up code (i.e. 
__del__) from running while we're# still adjusting the links.# Now update the cache dictionary.# Save the potentially reentrant cache[key] assignment# for last, after the root and links have been put in# a consistent state.# Put result in a new link at the front of the queue.# Use the cache_len bound method instead of the len() function# which could potentially be wrapped in an lru_cache itself.### singledispatch() - single-dispatch generic function decorator# purge empty sequences# find merge candidates among seq heads# reject the current head, it appears later# remove the chosen candidate# Bases up to the last explicit ABC are considered first.# If *cls* is the class that introduces behaviour described by# an ABC *base*, insert said ABC to its MRO.# Remove entries which are already present in the __mro__ or unrelated.# Remove entries which are strict bases of other entries (they will end up# in the MRO anyway.# Subclasses of the ABCs in *types* which are also implemented by# *cls* can be used to stabilize ABC ordering.# Favor subclasses with the biggest number of useful bases# If *match* is an implicit ABC but there is another unrelated,# equally matching implicit ABC, refuse the temptation to guess.# There are many programs that use functools without singledispatch, so we# trade-off making singledispatch marginally slower for the benefit of# making start-up of such applications slightly faster.# only import typing if annotation parsing is necessary### cached_property() - computed once per instance, cached as attribute# not all objects have __dict__ (e.g. class defines slots)# check if another thread filled cache while we awaited lockb'functools.py - Tools for working with functions and callable objects +'u'functools.py - Tools for working with functions and callable objects +'b'update_wrapper'u'update_wrapper'b'wraps'u'wraps'b'WRAPPER_ASSIGNMENTS'u'WRAPPER_ASSIGNMENTS'b'WRAPPER_UPDATES'u'WRAPPER_UPDATES'b'total_ordering'u'total_ordering'b'cmp_to_key'u'cmp_to_key'b'lru_cache'u'lru_cache'b'partial'u'partial'b'partialmethod'u'partialmethod'b'singledispatch'u'singledispatch'b'singledispatchmethod'u'singledispatchmethod'b'cached_property'u'cached_property'b'__annotations__'u'__annotations__'b'Update a wrapper function to look like the wrapped function + + wrapper is the function to be updated + wrapped is the original function + assigned is a tuple naming the attributes assigned directly + from the wrapped function to the wrapper function (defaults to + functools.WRAPPER_ASSIGNMENTS) + updated is a tuple naming the attributes of the wrapper that + are updated with the corresponding attribute from the wrapped + function (defaults to functools.WRAPPER_UPDATES) + 'u'Update a wrapper function to look like the wrapped function + + wrapper is the function to be updated + wrapped is the original function + assigned is a tuple naming the attributes assigned directly + from the wrapped function to the wrapper function (defaults to + functools.WRAPPER_ASSIGNMENTS) + updated is a tuple naming the attributes of the wrapper that + are updated with the corresponding attribute from the wrapped + function (defaults to functools.WRAPPER_UPDATES) + 'b'Decorator factory to apply update_wrapper() to a wrapper function + + Returns a decorator that invokes update_wrapper() with the decorated + function as the wrapper argument and the arguments to wraps() as the + remaining arguments. Default arguments are as for update_wrapper(). 
+ This is a convenience function to simplify applying partial() to + update_wrapper(). + 'u'Decorator factory to apply update_wrapper() to a wrapper function + + Returns a decorator that invokes update_wrapper() with the decorated + function as the wrapper argument and the arguments to wraps() as the + remaining arguments. Default arguments are as for update_wrapper(). + This is a convenience function to simplify applying partial() to + update_wrapper(). + 'b'Return a > b. Computed by @total_ordering from (not a < b) and (a != b).'u'Return a > b. Computed by @total_ordering from (not a < b) and (a != b).'b'Return a <= b. Computed by @total_ordering from (a < b) or (a == b).'u'Return a <= b. Computed by @total_ordering from (a < b) or (a == b).'b'Return a >= b. Computed by @total_ordering from (not a < b).'u'Return a >= b. Computed by @total_ordering from (not a < b).'b'Return a >= b. Computed by @total_ordering from (not a <= b) or (a == b).'u'Return a >= b. Computed by @total_ordering from (not a <= b) or (a == b).'b'Return a < b. Computed by @total_ordering from (a <= b) and (a != b).'u'Return a < b. Computed by @total_ordering from (a <= b) and (a != b).'b'Return a > b. Computed by @total_ordering from (not a <= b).'u'Return a > b. Computed by @total_ordering from (not a <= b).'b'Return a < b. Computed by @total_ordering from (not a > b) and (a != b).'u'Return a < b. Computed by @total_ordering from (not a > b) and (a != b).'b'Return a >= b. Computed by @total_ordering from (a > b) or (a == b).'u'Return a >= b. Computed by @total_ordering from (a > b) or (a == b).'b'Return a <= b. Computed by @total_ordering from (not a > b).'u'Return a <= b. Computed by @total_ordering from (not a > b).'b'Return a <= b. Computed by @total_ordering from (not a >= b) or (a == b).'u'Return a <= b. Computed by @total_ordering from (not a >= b) or (a == b).'b'Return a > b. Computed by @total_ordering from (a >= b) and (a != b).'u'Return a > b. Computed by @total_ordering from (a >= b) and (a != b).'b'Return a < b. Computed by @total_ordering from (not a >= b).'u'Return a < b. Computed by @total_ordering from (not a >= b).'b'__gt__'u'__gt__'b'__le__'u'__le__'b'__ge__'u'__ge__'b'__lt__'u'__lt__'b'Class decorator that fills in missing ordering methods'u'Class decorator that fills in missing ordering methods'b'must define at least one ordering operation: < > <= >='u'must define at least one ordering operation: < > <= >='b'Convert a cmp= function into a key= function'u'Convert a cmp= function into a key= function'b' + reduce(function, sequence[, initial]) -> value + + Apply a function of two arguments cumulatively to the items of a sequence, + from left to right, so as to reduce the sequence to a single value. + For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates + ((((1+2)+3)+4)+5). If initial is present, it is placed before the items + of the sequence in the calculation, and serves as a default when the + sequence is empty. + 'u' + reduce(function, sequence[, initial]) -> value + + Apply a function of two arguments cumulatively to the items of a sequence, + from left to right, so as to reduce the sequence to a single value. + For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates + ((((1+2)+3)+4)+5). If initial is present, it is placed before the items + of the sequence in the calculation, and serves as a default when the + sequence is empty. 
+ 'b'reduce() of empty sequence with no initial value'u'reduce() of empty sequence with no initial value'b'New function with partial application of the given arguments + and keywords. + 'u'New function with partial application of the given arguments + and keywords. + 'b'args'b'keywords'b'the first argument must be callable'u'the first argument must be callable'b'functools.'u'functools.'b'argument to __setstate__ must be a tuple'u'argument to __setstate__ must be a tuple'b'expected 4 items in state, got 'u'expected 4 items in state, got 'b'invalid partial state'u'invalid partial state'b'Method descriptor with partial application of the given arguments + and keywords. + + Supports wrapping existing descriptors and handles non-descriptor + callables as instance methods. + 'u'Method descriptor with partial application of the given arguments + and keywords. + + Supports wrapping existing descriptors and handles non-descriptor + callables as instance methods. + 'b'descriptor '__init__' of partialmethod needs an argument'u'descriptor '__init__' of partialmethod needs an argument'b'type 'partialmethod' takes at least one argument, got %d'u'type 'partialmethod' takes at least one argument, got %d'b'{!r} is not callable or a descriptor'u'{!r} is not callable or a descriptor'b'($self, func, /, *args, **keywords)'u'($self, func, /, *args, **keywords)'b'{module}.{cls}({func}, {args}, {keywords})'u'{module}.{cls}({func}, {args}, {keywords})'b'CacheInfo'u'CacheInfo'b'hits'u'hits'b'misses'u'misses'b'currsize'u'currsize'b' This class guarantees that hash() will be called no more than once + per element. This is important because the lru_cache() will hash + the key multiple times on a cache miss. + + 'u' This class guarantees that hash() will be called no more than once + per element. This is important because the lru_cache() will hash + the key multiple times on a cache miss. + + 'b'hashvalue'u'hashvalue'b'Make a cache key from optionally typed positional and keyword arguments + + The key is constructed in a way that is flat as possible rather than + as a nested structure that would take more memory. + + If there is only a single argument and its data type is known to cache + its hash value, then that argument is returned without a wrapper. This + saves space and improves lookup speed. + + 'u'Make a cache key from optionally typed positional and keyword arguments + + The key is constructed in a way that is flat as possible rather than + as a nested structure that would take more memory. + + If there is only a single argument and its data type is known to cache + its hash value, then that argument is returned without a wrapper. This + saves space and improves lookup speed. + + 'b'Least-recently-used cache decorator. + + If *maxsize* is set to None, the LRU features are disabled and the cache + can grow without bound. + + If *typed* is True, arguments of different types will be cached separately. + For example, f(3.0) and f(3) will be treated as distinct calls with + distinct results. + + Arguments to the cached function must be hashable. + + View the cache statistics named tuple (hits, misses, maxsize, currsize) + with f.cache_info(). Clear the cache and statistics with f.cache_clear(). + Access the underlying function with f.__wrapped__. + + See: http://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU) + + 'u'Least-recently-used cache decorator. + + If *maxsize* is set to None, the LRU features are disabled and the cache + can grow without bound. 
+ + If *typed* is True, arguments of different types will be cached separately. + For example, f(3.0) and f(3) will be treated as distinct calls with + distinct results. + + Arguments to the cached function must be hashable. + + View the cache statistics named tuple (hits, misses, maxsize, currsize) + with f.cache_info(). Clear the cache and statistics with f.cache_clear(). + Access the underlying function with f.__wrapped__. + + See: http://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU) + + 'b'Expected first argument to be an integer, a callable, or None'u'Expected first argument to be an integer, a callable, or None'b'Report cache statistics'u'Report cache statistics'b'Clear the cache and cache statistics'u'Clear the cache and cache statistics'b'Merges MROs in *sequences* to a single MRO using the C3 algorithm. + + Adapted from http://www.python.org/download/releases/2.3/mro/. + + 'u'Merges MROs in *sequences* to a single MRO using the C3 algorithm. + + Adapted from http://www.python.org/download/releases/2.3/mro/. + + 'b'Inconsistent hierarchy'u'Inconsistent hierarchy'b'Computes the method resolution order using extended C3 linearization. + + If no *abcs* are given, the algorithm works exactly like the built-in C3 + linearization used for method resolution. + + If given, *abcs* is a list of abstract base classes that should be inserted + into the resulting MRO. Unrelated ABCs are ignored and don't end up in the + result. The algorithm inserts ABCs where their functionality is introduced, + i.e. issubclass(cls, abc) returns True for the class itself but returns + False for all its direct base classes. Implicit ABCs for a given class + (either registered or inferred from the presence of a special method like + __len__) are inserted directly after the last ABC explicitly listed in the + MRO of said class. If two implicit ABCs end up next to each other in the + resulting MRO, their ordering depends on the order of types in *abcs*. + + 'u'Computes the method resolution order using extended C3 linearization. + + If no *abcs* are given, the algorithm works exactly like the built-in C3 + linearization used for method resolution. + + If given, *abcs* is a list of abstract base classes that should be inserted + into the resulting MRO. Unrelated ABCs are ignored and don't end up in the + result. The algorithm inserts ABCs where their functionality is introduced, + i.e. issubclass(cls, abc) returns True for the class itself but returns + False for all its direct base classes. Implicit ABCs for a given class + (either registered or inferred from the presence of a special method like + __len__) are inserted directly after the last ABC explicitly listed in the + MRO of said class. If two implicit ABCs end up next to each other in the + resulting MRO, their ordering depends on the order of types in *abcs*. + + 'b'Calculates the method resolution order for a given class *cls*. + + Includes relevant abstract base classes (with their respective bases) from + the *types* iterable. Uses a modified C3 linearization algorithm. + + 'u'Calculates the method resolution order for a given class *cls*. + + Includes relevant abstract base classes (with their respective bases) from + the *types* iterable. Uses a modified C3 linearization algorithm. + + 'b'Returns the best matching implementation from *registry* for type *cls*. + + Where there is no registered implementation for a specific type, its method + resolution order is used to find a more generic implementation. 
+ + Note: if *registry* does not contain an implementation for the base + *object* type, this function may return None. + + 'u'Returns the best matching implementation from *registry* for type *cls*. + + Where there is no registered implementation for a specific type, its method + resolution order is used to find a more generic implementation. + + Note: if *registry* does not contain an implementation for the base + *object* type, this function may return None. + + 'b'Ambiguous dispatch: {} or {}'u'Ambiguous dispatch: {} or {}'b'Single-dispatch generic function decorator. + + Transforms a function into a generic function, which can have different + behaviours depending upon the type of its first argument. The decorated + function acts as the default implementation, and additional + implementations can be registered using the register() attribute of the + generic function. + 'u'Single-dispatch generic function decorator. + + Transforms a function into a generic function, which can have different + behaviours depending upon the type of its first argument. The decorated + function acts as the default implementation, and additional + implementations can be registered using the register() attribute of the + generic function. + 'b'generic_func.dispatch(cls) -> + + Runs the dispatch algorithm to return the best available implementation + for the given *cls* registered on *generic_func*. + + 'u'generic_func.dispatch(cls) -> + + Runs the dispatch algorithm to return the best available implementation + for the given *cls* registered on *generic_func*. + + 'b'generic_func.register(cls, func) -> func + + Registers a new implementation for the given *cls* on a *generic_func*. + + 'u'generic_func.register(cls, func) -> func + + Registers a new implementation for the given *cls* on a *generic_func*. + + 'b'Invalid first argument to `register()`: 'u'Invalid first argument to `register()`: 'b'. Use either `@register(some_class)` or plain `@register` on an annotated function.'u'. Use either `@register(some_class)` or plain `@register` on an annotated function.'b'Invalid annotation for 'u'Invalid annotation for 'b'. 'u'. 'b' is not a class.'u' is not a class.'b' requires at least 1 positional argument'u' requires at least 1 positional argument'b'singledispatch function'u'singledispatch function'b'Single-dispatch generic method descriptor. + + Supports wrapping existing descriptors and handles non-descriptor + callables as instance methods. + 'u'Single-dispatch generic method descriptor. + + Supports wrapping existing descriptors and handles non-descriptor + callables as instance methods. + 'b' is not callable or a descriptor'u' is not callable or a descriptor'b'generic_method.register(cls, func) -> func + + Registers a new implementation for the given *cls* on a *generic_method*. + 'u'generic_method.register(cls, func) -> func + + Registers a new implementation for the given *cls* on a *generic_method*. 
+ 'b'Cannot assign the same cached_property to two different names ('u'Cannot assign the same cached_property to two different names ('b' and 'u' and 'b').'u').'b'Cannot use cached_property instance without calling __set_name__ on it.'u'Cannot use cached_property instance without calling __set_name__ on it.'b'No '__dict__' attribute on 'u'No '__dict__' attribute on 'b' instance to cache 'u' instance to cache 'b' property.'u' property.'b'The '__dict__' attribute on 'u'The '__dict__' attribute on 'b' instance does not support item assignment for caching 'u' instance does not support item assignment for caching 'A Future class similar to the one in PEP 3148.STACK_DEBUGThis class is *almost* compatible with concurrent.futures.Future. + + Differences: + + - This class is not thread-safe. + + - result() and exception() do not take a timeout argument and + raise an exception when the future isn't done yet. + + - Callbacks registered with add_done_callback() are always called + via the event loop's call_soon(). + + - This class is not compatible with the wait() and as_completed() + methods in the concurrent.futures package. + + (In Python 3.4 or later we may be able to unify the implementations.) + __log_tracebackInitialize the future. + + The optional event_loop argument allows explicitly setting the event + loop object used by the future. If it's not provided, the future uses + the default event loop. + <{} {}> exception was never retrieved_log_traceback can only be set to FalseReturn the event loop the Future is bound to.Future object is not initialized.Cancel the future and schedule callbacks. + + If the future is already done or cancelled, return False. Otherwise, + change the future's state to cancelled, schedule the callbacks and + return True. + __schedule_callbacksInternal: Ask the event loop to call all callbacks. + + The callbacks are scheduled to be called as soon as possible. Also + clears the callback list. + callbacksReturn True if the future is done. + + Done means either that a result / exception are available, or that the + future was cancelled. + Return the result this future represents. + + If the future has been cancelled, raises CancelledError. If the + future's result isn't yet available, raises InvalidStateError. If + the future is done and has an exception set, this exception is raised. + Result is not ready.Return the exception that was set on this future. + + The exception (or None if no exception was set) is returned only if + the future is done. If the future has been cancelled, raises + CancelledError. If the future isn't done yet, raises + InvalidStateError. + Exception is not set.Add a callback to be run when the future becomes done. + + The callback is called with a single argument - the future object. If + the future is already done when this is called, the callback is + scheduled with call_soon. + Remove all instances of a callback from the "call when done" list. + + Returns the number of callbacks removed. + filtered_callbacksremoved_countMark the future done and set its result. + + If the future is already done when this method is called, raises + InvalidStateError. + Mark the future done and set an exception. + + If the future is already done when this method is called, raises + InvalidStateError. 
+ StopIteration interacts badly with generators and cannot be raised into a Future"StopIteration interacts badly with generators ""and cannot be raised into a Future"await wasn't used with future_PyFuture_set_result_unless_cancelledHelper setting the result only if the future was not cancelled._convert_future_excexc_class_set_concurrent_future_stateCopy state from a future to a concurrent.futures.Future._copy_future_stateInternal helper to copy state from another Future. + + The other Future may be a concurrent.futures.Future. + _chain_futuredestinationChain two futures so that when one completes, so does the other. + + The result (or exception) of source will be copied to destination. + If destination is cancelled, source gets cancelled too. + Compatible with both asyncio.Future and concurrent.futures.Future. + A future is required for source argumentA future is required for destination argumentsource_loopdest_loop_set_state_call_check_cancel_call_set_stateWrap concurrent.futures.Future object.concurrent.futures.Future is expected, got new_future_CFuture# heavy-duty debugging# Class variables serving as defaults for instance variables.# This field is used for a dual purpose:# - Its presence is a marker to declare that a class implements# the Future protocol (i.e. is intended to be duck-type compatible).# The value must also be not-None, to enable a subclass to declare# that it is not compatible by setting this to None.# - It is set by __iter__() below so that Task._step() can tell# the difference between# `await Future()` or`yield from Future()` (correct) vs.# `yield Future()` (incorrect).# set_exception() was not called, or result() or exception()# has consumed the exception# Don't implement running(); see http://bugs.python.org/issue18699# New method not in PEP 3148.# So-called internal methods (note: no set_running_or_notify_cancel()).# This tells Task to wait for completion.# May raise too.# make compatible with 'yield from'.# Needed for testing purposes.# Tries to call Future.get_loop() if it's available.# Otherwise fallbacks to using the old '_loop' property.# _CFuture is needed for tests.b'A Future class similar to the one in PEP 3148.'u'A Future class similar to the one in PEP 3148.'b'wrap_future'u'wrap_future'b'isfuture'u'isfuture'b'This class is *almost* compatible with concurrent.futures.Future. + + Differences: + + - This class is not thread-safe. + + - result() and exception() do not take a timeout argument and + raise an exception when the future isn't done yet. + + - Callbacks registered with add_done_callback() are always called + via the event loop's call_soon(). + + - This class is not compatible with the wait() and as_completed() + methods in the concurrent.futures package. + + (In Python 3.4 or later we may be able to unify the implementations.) + 'u'This class is *almost* compatible with concurrent.futures.Future. + + Differences: + + - This class is not thread-safe. + + - result() and exception() do not take a timeout argument and + raise an exception when the future isn't done yet. + + - Callbacks registered with add_done_callback() are always called + via the event loop's call_soon(). + + - This class is not compatible with the wait() and as_completed() + methods in the concurrent.futures package. + + (In Python 3.4 or later we may be able to unify the implementations.) + 'b'Initialize the future. + + The optional event_loop argument allows explicitly setting the event + loop object used by the future. If it's not provided, the future uses + the default event loop. 
+ 'u'Initialize the future. + + The optional event_loop argument allows explicitly setting the event + loop object used by the future. If it's not provided, the future uses + the default event loop. + 'b'<{} {}>'u'<{} {}>'b' exception was never retrieved'u' exception was never retrieved'b'future'u'future'b'_log_traceback can only be set to False'u'_log_traceback can only be set to False'b'Return the event loop the Future is bound to.'u'Return the event loop the Future is bound to.'b'Future object is not initialized.'u'Future object is not initialized.'b'Cancel the future and schedule callbacks. + + If the future is already done or cancelled, return False. Otherwise, + change the future's state to cancelled, schedule the callbacks and + return True. + 'u'Cancel the future and schedule callbacks. + + If the future is already done or cancelled, return False. Otherwise, + change the future's state to cancelled, schedule the callbacks and + return True. + 'b'Internal: Ask the event loop to call all callbacks. + + The callbacks are scheduled to be called as soon as possible. Also + clears the callback list. + 'u'Internal: Ask the event loop to call all callbacks. + + The callbacks are scheduled to be called as soon as possible. Also + clears the callback list. + 'b'Return True if the future is done. + + Done means either that a result / exception are available, or that the + future was cancelled. + 'u'Return True if the future is done. + + Done means either that a result / exception are available, or that the + future was cancelled. + 'b'Return the result this future represents. + + If the future has been cancelled, raises CancelledError. If the + future's result isn't yet available, raises InvalidStateError. If + the future is done and has an exception set, this exception is raised. + 'u'Return the result this future represents. + + If the future has been cancelled, raises CancelledError. If the + future's result isn't yet available, raises InvalidStateError. If + the future is done and has an exception set, this exception is raised. + 'b'Result is not ready.'u'Result is not ready.'b'Return the exception that was set on this future. + + The exception (or None if no exception was set) is returned only if + the future is done. If the future has been cancelled, raises + CancelledError. If the future isn't done yet, raises + InvalidStateError. + 'u'Return the exception that was set on this future. + + The exception (or None if no exception was set) is returned only if + the future is done. If the future has been cancelled, raises + CancelledError. If the future isn't done yet, raises + InvalidStateError. + 'b'Exception is not set.'u'Exception is not set.'b'Add a callback to be run when the future becomes done. + + The callback is called with a single argument - the future object. If + the future is already done when this is called, the callback is + scheduled with call_soon. + 'u'Add a callback to be run when the future becomes done. + + The callback is called with a single argument - the future object. If + the future is already done when this is called, the callback is + scheduled with call_soon. + 'b'Remove all instances of a callback from the "call when done" list. + + Returns the number of callbacks removed. + 'u'Remove all instances of a callback from the "call when done" list. + + Returns the number of callbacks removed. + 'b'Mark the future done and set its result. + + If the future is already done when this method is called, raises + InvalidStateError. 
+ 'u'Mark the future done and set its result. + + If the future is already done when this method is called, raises + InvalidStateError. + 'b'Mark the future done and set an exception. + + If the future is already done when this method is called, raises + InvalidStateError. + 'u'Mark the future done and set an exception. + + If the future is already done when this method is called, raises + InvalidStateError. + 'b'StopIteration interacts badly with generators and cannot be raised into a Future'u'StopIteration interacts badly with generators and cannot be raised into a Future'b'await wasn't used with future'u'await wasn't used with future'b'Helper setting the result only if the future was not cancelled.'u'Helper setting the result only if the future was not cancelled.'b'Copy state from a future to a concurrent.futures.Future.'u'Copy state from a future to a concurrent.futures.Future.'b'Internal helper to copy state from another Future. + + The other Future may be a concurrent.futures.Future. + 'u'Internal helper to copy state from another Future. + + The other Future may be a concurrent.futures.Future. + 'b'Chain two futures so that when one completes, so does the other. + + The result (or exception) of source will be copied to destination. + If destination is cancelled, source gets cancelled too. + Compatible with both asyncio.Future and concurrent.futures.Future. + 'u'Chain two futures so that when one completes, so does the other. + + The result (or exception) of source will be copied to destination. + If destination is cancelled, source gets cancelled too. + Compatible with both asyncio.Future and concurrent.futures.Future. + 'b'A future is required for source argument'u'A future is required for source argument'b'A future is required for destination argument'u'A future is required for destination argument'b'Wrap concurrent.futures.Future object.'u'Wrap concurrent.futures.Future object.'b'concurrent.futures.Future is expected, got 'u'concurrent.futures.Future is expected, got 'u'asyncio.futures'DEBUG_COLLECTABLEDEBUG_LEAKDEBUG_SAVEALLDEBUG_STATSDEBUG_UNCOLLECTABLEu'This module provides access to the garbage collector for reference cycles. + +enable() -- Enable automatic garbage collection. +disable() -- Disable automatic garbage collection. +isenabled() -- Returns true if automatic collection is enabled. +collect() -- Do a full collection right now. +get_count() -- Return the current collection counts. +get_stats() -- Return list of dictionaries containing per-generation stats. +set_debug() -- Set debugging flags. +get_debug() -- Get debugging flags. +set_threshold() -- Set the collection thresholds. +get_threshold() -- Return the current the collection thresholds. +get_objects() -- Return a list of all objects tracked by the collector. +is_tracked() -- Returns true if a given object is tracked. +get_referrers() -- Return the list of objects that refer to an object. +get_referents() -- Return the list of objects that an object refers to. +freeze() -- Freeze all tracked objects and ignore them for future collections. +unfreeze() -- Unfreeze all objects in the permanent generation. +get_freeze_count() -- Return the number of objects in the permanent generation. +'freezegarbageget_countget_freeze_countget_objectsget_referentsget_referrersget_statsget_thresholdis_trackedset_thresholdunfreeze +Path operations common to more than one OS +Do not use directly. The OS specific modules import the appropriate +functions from this module themselves. 
+commonprefixgetatimegetctimegetsizesameopenfilesamestatTest whether a path exists. Returns False for broken symbolic linksTest whether a path is a regular fileReturn true if the pathname refers to an existing directory.Return the size of a file, reported by os.stat().Return the last modification time of a file, reported by os.stat().Return the last access time of a file, reported by os.stat().st_atimeReturn the metadata change time of a file, reported by os.stat().st_ctimeGiven a list of pathnames, returns the longest common leading componentTest whether two stat buffers reference the same filest_inost_devf1Test whether two pathnames reference the same actual file or directory + + This is determined by the device number and i-node number and + raises an exception if an os.stat() call on either pathname fails. + fp1fp2Test whether two open file objects reference the same filefstat_splitextaltsepextsepSplit the extension from a pathname. + + Extension is everything from the last dot to the end, ignoring + leading dots. Returns "(root, ext)"; ext may be empty.sepIndexaltsepIndexdotIndexfilenameIndex_check_arg_typeshasstrhasbytes() argument must be str, bytes, or os.PathLike object, not '() argument must be str, bytes, or ''os.PathLike object, not 'Can't mix strings and bytes in path components# Does a path exist?# This is false for dangling symbolic links on systems that support them.# This follows symbolic links, so both islink() and isdir() can be true# for the same path on systems that support symlinks# Is a path a directory?# This follows symbolic links, so both islink() and isdir()# can be true for the same path on systems that support symlinks# Return the longest prefix of all list elements.# Some people pass in a list of pathname parts to operate in an OS-agnostic# fashion; don't try to translate in that case as that's an abuse of the# API and they are already doing what they need to be OS-agnostic and so# they most likely won't be using an os.PathLike object in the sublists.# Are two stat buffers (obtained from stat, fstat or lstat)# describing the same file?# Are two filenames really pointing to the same file?# Are two open files really referencing the same file?# (Not necessarily the same file descriptor!)# Split a path in root and extension.# The extension is everything starting at the last dot in the last# pathname component; the root is everything before that.# It is always true that root + ext == p.# Generic implementation of splitext, to be parametrized with# the separators# NOTE: This code must work for text and bytes strings.# skip all leading dotsb' +Path operations common to more than one OS +Do not use directly. The OS specific modules import the appropriate +functions from this module themselves. +'u' +Path operations common to more than one OS +Do not use directly. The OS specific modules import the appropriate +functions from this module themselves. +'b'commonprefix'u'commonprefix'b'getatime'u'getatime'b'getctime'u'getctime'b'getmtime'u'getmtime'b'getsize'u'getsize'b'isdir'u'isdir'b'isfile'u'isfile'b'samefile'u'samefile'b'sameopenfile'u'sameopenfile'b'samestat'u'samestat'b'Test whether a path exists. Returns False for broken symbolic links'u'Test whether a path exists. 
Returns False for broken symbolic links'b'Test whether a path is a regular file'u'Test whether a path is a regular file'b'Return true if the pathname refers to an existing directory.'u'Return true if the pathname refers to an existing directory.'b'Return the size of a file, reported by os.stat().'u'Return the size of a file, reported by os.stat().'b'Return the last modification time of a file, reported by os.stat().'u'Return the last modification time of a file, reported by os.stat().'b'Return the last access time of a file, reported by os.stat().'u'Return the last access time of a file, reported by os.stat().'b'Return the metadata change time of a file, reported by os.stat().'u'Return the metadata change time of a file, reported by os.stat().'b'Given a list of pathnames, returns the longest common leading component'u'Given a list of pathnames, returns the longest common leading component'b'Test whether two stat buffers reference the same file'u'Test whether two stat buffers reference the same file'b'Test whether two pathnames reference the same actual file or directory + + This is determined by the device number and i-node number and + raises an exception if an os.stat() call on either pathname fails. + 'u'Test whether two pathnames reference the same actual file or directory + + This is determined by the device number and i-node number and + raises an exception if an os.stat() call on either pathname fails. + 'b'Test whether two open file objects reference the same file'u'Test whether two open file objects reference the same file'b'Split the extension from a pathname. + + Extension is everything from the last dot to the end, ignoring + leading dots. Returns "(root, ext)"; ext may be empty.'u'Split the extension from a pathname. + + Extension is everything from the last dot to the end, ignoring + leading dots. Returns "(root, ext)"; ext may be empty.'b'() argument must be str, bytes, or os.PathLike object, not 'u'() argument must be str, bytes, or os.PathLike object, not 'b'Can't mix strings and bytes in path components'u'Can't mix strings and bytes in path components'u'genericpath'Parser for command line options. + +This module helps scripts to parse the command line arguments in +sys.argv. It supports the same conventions as the Unix getopt() +function (including the special meanings of arguments of the form `-' +and `--'). Long options similar to those supported by GNU software +may be used as well via an optional third argument. This module +provides two functions and an exception: + +getopt() -- Parse command line options +gnu_getopt() -- Like getopt(), but allow option and non-option arguments +to be intermixed. +GetoptError -- exception (class) raised with 'opt' attribute, which is the +option involved with the exception. +GetoptErrorgnu_getoptshortoptslongoptsgetopt(args, options[, long_options]) -> opts, args + + Parses command line options and parameter list. args is the + argument list to be parsed, without the leading reference to the + running program. Typically, this means "sys.argv[1:]". shortopts + is the string of option letters that the script wants to + recognize, with options that require an argument followed by a + colon (i.e., the same format that Unix getopt() uses). If + specified, longopts is a list of strings with the names of the + long options which should be supported. The leading '--' + characters should not be included in the option name. Options + which require an argument should be followed by an equal sign + ('='). 
+ + The return value consists of two elements: the first is a list of + (option, value) pairs; the second is the list of program arguments + left after the option list was stripped (this is a trailing slice + of the first argument). Each option-and-value pair returned has + the option as its first element, prefixed with a hyphen (e.g., + '-x'), and the option argument as its second element, or an empty + string if the option has no argument. The options occur in the + list in the same order in which they were found, thus allowing + multiple occurrences. Long and short options may be mixed. + + do_longsdo_shortsgetopt(args, options[, long_options]) -> opts, args + + This function works like getopt(), except that GNU style scanning + mode is used by default. This means that option and non-option + arguments may be intermixed. The getopt() function stops + processing options as soon as a non-option argument is + encountered. + + If the first character of the option string is `+', or if the + environment variable POSIXLY_CORRECT is set, then option + processing stops as soon as a non-option argument is encountered. + + prog_argsall_options_firstPOSIXLY_CORRECToptarglong_has_argshas_argoption --%s requires argumentoption --%s must not have an argumentoption --%s not recognizedoption --%s not a unique prefixunique_matchoptstringshort_has_argoption -%s requires argumentoption -%s not recognizeda:balpha=# Long option support added by Lars Wirzenius .# Gerrit Holl moved the string-based exceptions# to class-based exceptions.# Peter Åstrand added gnu_getopt().# TODO for gnu_getopt():# - GNU getopt_long_only mechanism# - allow the caller to specify ordering# - RETURN_IN_ORDER option# - GNU extension with '-' as first character of option string# - optional arguments, specified by double colons# - an option string with a W followed by semicolon should# treat "-W foo" as "--foo"# Bootstrapping Python: gettext's dependencies not built yet# Allow options after non-option arguments?# Return:# has_arg?# full option name# Is there an exact match?# No exact match, so better be unique.# XXX since possibilities contains all valid continuations, might be# nice to work them into the error msgb'Parser for command line options. + +This module helps scripts to parse the command line arguments in +sys.argv. It supports the same conventions as the Unix getopt() +function (including the special meanings of arguments of the form `-' +and `--'). Long options similar to those supported by GNU software +may be used as well via an optional third argument. This module +provides two functions and an exception: + +getopt() -- Parse command line options +gnu_getopt() -- Like getopt(), but allow option and non-option arguments +to be intermixed. +GetoptError -- exception (class) raised with 'opt' attribute, which is the +option involved with the exception. +'u'Parser for command line options. + +This module helps scripts to parse the command line arguments in +sys.argv. It supports the same conventions as the Unix getopt() +function (including the special meanings of arguments of the form `-' +and `--'). Long options similar to those supported by GNU software +may be used as well via an optional third argument. This module +provides two functions and an exception: + +getopt() -- Parse command line options +gnu_getopt() -- Like getopt(), but allow option and non-option arguments +to be intermixed. +GetoptError -- exception (class) raised with 'opt' attribute, which is the +option involved with the exception. 
+'b'GetoptError'u'GetoptError'b'getopt'u'getopt'b'gnu_getopt'u'gnu_getopt'b'getopt(args, options[, long_options]) -> opts, args + + Parses command line options and parameter list. args is the + argument list to be parsed, without the leading reference to the + running program. Typically, this means "sys.argv[1:]". shortopts + is the string of option letters that the script wants to + recognize, with options that require an argument followed by a + colon (i.e., the same format that Unix getopt() uses). If + specified, longopts is a list of strings with the names of the + long options which should be supported. The leading '--' + characters should not be included in the option name. Options + which require an argument should be followed by an equal sign + ('='). + + The return value consists of two elements: the first is a list of + (option, value) pairs; the second is the list of program arguments + left after the option list was stripped (this is a trailing slice + of the first argument). Each option-and-value pair returned has + the option as its first element, prefixed with a hyphen (e.g., + '-x'), and the option argument as its second element, or an empty + string if the option has no argument. The options occur in the + list in the same order in which they were found, thus allowing + multiple occurrences. Long and short options may be mixed. + + 'u'getopt(args, options[, long_options]) -> opts, args + + Parses command line options and parameter list. args is the + argument list to be parsed, without the leading reference to the + running program. Typically, this means "sys.argv[1:]". shortopts + is the string of option letters that the script wants to + recognize, with options that require an argument followed by a + colon (i.e., the same format that Unix getopt() uses). If + specified, longopts is a list of strings with the names of the + long options which should be supported. The leading '--' + characters should not be included in the option name. Options + which require an argument should be followed by an equal sign + ('='). + + The return value consists of two elements: the first is a list of + (option, value) pairs; the second is the list of program arguments + left after the option list was stripped (this is a trailing slice + of the first argument). Each option-and-value pair returned has + the option as its first element, prefixed with a hyphen (e.g., + '-x'), and the option argument as its second element, or an empty + string if the option has no argument. The options occur in the + list in the same order in which they were found, thus allowing + multiple occurrences. Long and short options may be mixed. + + 'b'getopt(args, options[, long_options]) -> opts, args + + This function works like getopt(), except that GNU style scanning + mode is used by default. This means that option and non-option + arguments may be intermixed. The getopt() function stops + processing options as soon as a non-option argument is + encountered. + + If the first character of the option string is `+', or if the + environment variable POSIXLY_CORRECT is set, then option + processing stops as soon as a non-option argument is encountered. + + 'u'getopt(args, options[, long_options]) -> opts, args + + This function works like getopt(), except that GNU style scanning + mode is used by default. This means that option and non-option + arguments may be intermixed. The getopt() function stops + processing options as soon as a non-option argument is + encountered. 
+ + If the first character of the option string is `+', or if the + environment variable POSIXLY_CORRECT is set, then option + processing stops as soon as a non-option argument is encountered. + + 'b'POSIXLY_CORRECT'u'POSIXLY_CORRECT'b'option --%s requires argument'u'option --%s requires argument'b'option --%s must not have an argument'u'option --%s must not have an argument'b'option --%s not recognized'u'option --%s not recognized'b'option --%s not a unique prefix'u'option --%s not a unique prefix'b'option -%s requires argument'u'option -%s requires argument'b'option -%s not recognized'u'option -%s not recognized'b'a:b'u'a:b'b'alpha='u'alpha='Internationalization and localization support. + +This module provides internationalization (I18N) and localization (L10N) +support for your Python programs by providing an interface to the GNU gettext +message catalog library. + +I18N refers to the operation by which a program is made aware of multiple +languages. L10N refers to the adaptation of your program, once +internationalized, to the local language and cultural habits. + +NullTranslationsGNUTranslationsCatalogtranslationinstalldngettextlgettextldgettextldngettextlngettextpgettextdpgettextnpgettextdnpgettextshare_default_localedir + (?P[ \t]+) | # spaces and horizontal tabs + (?P[0-9]+\b) | # decimal integer + (?Pn\b) | # only n is allowed + (?P[()]) | + (?P[-*/%+?:]|[>, + # <=, >=, ==, !=, &&, ||, + # ? : + # unary and bitwise ops + # not allowed + (?P\w+|.) # invalid token + _token_pattern_tokenizelastgroupWHITESPACESINVALIDinvalid token in plural form: %sunexpected token in plural form: %sunexpected end of plural form||&&!=<=>=_binary_ops_c2py_opsnexttoknot unbalanced parenthesis in plural form%s%s%s%d(%s)if_trueif_false%s if %s else %s_as_intPlural value must be an integer, got %sc2pyGets a C expression as used in PO files for plural forms and returns a + Python function that implements an equivalent expression. 
+ plural form expression is too longplural form expression is too complexif True: + def func(n): + if not isinstance(n, int): + n = _as_int(n) + return int(%s) + _expand_langCOMPONENT_CODESETCOMPONENT_TERRITORYCOMPONENT_MODIFIERmaskmodifiercodesetterritorylanguage_info_output_charset_fallbackadd_fallbacklgettext() is deprecated, use gettext() instead.*\blgettext\b.*msgid1msgid2lngettext() is deprecated, use ngettext() instead.*\blngettext\b.*tmsgoutput_charset() is deprecatedset_output_charsetset_output_charset() is deprecatedallowed25000721580x950412deLE_MAGIC37257227730xde120495BE_MAGIC%s%sCONTEXTVERSIONS_get_versionsReturns a tuple of major version, minor versionOverride this method to support alternative .mo formats._catalogcatalogbuflen4I>IIBad magic numbermajor_versionminor_versionBad version number mlenmoffmendtlentofftendFile is corruptlastkb_item#-#-#-#-#content-typecharset=plural-formsplural=ctxt_msg_idlocaledirlanguagesenvarLANGUAGELANGnelangsnelang%s.momofile_translationsunspecified_unspecifiedmofilesNo translation file found for domainparameter codeset is deprecated.*\bset_output_charset\b.*_localedirs_localecodesetsmessages_current_domainbind_textdomain_codeset() is deprecatedldgettext() is deprecated, use dgettext() instead.*\bparameter codeset\b.*ldngettext() is deprecated, use dngettext() instead.*\bldgettext\b.*.*\bldngettext\b.*# This module represents the integration of work, contributions, feedback, and# suggestions from the following people:# Martin von Loewis, who wrote the initial implementation of the underlying# C-based libintlmodule (later renamed _gettext), along with a skeletal# gettext.py implementation.# Peter Funk, who wrote fintl.py, a fairly complete wrapper around intlmodule,# which also included a pure-Python implementation to read .mo files if# intlmodule wasn't available.# James Henstridge, who also wrote a gettext.py module, which has some# interesting, but currently unsupported experimental features: the notion of# a Catalog class and instances, and the ability to add to a catalog file via# a Python API.# Barry Warsaw integrated these modules, wrote the .install() API and code,# and conformed all C and Python code to Python's coding standards.# Francois Pinard and Marc-Andre Lemburg also contributed valuably to this# J. David Ibanez implemented plural forms. Bruno Haible fixed some bugs.# TODO:# - Lazy loading of .mo files. Currently the entire catalog is loaded into# memory, but that's probably bad for large translated programs. Instead,# the lexical sort of original strings in GNU .mo files should be exploited# to do binary searches and lazy initializations. Or you might want to use# the undocumented double-hash algorithm for .mo files with hash tables, but# you'll need to study the GNU gettext code to do this.# - Support Solaris .mo file formats. Unfortunately, we've been unable to# find this format documented anywhere.# Expression parsing for plural form selection.# The gettext library supports a small subset of C syntax. 
The only# incompatible difference is that integer literals starting with zero are# decimal.# https://www.gnu.org/software/gettext/manual/gettext.html#Plural-forms# http://git.savannah.gnu.org/cgit/gettext.git/tree/gettext-runtime/intl/plural.y# Break chained comparisons# '==', '!=', '<', '>', '<=', '>='# Replace some C operators by their Python equivalents# '<', '>', '<=', '>='# Python compiler limit is about 90.# The most complex example has 2.# Recursion error can be raised in _parse() or exec().# split up the locale into its base components# if all components for this combo exist ...# Magic number of .mo files# The encoding of a msgctxt and a msgid in a .mo file is# msgctxt + "\x04" + msgid (gettext version >= 0.15)# Acceptable .mo versions# Delay struct import for speeding up gettext import when .mo files# are not used.# Parse the .mo file header, which consists of 5 little endian 32# bit words.# germanic plural by default# Are we big endian or little endian?# Now put all messages from the .mo file buffer into the catalog# dictionary.# See if we're looking at GNU .mo conventions for metadata# Catalog description# Skip over comment lines:# Note: we unconditionally convert both msgids and msgstrs to# Unicode using the character encoding specified in the charset# parameter of the Content-Type header. The gettext documentation# strongly encourages msgids to be us-ascii, but some applications# require alternative encodings (e.g. Zope's ZCML and ZPT). For# traditional gettext applications, the msgid conversion will# cause no problems since us-ascii should always be a subset of# the charset encoding. We may want to fall back to 8-bit msgids# if the Unicode conversion fails.# Plural forms# advance to next entry in the seek tables# Locate a .mo file using the gettext strategy# Get some reasonable defaults for arguments that were not supplied# now normalize and expand the languages# select a language# a mapping between absolute .mo file path and Translation object# Avoid opening, reading, and parsing the .mo file after it's been done# once.# Copy the translation object to allow setting fallbacks and# output charset. All other instance data is shared with the# cached object.# Delay copy import for speeding up gettext import when .mo files# a mapping b/w domains and locale directories# a mapping b/w domains and codesets# current global domain, `messages' used for compatibility w/ GNU gettext# dcgettext() has been deemed unnecessary and is not implemented.# James Henstridge's Catalog constructor from GNOME gettext. Documented usage# was:# import gettext# cat = gettext.Catalog(PACKAGE, localedir=LOCALEDIR)# _ = cat.gettext# print _('Hello World')# The resulting catalog object currently don't support access through a# dictionary API, which was supported (but apparently unused) in GNOME# gettext.b'Internationalization and localization support. + +This module provides internationalization (I18N) and localization (L10N) +support for your Python programs by providing an interface to the GNU gettext +message catalog library. + +I18N refers to the operation by which a program is made aware of multiple +languages. L10N refers to the adaptation of your program, once +internationalized, to the local language and cultural habits. + +'u'Internationalization and localization support. + +This module provides internationalization (I18N) and localization (L10N) +support for your Python programs by providing an interface to the GNU gettext +message catalog library. 
+ +I18N refers to the operation by which a program is made aware of multiple +languages. L10N refers to the adaptation of your program, once +internationalized, to the local language and cultural habits. + +'b'NullTranslations'u'NullTranslations'b'GNUTranslations'u'GNUTranslations'b'Catalog'u'Catalog'b'translation'u'translation'b'install'u'install'b'textdomain'u'textdomain'b'bindtextdomain'u'bindtextdomain'b'bind_textdomain_codeset'u'bind_textdomain_codeset'b'dgettext'u'dgettext'b'dngettext'u'dngettext'b'gettext'u'gettext'b'lgettext'u'lgettext'b'ldgettext'u'ldgettext'b'ldngettext'u'ldngettext'b'lngettext'u'lngettext'b'ngettext'u'ngettext'b'pgettext'u'pgettext'b'dpgettext'u'dpgettext'b'npgettext'u'npgettext'b'dnpgettext'u'dnpgettext'b'share'u'share'b'locale'b' + (?P[ \t]+) | # spaces and horizontal tabs + (?P[0-9]+\b) | # decimal integer + (?Pn\b) | # only n is allowed + (?P[()]) | + (?P[-*/%+?:]|[>, + # <=, >=, ==, !=, &&, ||, + # ? : + # unary and bitwise ops + # not allowed + (?P\w+|.) # invalid token + 'u' + (?P[ \t]+) | # spaces and horizontal tabs + (?P[0-9]+\b) | # decimal integer + (?Pn\b) | # only n is allowed + (?P[()]) | + (?P[-*/%+?:]|[>, + # <=, >=, ==, !=, &&, ||, + # ? : + # unary and bitwise ops + # not allowed + (?P\w+|.) # invalid token + 'b'WHITESPACES'u'WHITESPACES'b'INVALID'u'INVALID'b'invalid token in plural form: %s'u'invalid token in plural form: %s'b'unexpected token in plural form: %s'u'unexpected token in plural form: %s'b'unexpected end of plural form'u'unexpected end of plural form'b'||'u'||'b'&&'u'&&'u'=='b'!='u'!='b'<='u'<='b'>='u'>='b'not 'u'not 'b'unbalanced parenthesis in plural form'u'unbalanced parenthesis in plural form'b'%s%s'u'%s%s'b'%s%d'u'%s%d'b'(%s)'u'(%s)'b'%s if %s else %s'u'%s if %s else %s'b'Plural value must be an integer, got %s'u'Plural value must be an integer, got %s'b'Gets a C expression as used in PO files for plural forms and returns a + Python function that implements an equivalent expression. + 'u'Gets a C expression as used in PO files for plural forms and returns a + Python function that implements an equivalent expression. 
+ 'b'plural form expression is too long'u'plural form expression is too long'b'plural form expression is too complex'u'plural form expression is too complex'b'_as_int'u'_as_int'b'if True: + def func(n): + if not isinstance(n, int): + n = _as_int(n) + return int(%s) + 'u'if True: + def func(n): + if not isinstance(n, int): + n = _as_int(n) + return int(%s) + 'b'lgettext() is deprecated, use gettext() instead'u'lgettext() is deprecated, use gettext() instead'b'.*\blgettext\b.*'u'.*\blgettext\b.*'b'lngettext() is deprecated, use ngettext() instead'u'lngettext() is deprecated, use ngettext() instead'b'.*\blngettext\b.*'u'.*\blngettext\b.*'b'output_charset() is deprecated'u'output_charset() is deprecated'b'set_output_charset() is deprecated'u'set_output_charset() is deprecated'b'%s%s'u'%s%s'b'Returns a tuple of major version, minor version'u'Returns a tuple of major version, minor version'b'Override this method to support alternative .mo formats.'u'Override this method to support alternative .mo formats.'b'4I'u'>4I'b'>II'u'>II'b'Bad magic number'u'Bad magic number'b'Bad version number 'u'Bad version number 'b'File is corrupt'u'File is corrupt'b'#-#-#-#-#'u'#-#-#-#-#'b'content-type'u'content-type'b'charset='u'charset='b'plural-forms'u'plural-forms'b'plural='u'plural='b'LANGUAGE'u'LANGUAGE'b'LC_ALL'u'LC_ALL'b'LC_MESSAGES'u'LC_MESSAGES'b'LANG'u'LANG'b'C'u'C'b'%s.mo'u'%s.mo'b'unspecified'u'unspecified'b'No translation file found for domain'u'No translation file found for domain'b'parameter codeset is deprecated'u'parameter codeset is deprecated'b'.*\bset_output_charset\b.*'u'.*\bset_output_charset\b.*'b'messages'u'messages'b'bind_textdomain_codeset() is deprecated'u'bind_textdomain_codeset() is deprecated'b'ldgettext() is deprecated, use dgettext() instead'u'ldgettext() is deprecated, use dgettext() instead'b'.*\bparameter codeset\b.*'u'.*\bparameter codeset\b.*'b'ldngettext() is deprecated, use dngettext() instead'u'ldngettext() is deprecated, use dngettext() instead'b'.*\bldgettext\b.*'u'.*\bldgettext\b.*'b'.*\bldngettext\b.*'u'.*\bldngettext\b.*'Filename globbing utility.iglobReturn a list of paths matching a pathname pattern. + + The pattern may contain simple shell-style wildcards a la + fnmatch. However, unlike fnmatch, filenames starting with a + dot are special cases that are not matched by '*' and '?' + patterns. + + If recursive is true, the pattern '**' will match any files and + zero or more directories and subdirectories. + Return an iterator which yields the paths matching a pathname pattern. + + The pattern may contain simple shell-style wildcards a la + fnmatch. However, unlike fnmatch, filenames starting with a + dot are special cases that are not matched by '*' and '?' + patterns. + + If recursive is true, the pattern '**' will match any files and + zero or more directories and subdirectories. + glob.glob_iglob_isrecursivedironlyhas_magic_glob2_glob1glob_in_dir_glob0_iterdir_ishiddenglob0glob1_rlistdirscandiris_dir([*?[])magic_checkmagic_check_bytes**Escape all special characters. + [\1]# skip empty string# Patterns ending with a slash should match only directories# `os.path.split()` returns the argument itself as a dirname if it is a# drive or UNC path. Prevent an infinite recursion if a drive or UNC path# contains magic characters (i.e. r'\\?\C:').# These 2 helper functions non-recursively glob inside a literal directory.# They return a list of basenames. 
_glob1 accepts a pattern while _glob0# takes a literal basename (so it only has to check for its existence).# `os.path.split()` returns an empty basename for paths ending with a# directory separator. 'q*x/' should match only directories.# Following functions are not public but can be used by third-party code.# This helper function recursively yields relative pathnames inside a literal# directory.# If dironly is false, yields all file names inside a directory.# If dironly is true, yields only directory names.# Recursively yields relative pathnames inside a literal directory.# Escaping is done by wrapping any of "*?[" between square brackets.# Metacharacters do not work in the drive part and shouldn't be escaped.b'Filename globbing utility.'u'Filename globbing utility.'b'glob'u'glob'b'iglob'u'iglob'b'Return a list of paths matching a pathname pattern. + + The pattern may contain simple shell-style wildcards a la + fnmatch. However, unlike fnmatch, filenames starting with a + dot are special cases that are not matched by '*' and '?' + patterns. + + If recursive is true, the pattern '**' will match any files and + zero or more directories and subdirectories. + 'u'Return a list of paths matching a pathname pattern. + + The pattern may contain simple shell-style wildcards a la + fnmatch. However, unlike fnmatch, filenames starting with a + dot are special cases that are not matched by '*' and '?' + patterns. + + If recursive is true, the pattern '**' will match any files and + zero or more directories and subdirectories. + 'b'Return an iterator which yields the paths matching a pathname pattern. + + The pattern may contain simple shell-style wildcards a la + fnmatch. However, unlike fnmatch, filenames starting with a + dot are special cases that are not matched by '*' and '?' + patterns. + + If recursive is true, the pattern '**' will match any files and + zero or more directories and subdirectories. + 'u'Return an iterator which yields the paths matching a pathname pattern. + + The pattern may contain simple shell-style wildcards a la + fnmatch. However, unlike fnmatch, filenames starting with a + dot are special cases that are not matched by '*' and '?' + patterns. + + If recursive is true, the pattern '**' will match any files and + zero or more directories and subdirectories. + 'b'glob.glob'u'glob.glob'b'([*?[])'u'([*?[])'b'**'u'**'b'Escape all special characters. + 'u'Escape all special characters. + 'b'[\1]'u'[\1]'This module defines the data structures used to represent a grammar. + +These are a bit arcane because they are derived from the data +structures used by Python's 'pgen' parser generator. + +There's also a table here mapping operators to their names in the +token module; the Python tokenize module reports all operators as the +fallback token code OP, but the parser needs the actual token code. + +Pgen parsing tables conversion class. + + Once initialized, this class supplies the grammar tables for the + parsing engine implemented by parse.py. The parsing engine + accesses the instance variables directly. The class here does not + provide initialization of the tables; several subclasses exist to + do this (see the conv and pgen modules). + + The load() method reads the tables from a pickle file, which is + much faster than the other ways offered by subclasses. The pickle + file is written by calling dump() (after loading the grammar + tables using a subclass). The report() method prints a readable + representation of the tables to stdout, for debugging. 
+ + The instance variables are as follows: + + symbol2number -- a dict mapping symbol names to numbers. Symbol + numbers are always 256 or higher, to distinguish + them from token numbers, which are between 0 and + 255 (inclusive). + + number2symbol -- a dict mapping numbers to symbol names; + these two are each other's inverse. + + states -- a list of DFAs, where each DFA is a list of + states, each state is a list of arcs, and each + arc is a (i, j) pair where i is a label and j is + a state number. The DFA number is the index into + this list. (This name is slightly confusing.) + Final states are represented by a special arc of + the form (0, j) where j is its own state number. + + dfas -- a dict mapping symbol numbers to (DFA, first) + pairs, where DFA is an item from the states list + above, and first is a set of tokens that can + begin this grammar rule (represented by a dict + whose values are always 1). + + labels -- a list of (x, y) pairs where x is either a token + number or a symbol number, and y is either None + or a string; the strings are keywords. The label + number is the index in this list; label numbers + are used to mark state transitions (arcs) in the + DFAs. + + start -- the number of the grammar's start symbol. + + keywords -- a dict mapping keyword strings to arc labels. + + tokens -- a dict mapping token numbers to arc labels. + + symbol2numbernumber2symbolstatesdfasEMPTYsymbol2labelDump the grammar tables to a pickle file.HIGHEST_PROTOCOLLoad the grammar tables from a pickle file.pklLoad the grammar tables from a pickle bytes object. + Copy the grammar. + dict_attrDump the grammar tables to standard output, for debugging.s2nn2s +( LPAR +) RPAR +[ LSQB +] RSQB +: COLON +, COMMA +; SEMI ++ PLUS +- MINUS +* STAR +/ SLASH +| VBAR +& AMPER +< LESS +> GREATER += EQUAL +. DOT +% PERCENT +` BACKQUOTE +{ LBRACE +} RBRACE +@ AT +@= ATEQUAL +== EQEQUAL +!= NOTEQUAL +<> NOTEQUAL +<= LESSEQUAL +>= GREATEREQUAL +~ TILDE +^ CIRCUMFLEX +<< LEFTSHIFT +>> RIGHTSHIFT +** DOUBLESTAR ++= PLUSEQUAL +-= MINEQUAL +*= STAREQUAL +/= SLASHEQUAL +%= PERCENTEQUAL +&= AMPEREQUAL +|= VBAREQUAL +^= CIRCUMFLEXEQUAL +<<= LEFTSHIFTEQUAL +>>= RIGHTSHIFTEQUAL +**= DOUBLESTAREQUAL +// DOUBLESLASH +//= DOUBLESLASHEQUAL +-> RARROW +:= COLONEQUAL +opmap_raw# Map from operator to number (since tokenize doesn't do this)b'This module defines the data structures used to represent a grammar. + +These are a bit arcane because they are derived from the data +structures used by Python's 'pgen' parser generator. + +There's also a table here mapping operators to their names in the +token module; the Python tokenize module reports all operators as the +fallback token code OP, but the parser needs the actual token code. + +'u'This module defines the data structures used to represent a grammar. + +These are a bit arcane because they are derived from the data +structures used by Python's 'pgen' parser generator. + +There's also a table here mapping operators to their names in the +token module; the Python tokenize module reports all operators as the +fallback token code OP, but the parser needs the actual token code. + +'b'Pgen parsing tables conversion class. + + Once initialized, this class supplies the grammar tables for the + parsing engine implemented by parse.py. The parsing engine + accesses the instance variables directly. The class here does not + provide initialization of the tables; several subclasses exist to + do this (see the conv and pgen modules). 
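The surrounding entries come from lib2to3.pgen2.grammar: the Grammar class holds the pgen parsing tables (symbol2number, dfas, labels, ...) and can round-trip them through a pickle file with dump() and load(). A small sketch of that round trip, assuming the stock grammar shipped with lib2to3 (deprecated in newer Pythons but present in the 3.8 interpreter recorded in this database):

from lib2to3 import pygram
from lib2to3.pgen2 import grammar

g = pygram.python_grammar                      # a pre-built Grammar instance
print(len(g.symbol2number), "symbols,", len(g.dfas), "DFAs")

# dump()/load() write and read the tables as a pickle, as described above.
g.dump("python-grammar.pickle")
g2 = grammar.Grammar()
g2.load("python-grammar.pickle")
assert g2.symbol2number == g.symbol2number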
+ + The load() method reads the tables from a pickle file, which is + much faster than the other ways offered by subclasses. The pickle + file is written by calling dump() (after loading the grammar + tables using a subclass). The report() method prints a readable + representation of the tables to stdout, for debugging. + + The instance variables are as follows: + + symbol2number -- a dict mapping symbol names to numbers. Symbol + numbers are always 256 or higher, to distinguish + them from token numbers, which are between 0 and + 255 (inclusive). + + number2symbol -- a dict mapping numbers to symbol names; + these two are each other's inverse. + + states -- a list of DFAs, where each DFA is a list of + states, each state is a list of arcs, and each + arc is a (i, j) pair where i is a label and j is + a state number. The DFA number is the index into + this list. (This name is slightly confusing.) + Final states are represented by a special arc of + the form (0, j) where j is its own state number. + + dfas -- a dict mapping symbol numbers to (DFA, first) + pairs, where DFA is an item from the states list + above, and first is a set of tokens that can + begin this grammar rule (represented by a dict + whose values are always 1). + + labels -- a list of (x, y) pairs where x is either a token + number or a symbol number, and y is either None + or a string; the strings are keywords. The label + number is the index in this list; label numbers + are used to mark state transitions (arcs) in the + DFAs. + + start -- the number of the grammar's start symbol. + + keywords -- a dict mapping keyword strings to arc labels. + + tokens -- a dict mapping token numbers to arc labels. + + 'u'Pgen parsing tables conversion class. + + Once initialized, this class supplies the grammar tables for the + parsing engine implemented by parse.py. The parsing engine + accesses the instance variables directly. The class here does not + provide initialization of the tables; several subclasses exist to + do this (see the conv and pgen modules). + + The load() method reads the tables from a pickle file, which is + much faster than the other ways offered by subclasses. The pickle + file is written by calling dump() (after loading the grammar + tables using a subclass). The report() method prints a readable + representation of the tables to stdout, for debugging. + + The instance variables are as follows: + + symbol2number -- a dict mapping symbol names to numbers. Symbol + numbers are always 256 or higher, to distinguish + them from token numbers, which are between 0 and + 255 (inclusive). + + number2symbol -- a dict mapping numbers to symbol names; + these two are each other's inverse. + + states -- a list of DFAs, where each DFA is a list of + states, each state is a list of arcs, and each + arc is a (i, j) pair where i is a label and j is + a state number. The DFA number is the index into + this list. (This name is slightly confusing.) + Final states are represented by a special arc of + the form (0, j) where j is its own state number. + + dfas -- a dict mapping symbol numbers to (DFA, first) + pairs, where DFA is an item from the states list + above, and first is a set of tokens that can + begin this grammar rule (represented by a dict + whose values are always 1). + + labels -- a list of (x, y) pairs where x is either a token + number or a symbol number, and y is either None + or a string; the strings are keywords. 
The label + number is the index in this list; label numbers + are used to mark state transitions (arcs) in the + DFAs. + + start -- the number of the grammar's start symbol. + + keywords -- a dict mapping keyword strings to arc labels. + + tokens -- a dict mapping token numbers to arc labels. + + 'b'EMPTY'u'EMPTY'b'Dump the grammar tables to a pickle file.'u'Dump the grammar tables to a pickle file.'b'Load the grammar tables from a pickle file.'u'Load the grammar tables from a pickle file.'b'Load the grammar tables from a pickle bytes object.'u'Load the grammar tables from a pickle bytes object.'b' + Copy the grammar. + 'u' + Copy the grammar. + 'b'symbol2number'u'symbol2number'b'number2symbol'u'number2symbol'b'dfas'u'dfas'b'tokens'u'tokens'b'symbol2label'u'symbol2label'b'Dump the grammar tables to standard output, for debugging.'u'Dump the grammar tables to standard output, for debugging.'b's2n'u's2n'b'n2s'u'n2s'b'states'u'states'b'labels'u'labels'b' +( LPAR +) RPAR +[ LSQB +] RSQB +: COLON +, COMMA +; SEMI ++ PLUS +- MINUS +* STAR +/ SLASH +| VBAR +& AMPER +< LESS +> GREATER += EQUAL +. DOT +% PERCENT +` BACKQUOTE +{ LBRACE +} RBRACE +@ AT +@= ATEQUAL +== EQEQUAL +!= NOTEQUAL +<> NOTEQUAL +<= LESSEQUAL +>= GREATEREQUAL +~ TILDE +^ CIRCUMFLEX +<< LEFTSHIFT +>> RIGHTSHIFT +** DOUBLESTAR ++= PLUSEQUAL +-= MINEQUAL +*= STAREQUAL +/= SLASHEQUAL +%= PERCENTEQUAL +&= AMPEREQUAL +|= VBAREQUAL +^= CIRCUMFLEXEQUAL +<<= LEFTSHIFTEQUAL +>>= RIGHTSHIFTEQUAL +**= DOUBLESTAREQUAL +// DOUBLESLASH +//= DOUBLESLASHEQUAL +-> RARROW +:= COLONEQUAL +'u' +( LPAR +) RPAR +[ LSQB +] RSQB +: COLON +, COMMA +; SEMI ++ PLUS +- MINUS +* STAR +/ SLASH +| VBAR +& AMPER +< LESS +> GREATER += EQUAL +. DOT +% PERCENT +` BACKQUOTE +{ LBRACE +} RBRACE +@ AT +@= ATEQUAL +== EQEQUAL +!= NOTEQUAL +<> NOTEQUAL +<= LESSEQUAL +>= GREATEREQUAL +~ TILDE +^ CIRCUMFLEX +<< LEFTSHIFT +>> RIGHTSHIFT +** DOUBLESTAR ++= PLUSEQUAL +-= MINEQUAL +*= STAREQUAL +/= SLASHEQUAL +%= PERCENTEQUAL +&= AMPEREQUAL +|= VBAREQUAL +^= CIRCUMFLEXEQUAL +<<= LEFTSHIFTEQUAL +>>= RIGHTSHIFTEQUAL +**= DOUBLESTAREQUAL +// DOUBLESLASH +//= DOUBLESLASHEQUAL +-> RARROW +:= COLONEQUAL +'u'lib2to3.pgen2.grammar'u'pgen2.grammar'u'grammar'u'Access to the Unix group database. + +Group entries are reported as 4-tuples containing the following fields +from the group database, in order: + + gr_name - name of the group + gr_passwd - group password (encrypted); often empty + gr_gid - numeric ID of the group + gr_mem - list of members + +The gid is an integer, name and password are strings. (Note that most +users are not explicitly listed as members of the groups they are in +according to the password database. Check both databases to get +complete membership information.)'u'/Users/pwntester/.pyenv/versions/3.8.13/lib/python3.8/lib-dynload/grp.cpython-38-darwin.so'u'grp'getgrallgetgrgidgetgrnamu'grp.struct_group: Results from getgr*() routines. + +This object may be accessed either as a tuple of + (gr_name,gr_passwd,gr_gid,gr_mem) +or via the object attributes as named in the above tuple. +'gr_gidgr_memgr_namegr_passwdgrp.struct_groupstruct_groupgrpFunctions that read and write gzipped files. + +The user of the file doesn't have to worry about the compression, +but random access is not allowed.BadGzipFileFTEXTFHCRCFEXTRAFNAMEFCOMMENTREADWRITE_COMPRESS_LEVEL_FAST_COMPRESS_LEVEL_TRADEOFF_COMPRESS_LEVEL_BESTOpen a gzip-compressed file in binary or text mode. 
+ + The filename argument can be an actual filename (a str or bytes object), or + an existing file object to read from or write to. + + The mode argument can be "r", "rb", "w", "wb", "x", "xb", "a" or "ab" for + binary mode, or "rt", "wt", "xt" or "at" for text mode. The default mode is + "rb", and the default compresslevel is 9. + + For binary mode, this function is equivalent to the GzipFile constructor: + GzipFile(filename, mode, compresslevel). In this case, the encoding, errors + and newline arguments must not be provided. + + For text mode, a GzipFile object is created, and wrapped in an + io.TextIOWrapper instance with the specified encoding, error handling + behavior, and line ending(s). + + gz_modefilename must be a str or bytes object, or a filewrite32u= 1, the system will successively create + new files with the same pathname as the base file, but with extensions + ".1", ".2" etc. appended to it. For example, with a backupCount of 5 + and a base file name of "app.log", you would get "app.log", + "app.log.1", "app.log.2", ... through to "app.log.5". The file being + written to is always "app.log" - when it gets filled up, it is closed + and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. + exist, then they are renamed to "app.log.2", "app.log.3" etc. + respectively. + + If maxBytes is zero, rollover never occurs. + + Do a rollover, as described in __init__(). + %s.%dsfndfn.1 + Determine if rollover should occur. + + Basically, see if the supplied record would cause the file to exceed + the size limit we have. + TimedRotatingFileHandler + Handler for logging to a file, rotating the log file at certain timed + intervals. + + If backupCount is > 0, when rollover is done, no more than backupCount + files are kept - the oldest ones are deleted. + atTime%Y-%m-%d_%H-%M-%S^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$extMatch%Y-%m-%d_%H-%M^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$%Y-%m-%d_%H^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$MIDNIGHT%Y-%m-%d^\d{4}-\d{2}-\d{2}(\.\w+)?$You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %sInvalid day specified for weekly rollover: %sdayOfWeekInvalid rollover interval specified: %scomputeRolloverrolloverAt + Work out the rollover time based on the specified time. + currentHourcurrentMinutecurrentSecondcurrentDayrotate_tsdaysToWaitnewRolloverAtdstNowdstAtRolloveraddend + Determine if rollover should occur. + + record is not used, as we are just comparing times, but it is needed so + the method signatures are the same + getFilesToDelete + Determine the files to delete when rolling over. + + More specific than the earlier method, which just used glob.glob(). + dirNamefileNamesplen + do a rollover; in this case, a date/time stamp is appended to the filename + when the rollover happens. However, you want the file to be named for the + start of the interval, not the current time. If there is a backup count, + then we have to get a list of matching filenames, sort them and remove + the one with the oldest suffix. + timeTupledstThenWatchedFileHandler + A handler for logging to a file, which watches the file + to see if it has changed while in use. This can happen because of + usage of programs such as newsyslog and logrotate which perform + log file rotation. This handler, intended for use under Unix, + watches the file to see if it has changed since the last emit. + (A file has changed if its device or inode have changed.) + If it has changed, the old file stream is closed, and the file + opened to get a new stream. 
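Earlier in this entry the gzip.open() docstring is reproduced: binary modes are equivalent to the GzipFile constructor, while text modes wrap the GzipFile in an io.TextIOWrapper. A short illustration (the file name is an arbitrary example):

import gzip

# Text mode: GzipFile wrapped in io.TextIOWrapper with the given encoding.
with gzip.open("example.txt.gz", "wt", encoding="utf-8") as fh:
    fh.write("hello, compressed world\n")

# Binary mode: equivalent to GzipFile("example.txt.gz", "rb").
with gzip.open("example.txt.gz", "rb") as fh:
    print(fh.read().decode("utf-8"))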
+ + This handler is not appropriate for use under Windows, because + under Windows open files cannot be moved or renamed - logging + opens the files with exclusive locks - and so there is no need + for such a handler. Furthermore, ST_INO is not supported under + Windows; stat always returns zero for this value. + + This handler is based on a suggestion and patch by Chad J. + Schroeder. + devino_statstreamsresreopenIfNeeded + Reopen log file if needed. + + Checks if the underlying file has changed, and if it + has, close the old stream and reopen the file to get the + current stream. + + Emit a record. + + If underlying file has changed, reopen the file before emitting the + record to it. + SocketHandler + A handler class which writes logging records, in pickle format, to + a streaming socket. The socket is kept open across logging calls. + If the peer resets it, an attempt is made to reconnect on the next call. + The pickle which is sent is that of the LogRecord's attribute dictionary + (__dict__), so that the receiver does not need to have the logging module + installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + + Initializes the handler with a specific host address and port. + + When the attribute *closeOnError* is set to True - if a socket error + occurs, the socket is silently closed and then reopened on the next + logging call. + closeOnErrorretryTimeretryStartretryMaxretryFactormakeSocket + A factory method which allows subclasses to define the precise + type of socket they want. + createSocket + Try to create a socket, using an exponential backoff with + a max retry time. Thanks to Robert Olson for the original patch + (SF #815911) which has been slightly refactored. + attemptretryPeriod + Send a pickled string to the socket. + + This function allows for partial sends which can happen when the + network is busy. + makePickle + Pickles the record in binary format with a length prefix, and + returns it ready for transmission across the socket. + dummy>Lslen + Handle an error during logging. + + An error has occurred during logging. Most likely cause - + connection lost. Close the socket so that we can retry on the + next event. + + Emit a record. + + Pickles the record and writes it to the socket in binary format. + If there is an error with the socket, silently drop the packet. + If there was a problem with the socket, re-establishes the + socket. + + Closes the socket. + DatagramHandler + A handler class which writes logging records, in pickle format, to + a datagram socket. The pickle which is sent is that of the LogRecord's + attribute dictionary (__dict__), so that the receiver does not need to + have the logging module installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + + + Initializes the handler with a specific host address and port. + + The factory method of SocketHandler is here overridden to create + a UDP socket (SOCK_DGRAM). + + Send a pickled string to a socket. + + This function no longer allows for partial sends which can happen + when the network is busy - UDP does not guarantee delivery and + can deliver packets out of sequence. + SysLogHandler + A handler class which sends formatted logging records to a syslog + server. 
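The RotatingFileHandler and TimedRotatingFileHandler docstrings quoted just above describe size-based and time-based rollover. A minimal configuration sketch (file names and limits are placeholders):

import logging
from logging.handlers import RotatingFileHandler, TimedRotatingFileHandler

logger = logging.getLogger("app")
logger.setLevel(logging.INFO)

# Size-based rollover: app.log -> app.log.1 ... app.log.5 once ~1 MB is reached.
size_handler = RotatingFileHandler("app.log", maxBytes=1_000_000, backupCount=5)

# Time-based rollover: a fresh file at each midnight, keeping 7 old files.
time_handler = TimedRotatingFileHandler("timed.log", when="midnight", backupCount=7)

fmt = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
for handler in (size_handler, time_handler):
    handler.setFormatter(fmt)
    logger.addHandler(handler)

logger.info("hello from a rotating log")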
Based on Sam Rushing's syslog module: + http://www.nightmare.com/squirl/python-ext/misc/syslog.py + Contributed by Nicolas Untz (after which minor refactoring changes + have been made). + LOG_EMERGLOG_ALERTLOG_CRITLOG_ERRLOG_WARNINGLOG_NOTICELOG_INFOLOG_DEBUGLOG_KERNLOG_USERLOG_MAILLOG_DAEMONLOG_AUTHLOG_SYSLOGLOG_LPRLOG_NEWSLOG_UUCPLOG_CRONLOG_AUTHPRIVLOG_FTPLOG_LOCAL0LOG_LOCAL1LOG_LOCAL2LOG_LOCAL3LOG_LOCAL4LOG_LOCAL5LOG_LOCAL6LOG_LOCAL7alertcritemergnoticepanicpriority_namesauthprivcrondaemonkernlprmailnewssecuritysysloguucplocal0local1local2local3local4local5local6local7facility_namespriority_mapfacility + Initialize a handler. + + If address is specified as a string, a UNIX socket is used. To log to a + local syslogd, "SysLogHandler(address="/dev/log")" can be used. + If facility is not specified, LOG_USER is used. If socktype is + specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific + socket type will be used. For Unix sockets, you can also specify a + socktype of None, in which case socket.SOCK_DGRAM will be used, falling + back to socket.SOCK_STREAM. + unixsocket_connect_unixsocketressgetaddrinfo returns an empty listuse_socktypeencodePriority + Encode the facility and priority. You can pass in strings or + integers - if strings are passed, the facility_names and + priority_names mapping dictionaries are used to convert them to + integers. + mapPriority + Map a logging level name to a key in the priority_names map. + This is useful in two scenarios: when custom levels are being + used, and in the case where you can't do a straightforward + mapping by lowercasing the logging level name because of locale- + specific issues (see SF #1524081). + identappend_nul + Emit a record. + + The record is formatted, and then sent to the syslog server. If + exception information is present, it is NOT sent to the server. + <%d>prioSMTPHandler + A handler class which sends an SMTP email for each logging event. + 5.0mailhostfromaddrtoaddrssubjectcredentials + Initialize the handler. + + Initialize the instance with the from and to addresses and subject + line of the email. To specify a non-standard SMTP port, use the + (host, port) tuple format for the mailhost argument. To specify + authentication credentials, supply a (username, password) tuple + for the credentials argument. To specify the use of a secure + protocol (TLS), pass in a tuple for the secure argument. This will + only be used when authentication credentials are supplied. The tuple + will be either an empty tuple, or a single-value tuple with the name + of a keyfile, or a 2-value tuple with the names of the keyfile and + certificate file. (This tuple is passed to the `starttls` method). + A timeout in seconds can be specified for the SMTP connection (the + default is one second). + mailportusernamegetSubject + Determine the subject for the email. + + If you want to specify a subject line which is record-dependent, + override this method. + + Emit a record. + + Format the record and send it to the specified addressees. + smtplibEmailMessageSMTP_PORTSMTPsmtpFromToSubjectDateset_contentehlostarttlssend_messageNTEventLogHandler + A handler class which sends events to the NT Event Log. Adds a + registry entry for the specified application name. If no dllname is + provided, win32service.pyd (which contains some basic message + placeholders) is used. Note that use of these placeholders will make + your event logs big, as the entire message source is held in the log. 
+ If you want slimmer logs, you have to pass in the name of your own DLL + which contains the message definitions you want to use in the event log. + Applicationappnamedllnamelogtypewin32evtlogutilwin32evtlog_weluwin32service.pydAddSourceToRegistryEVENTLOG_ERROR_TYPEdeftypeEVENTLOG_INFORMATION_TYPEEVENTLOG_WARNING_TYPEtypemapThe Python Win32 extensions for NT (service, event logging) appear not to be available."The Python Win32 extensions for NT (service, event ""logging) appear not to be available."getMessageID + Return the message ID for the event record. If you are using your + own messages, you could do this by having the msg passed to the + logger being an ID rather than a formatting string. Then, in here, + you could use a dictionary lookup to get the message ID. This + version returns 1, which is the base message ID in win32service.pyd. + getEventCategory + Return the event category for the record. + + Override this if you want to specify your own categories. This version + returns 0. + getEventType + Return the event type for the record. + + Override this if you want to specify your own types. This version does + a mapping using the handler's typemap attribute, which is set up in + __init__() to a dictionary which contains mappings for DEBUG, INFO, + WARNING, ERROR and CRITICAL. If you are using your own levels you will + either need to override this method or place a suitable dictionary in + the handler's typemap attribute. + + Emit a record. + + Determine the message ID, event category and event type. Then + log the message in the NT event log. + ReportEvent + Clean up this handler. + + You can remove the application name from the registry as a + source of event log entries. However, if you do this, you will + not be able to see the events as you intended in the Event Log + Viewer - it needs to be able to access the registry to get the + DLL name. + HTTPHandler + A class which sends records to a Web server, using either GET or + POST semantics. + GET + Initialize the instance with the host, the request URL, and the method + ("GET" or "POST") + method must be GET or POSTcontext parameter only makes sense with secure=True"context parameter only makes sense ""with secure=True"mapLogRecord + Default implementation of mapping the log record into a dict + that is sent as the CGI data. Overwrite in your class. + Contributed by Franz Glasner. + + Emit a record. + + Send the record to the Web server as a percent-encoded dictionary + %c%sContent-typeapplication/x-www-form-urlencodedContent-length + A handler class which buffers logging records in memory. Whenever each + record is added to the buffer, a check is made to see if the buffer should + be flushed. If it should, then flush() is expected to do what's needed. + capacity + Initialize the handler with the buffer size. + + Should the handler flush its buffer? + + Returns true if the buffer is up to capacity. This method can be + overridden to implement custom flushing strategies. + + Emit a record. + + Append the record. If shouldFlush() tells us to, call flush() to process + the buffer. + + Override to implement custom flushing behaviour. + + This version just zaps the buffer to empty. + + Close the handler. + + This version just flushes and chains to the parent class' close(). + MemoryHandler + A handler class which buffers logging records in memory, periodically + flushing them to a target handler. Flushing occurs whenever the buffer + is full, or when an event of a certain severity or greater is seen. 
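HTTPHandler and the buffering handlers described above combine naturally: MemoryHandler holds records until its buffer fills or a record at flushLevel arrives, then forwards the batch to a target handler. A sketch with a placeholder endpoint:

import logging
from logging.handlers import HTTPHandler, MemoryHandler

# Each flushed record is POSTed as a form-encoded dict (see mapLogRecord above);
# the host and URL here are stand-ins.
http_handler = HTTPHandler("logs.example.com", "/collect", method="POST")

# Buffer up to 100 records; an ERROR (or worse) forces an immediate flush.
buffered = MemoryHandler(capacity=100, flushLevel=logging.ERROR, target=http_handler)

logger = logging.getLogger("buffered-demo")
logger.addHandler(buffered)
logger.info("held in the buffer for now")
logger.error("this flushes everything buffered so far")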
+ flushLevelflushOnClose + Initialize the handler with the buffer size, the level at which + flushing should occur and an optional target. + + Note that without a target being set either here or via setTarget(), + a MemoryHandler is no use to anyone! + + The ``flushOnClose`` argument is ``True`` for backward compatibility + reasons - the old behaviour is that when the handler is closed, the + buffer is flushed, even if the flush level hasn't been exceeded nor the + capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``. + + Check for buffer full or a record at the flushLevel or higher. + setTarget + Set the target handler for this handler. + + For a MemoryHandler, flushing means just sending the buffered + records to the target, if there is one. Override if you want + different behaviour. + + The record buffer is also cleared by this operation. + + Flush, if appropriately configured, set the target to None and lose the + buffer. + QueueHandler + This handler sends events to a queue. Typically, it would be used together + with a multiprocessing Queue to centralise logging to file in one process + (in a multi-process application), so as to avoid file write contention + between processes. + + This code is new in Python 3.2, but this class can be copy pasted into + user code for use with earlier Python versions. + + Initialise an instance, using the passed queue. + enqueue + Enqueue a record. + + The base implementation uses put_nowait. You may want to override + this method if you want to use blocking, timeouts or custom queue + implementations. + prepare + Prepares a record for queuing. The object returned by this method is + enqueued. + + The base implementation formats the record to merge the message + and arguments, and removes unpickleable items from the record + in-place. + + You might want to override this method if you want to convert + the record to a dict or JSON string, or send a modified copy + of the record while leaving the original intact. + + Emit a record. + + Writes the LogRecord to the queue, preparing it for pickling first. + QueueListener + This class implements an internal threaded listener which watches for + LogRecords being added to a queue, removes them and passes them to a + list of handlers for processing. + respect_handler_level + Initialise an instance with the specified queue and + handlers. + dequeueblock + Dequeue a record and return it, optionally blocking. + + The base implementation uses get. You may want to override this method + if you want to use timeouts or work with custom queue implementations. + + Start the listener. + + This starts up a background thread to monitor the queue for + LogRecords to process. + Thread_monitor + Prepare a record for handling. + + This method just returns the passed-in record. You may want to + override this method if you need to do any custom marshalling or + manipulation of the record before passing it to the handlers. + + Handle a record. + + This just loops through the handlers offering them the record + to handle. + + Monitor the queue for records, and ask the handler + to deal with them. + + This method runs on a separate, internal thread. + The thread will terminate if it sees a sentinel object in the queue. + has_task_doneenqueue_sentinel + This is used to enqueue the sentinel record. + + The base implementation uses put_nowait. You may want to override this + method if you want to use timeouts or work with custom queue + implementations. + + Stop the listener. 
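QueueHandler and QueueListener, whose docstrings appear above, split logging across a queue: producers enqueue records cheaply while a background thread drains the queue and runs the real handlers. A minimal sketch:

import logging
import queue
from logging.handlers import QueueHandler, QueueListener

log_queue = queue.Queue()

# Producers only pay the cost of putting a record on the queue.
logger = logging.getLogger("queued")
logger.setLevel(logging.INFO)
logger.addHandler(QueueHandler(log_queue))

# The listener's internal thread passes records to the real handlers.
listener = QueueListener(log_queue, logging.StreamHandler(),
                         respect_handler_level=True)
listener.start()

logger.info("handled off the calling thread")
listener.stop()   # enqueues the sentinel and joins the worker thread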
+ + This asks the thread to terminate, and then waits for it to do so. + Note that if you don't call this before your application exits, there + may be some records still left on the queue, which won't be processed. + # Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.# Some constants...# number of seconds in a day# Issue 18940: A file may not have been created if delay is True.# If rotation/rollover is wanted, it doesn't make sense to use another# mode. If for example 'w' were specified, then if there were multiple# runs of the calling application, the logs from previous runs would be# lost if the 'w' is respected, because the log file would be truncated# on each run.# delay was set...# are we rolling over?#due to non-posix-compliant Windows feature# Calculate the real rollover interval, which is just the number of# seconds between rollovers. Also set the filename suffix used when# a rollover occurs. Current 'when' events supported:# S - Seconds# M - Minutes# H - Hours# D - Days# midnight - roll over at midnight# W{0-6} - roll over on a certain day; 0 - Monday# Case of the 'when' specifier is not important; lower or upper case# will work.# one second# one minute# one hour# one day# one week# multiply by units requested# The following line added because the filename passed in could be a# path object (see Issue #27493), but self.baseFilename will be a string# If we are rolling over at midnight or weekly, then the interval is already known.# What we need to figure out is WHEN the next interval is. In other words,# if you are rolling over at midnight, then your base interval is 1 day,# but you want to start that one day clock at midnight, not now. So, we# have to fudge the rolloverAt value in order to trigger the first rollover# at the right time. After that, the regular interval will take care of# the rest. Note that this code doesn't care about leap seconds. :)# This could be done with less code, but I wanted it to be clear# r is the number of seconds left between now and the next rotation# Rotate time is before the current time (for example when# self.rotateAt is 13:45 and it now 14:15), rotation is# tomorrow.# If we are rolling over on a certain day, add in the number of days until# the next rollover, but offset by 1 since we just calculated the time# until the next day starts. There are three cases:# Case 1) The day to rollover is today; in this case, do nothing# Case 2) The day to rollover is further in the interval (i.e., today is# day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to# next rollover is simply 6 - 2 - 1, or 3.# Case 3) The day to rollover is behind us in the interval (i.e., today# is day 5 (Saturday) and rollover is on day 3 (Thursday).# Days to rollover is 6 - 5 + 3, or 4. In this case, it's the# number of days left in the current week (1) plus the number# of days in the next week until the rollover day (3).# The calculations described in 2) and 3) above need to have a day added.# This is because the above time calculation takes us to midnight on this# day, i.e. 
the start of the next day.# 0 is Monday# DST kicks in before next rollover, so we need to deduct an hour# DST bows out before next rollover, so we need to add an hour# get the time that this sequence started at and make it a TimeTuple#If DST changes and midnight or weekly rollover, adjust for this.# Reduce the chance of race conditions by stat'ing by path only# once and then fstat'ing our new fd if we opened a new log stream.# See issue #14632: Thanks to John Mulligan for the problem report# and patch.# stat the file by path, checking for existence# compare file system stat with that of our stream file handle# we have an open file handle, clean it up# See Issue #21742: _open () might fail.# open a new file handle and get new stat info from that fd# Exponential backoff parameters.# Issue 19182# Either retryTime is None, in which case this# is the first time back after a disconnect, or# we've waited long enough.# next time, no delay before trying#Creation failed, so set the retry time and return.#self.sock can be None either because we haven't reached the retry#time yet, or because we have reached the retry time and retried,#but are still unable to connect.# so we can call createSocket next time# just to get traceback text into record.exc_text ...# See issue #14436: If msg or args are objects, they may not be# available on the receiving end. So we convert the msg % args# to a string, save it as msg and zap the args.# Issue #25685: delete 'message' if present: redundant with 'msg'#try to reconnect next time# from :# ======================================================================# priorities/facilities are encoded into a single 32-bit quantity, where# the bottom 3 bits are the priority (0-7) and the top 28 bits are the# facility (0-big number). Both the priorities and the facilities map# roughly one-to-one to strings in the syslogd(8) source code. This# mapping is included in this file.# priorities (these are ordered)# system is unusable# action must be taken immediately# critical conditions# error conditions# warning conditions# normal but significant condition# informational# debug-level messages# facility codes# kernel messages# random user-level messages# mail system# system daemons# security/authorization messages# messages generated internally by syslogd# line printer subsystem# network news subsystem# UUCP subsystem# clock daemon# security/authorization messages (private)# FTP daemon# other codes through 15 reserved for system use# reserved for local use# DEPRECATED#The map below appears to be trivially lowercasing the key. However,#there's more to it than meets the eye - in some locales, lowercasing#gives unexpected results. See SF #1524081: in the Turkish locale,#"INFO".lower() != "info"# Syslog server may be unavailable during handler initialisation.# C's openlog() function also ignores connection errors.# Moreover, we ignore these errors while logging, so it not worse# to ignore it also here.# it worked, so set self.socktype to the used type# user didn't specify falling back, so fail# prepended to all messages# some old syslog daemons expect a NUL terminator# We need to convert record level to lowercase, maybe this will# change in the future.# Message is a string. 
Convert to bytes as required by RFC 5424#self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)# support multiple hosts on one IP address...# need to strip optional :port from host, if present# See issue #30904: putrequest call above already adds this header# on Python 3.x.# h.putheader("Host", host)#can't do anything with the result# See Issue #26559 for why this has been added# The format operation gets traceback text into record.exc_text# (if there's exception data), and also returns the formatted# message. We can then use this to replace the original# msg + args, as these might be unpickleable. We also zap the# exc_info and exc_text attributes, as they are no longer# needed and, if not None, will typically not be pickleable.# bpo-35726: make copy of record to avoid affecting other handlers in the chain.b' +Additional handlers for the logging package for Python. The core package is +based on PEP 282 and comments thereto in comp.lang.python. + +Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging.handlers' and log away! +'u' +Additional handlers for the logging package for Python. The core package is +based on PEP 282 and comments thereto in comp.lang.python. + +Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging.handlers' and log away! +'b' + Base class for handlers that rotate log files at a certain point. + Not meant to be instantiated directly. Instead, use RotatingFileHandler + or TimedRotatingFileHandler. + 'u' + Base class for handlers that rotate log files at a certain point. + Not meant to be instantiated directly. Instead, use RotatingFileHandler + or TimedRotatingFileHandler. + 'b' + Use the specified filename for streamed logging + 'u' + Use the specified filename for streamed logging + 'b' + Emit a record. + + Output the record to the file, catering for rollover as described + in doRollover(). + 'u' + Emit a record. + + Output the record to the file, catering for rollover as described + in doRollover(). + 'b' + Modify the filename of a log file when rotating. + + This is provided so that a custom filename can be provided. + + The default implementation calls the 'namer' attribute of the + handler, if it's callable, passing the default name to + it. If the attribute isn't callable (the default is None), the name + is returned unchanged. + + :param default_name: The default name for the log file. + 'u' + Modify the filename of a log file when rotating. + + This is provided so that a custom filename can be provided. + + The default implementation calls the 'namer' attribute of the + handler, if it's callable, passing the default name to + it. If the attribute isn't callable (the default is None), the name + is returned unchanged. + + :param default_name: The default name for the log file. + 'b' + When rotating, rotate the current log. + + The default implementation calls the 'rotator' attribute of the + handler, if it's callable, passing the source and dest arguments to + it. If the attribute isn't callable (the default is None), the source + is simply renamed to the destination. + + :param source: The source filename. This is normally the base + filename, e.g. 'test.log' + :param dest: The destination filename. This is normally + what the source is rotated to, e.g. 'test.log.1'. + 'u' + When rotating, rotate the current log. + + The default implementation calls the 'rotator' attribute of the + handler, if it's callable, passing the source and dest arguments to + it. 
If the attribute isn't callable (the default is None), the source + is simply renamed to the destination. + + :param source: The source filename. This is normally the base + filename, e.g. 'test.log' + :param dest: The destination filename. This is normally + what the source is rotated to, e.g. 'test.log.1'. + 'b' + Handler for logging to a set of files, which switches from one file + to the next when the current file reaches a certain size. + 'u' + Handler for logging to a set of files, which switches from one file + to the next when the current file reaches a certain size. + 'b' + Open the specified file and use it as the stream for logging. + + By default, the file grows indefinitely. You can specify particular + values of maxBytes and backupCount to allow the file to rollover at + a predetermined size. + + Rollover occurs whenever the current log file is nearly maxBytes in + length. If backupCount is >= 1, the system will successively create + new files with the same pathname as the base file, but with extensions + ".1", ".2" etc. appended to it. For example, with a backupCount of 5 + and a base file name of "app.log", you would get "app.log", + "app.log.1", "app.log.2", ... through to "app.log.5". The file being + written to is always "app.log" - when it gets filled up, it is closed + and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. + exist, then they are renamed to "app.log.2", "app.log.3" etc. + respectively. + + If maxBytes is zero, rollover never occurs. + 'u' + Open the specified file and use it as the stream for logging. + + By default, the file grows indefinitely. You can specify particular + values of maxBytes and backupCount to allow the file to rollover at + a predetermined size. + + Rollover occurs whenever the current log file is nearly maxBytes in + length. If backupCount is >= 1, the system will successively create + new files with the same pathname as the base file, but with extensions + ".1", ".2" etc. appended to it. For example, with a backupCount of 5 + and a base file name of "app.log", you would get "app.log", + "app.log.1", "app.log.2", ... through to "app.log.5". The file being + written to is always "app.log" - when it gets filled up, it is closed + and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. + exist, then they are renamed to "app.log.2", "app.log.3" etc. + respectively. + + If maxBytes is zero, rollover never occurs. + 'b' + Do a rollover, as described in __init__(). + 'u' + Do a rollover, as described in __init__(). + 'b'%s.%d'u'%s.%d'b'.1'u'.1'b' + Determine if rollover should occur. + + Basically, see if the supplied record would cause the file to exceed + the size limit we have. + 'u' + Determine if rollover should occur. + + Basically, see if the supplied record would cause the file to exceed + the size limit we have. + 'b' + Handler for logging to a file, rotating the log file at certain timed + intervals. + + If backupCount is > 0, when rollover is done, no more than backupCount + files are kept - the oldest ones are deleted. + 'u' + Handler for logging to a file, rotating the log file at certain timed + intervals. + + If backupCount is > 0, when rollover is done, no more than backupCount + files are kept - the oldest ones are deleted. 
+ 'b'S'u'S'b'%Y-%m-%d_%H-%M-%S'u'%Y-%m-%d_%H-%M-%S'b'^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$'u'^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$'b'M'u'M'b'%Y-%m-%d_%H-%M'u'%Y-%m-%d_%H-%M'b'^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$'u'^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$'b'%Y-%m-%d_%H'u'%Y-%m-%d_%H'b'^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$'u'^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$'b'D'u'D'b'MIDNIGHT'u'MIDNIGHT'b'%Y-%m-%d'u'%Y-%m-%d'b'^\d{4}-\d{2}-\d{2}(\.\w+)?$'u'^\d{4}-\d{2}-\d{2}(\.\w+)?$'b'W'u'W'b'You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s'u'You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s'b'Invalid day specified for weekly rollover: %s'u'Invalid day specified for weekly rollover: %s'b'Invalid rollover interval specified: %s'u'Invalid rollover interval specified: %s'b' + Work out the rollover time based on the specified time. + 'u' + Work out the rollover time based on the specified time. + 'b' + Determine if rollover should occur. + + record is not used, as we are just comparing times, but it is needed so + the method signatures are the same + 'u' + Determine if rollover should occur. + + record is not used, as we are just comparing times, but it is needed so + the method signatures are the same + 'b' + Determine the files to delete when rolling over. + + More specific than the earlier method, which just used glob.glob(). + 'u' + Determine the files to delete when rolling over. + + More specific than the earlier method, which just used glob.glob(). + 'b' + do a rollover; in this case, a date/time stamp is appended to the filename + when the rollover happens. However, you want the file to be named for the + start of the interval, not the current time. If there is a backup count, + then we have to get a list of matching filenames, sort them and remove + the one with the oldest suffix. + 'u' + do a rollover; in this case, a date/time stamp is appended to the filename + when the rollover happens. However, you want the file to be named for the + start of the interval, not the current time. If there is a backup count, + then we have to get a list of matching filenames, sort them and remove + the one with the oldest suffix. + 'b' + A handler for logging to a file, which watches the file + to see if it has changed while in use. This can happen because of + usage of programs such as newsyslog and logrotate which perform + log file rotation. This handler, intended for use under Unix, + watches the file to see if it has changed since the last emit. + (A file has changed if its device or inode have changed.) + If it has changed, the old file stream is closed, and the file + opened to get a new stream. + + This handler is not appropriate for use under Windows, because + under Windows open files cannot be moved or renamed - logging + opens the files with exclusive locks - and so there is no need + for such a handler. Furthermore, ST_INO is not supported under + Windows; stat always returns zero for this value. + + This handler is based on a suggestion and patch by Chad J. + Schroeder. + 'u' + A handler for logging to a file, which watches the file + to see if it has changed while in use. This can happen because of + usage of programs such as newsyslog and logrotate which perform + log file rotation. This handler, intended for use under Unix, + watches the file to see if it has changed since the last emit. + (A file has changed if its device or inode have changed.) + If it has changed, the old file stream is closed, and the file + opened to get a new stream. 
+ + This handler is not appropriate for use under Windows, because + under Windows open files cannot be moved or renamed - logging + opens the files with exclusive locks - and so there is no need + for such a handler. Furthermore, ST_INO is not supported under + Windows; stat always returns zero for this value. + + This handler is based on a suggestion and patch by Chad J. + Schroeder. + 'b' + Reopen log file if needed. + + Checks if the underlying file has changed, and if it + has, close the old stream and reopen the file to get the + current stream. + 'u' + Reopen log file if needed. + + Checks if the underlying file has changed, and if it + has, close the old stream and reopen the file to get the + current stream. + 'b' + Emit a record. + + If underlying file has changed, reopen the file before emitting the + record to it. + 'u' + Emit a record. + + If underlying file has changed, reopen the file before emitting the + record to it. + 'b' + A handler class which writes logging records, in pickle format, to + a streaming socket. The socket is kept open across logging calls. + If the peer resets it, an attempt is made to reconnect on the next call. + The pickle which is sent is that of the LogRecord's attribute dictionary + (__dict__), so that the receiver does not need to have the logging module + installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + 'u' + A handler class which writes logging records, in pickle format, to + a streaming socket. The socket is kept open across logging calls. + If the peer resets it, an attempt is made to reconnect on the next call. + The pickle which is sent is that of the LogRecord's attribute dictionary + (__dict__), so that the receiver does not need to have the logging module + installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + 'b' + Initializes the handler with a specific host address and port. + + When the attribute *closeOnError* is set to True - if a socket error + occurs, the socket is silently closed and then reopened on the next + logging call. + 'u' + Initializes the handler with a specific host address and port. + + When the attribute *closeOnError* is set to True - if a socket error + occurs, the socket is silently closed and then reopened on the next + logging call. + 'b' + A factory method which allows subclasses to define the precise + type of socket they want. + 'u' + A factory method which allows subclasses to define the precise + type of socket they want. + 'b' + Try to create a socket, using an exponential backoff with + a max retry time. Thanks to Robert Olson for the original patch + (SF #815911) which has been slightly refactored. + 'u' + Try to create a socket, using an exponential backoff with + a max retry time. Thanks to Robert Olson for the original patch + (SF #815911) which has been slightly refactored. + 'b' + Send a pickled string to the socket. + + This function allows for partial sends which can happen when the + network is busy. + 'u' + Send a pickled string to the socket. + + This function allows for partial sends which can happen when the + network is busy. + 'b' + Pickles the record in binary format with a length prefix, and + returns it ready for transmission across the socket. + 'u' + Pickles the record in binary format with a length prefix, and + returns it ready for transmission across the socket. 
+ 'b'exc_info'u'exc_info'b'>L'u'>L'b' + Handle an error during logging. + + An error has occurred during logging. Most likely cause - + connection lost. Close the socket so that we can retry on the + next event. + 'u' + Handle an error during logging. + + An error has occurred during logging. Most likely cause - + connection lost. Close the socket so that we can retry on the + next event. + 'b' + Emit a record. + + Pickles the record and writes it to the socket in binary format. + If there is an error with the socket, silently drop the packet. + If there was a problem with the socket, re-establishes the + socket. + 'u' + Emit a record. + + Pickles the record and writes it to the socket in binary format. + If there is an error with the socket, silently drop the packet. + If there was a problem with the socket, re-establishes the + socket. + 'b' + Closes the socket. + 'u' + Closes the socket. + 'b' + A handler class which writes logging records, in pickle format, to + a datagram socket. The pickle which is sent is that of the LogRecord's + attribute dictionary (__dict__), so that the receiver does not need to + have the logging module installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + + 'u' + A handler class which writes logging records, in pickle format, to + a datagram socket. The pickle which is sent is that of the LogRecord's + attribute dictionary (__dict__), so that the receiver does not need to + have the logging module installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + + 'b' + Initializes the handler with a specific host address and port. + 'u' + Initializes the handler with a specific host address and port. + 'b' + The factory method of SocketHandler is here overridden to create + a UDP socket (SOCK_DGRAM). + 'u' + The factory method of SocketHandler is here overridden to create + a UDP socket (SOCK_DGRAM). + 'b' + Send a pickled string to a socket. + + This function no longer allows for partial sends which can happen + when the network is busy - UDP does not guarantee delivery and + can deliver packets out of sequence. + 'u' + Send a pickled string to a socket. + + This function no longer allows for partial sends which can happen + when the network is busy - UDP does not guarantee delivery and + can deliver packets out of sequence. + 'b' + A handler class which sends formatted logging records to a syslog + server. Based on Sam Rushing's syslog module: + http://www.nightmare.com/squirl/python-ext/misc/syslog.py + Contributed by Nicolas Untz (after which minor refactoring changes + have been made). + 'u' + A handler class which sends formatted logging records to a syslog + server. Based on Sam Rushing's syslog module: + http://www.nightmare.com/squirl/python-ext/misc/syslog.py + Contributed by Nicolas Untz (after which minor refactoring changes + have been made). + 'b'alert'u'alert'b'crit'u'crit'b'emerg'u'emerg'b'err'u'err'b'notice'u'notice'b'panic'u'panic'b'auth'u'auth'b'authpriv'u'authpriv'b'cron'u'cron'b'daemon'u'daemon'b'ftp'u'ftp'b'kern'u'kern'b'lpr'u'lpr'b'mail'u'mail'b'news'u'news'b'security'u'security'b'syslog'u'syslog'b'user'u'user'b'uucp'u'uucp'b'local0'u'local0'b'local1'u'local1'b'local2'u'local2'b'local3'u'local3'b'local4'u'local4'b'local5'u'local5'b'local6'u'local6'b'local7'u'local7'b' + Initialize a handler. + + If address is specified as a string, a UNIX socket is used. 
To log to a + local syslogd, "SysLogHandler(address="/dev/log")" can be used. + If facility is not specified, LOG_USER is used. If socktype is + specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific + socket type will be used. For Unix sockets, you can also specify a + socktype of None, in which case socket.SOCK_DGRAM will be used, falling + back to socket.SOCK_STREAM. + 'u' + Initialize a handler. + + If address is specified as a string, a UNIX socket is used. To log to a + local syslogd, "SysLogHandler(address="/dev/log")" can be used. + If facility is not specified, LOG_USER is used. If socktype is + specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific + socket type will be used. For Unix sockets, you can also specify a + socktype of None, in which case socket.SOCK_DGRAM will be used, falling + back to socket.SOCK_STREAM. + 'b'getaddrinfo returns an empty list'u'getaddrinfo returns an empty list'b' + Encode the facility and priority. You can pass in strings or + integers - if strings are passed, the facility_names and + priority_names mapping dictionaries are used to convert them to + integers. + 'u' + Encode the facility and priority. You can pass in strings or + integers - if strings are passed, the facility_names and + priority_names mapping dictionaries are used to convert them to + integers. + 'b' + Map a logging level name to a key in the priority_names map. + This is useful in two scenarios: when custom levels are being + used, and in the case where you can't do a straightforward + mapping by lowercasing the logging level name because of locale- + specific issues (see SF #1524081). + 'u' + Map a logging level name to a key in the priority_names map. + This is useful in two scenarios: when custom levels are being + used, and in the case where you can't do a straightforward + mapping by lowercasing the logging level name because of locale- + specific issues (see SF #1524081). + 'b' + Emit a record. + + The record is formatted, and then sent to the syslog server. If + exception information is present, it is NOT sent to the server. + 'u' + Emit a record. + + The record is formatted, and then sent to the syslog server. If + exception information is present, it is NOT sent to the server. + 'b'<%d>'u'<%d>'b' + A handler class which sends an SMTP email for each logging event. + 'u' + A handler class which sends an SMTP email for each logging event. + 'b' + Initialize the handler. + + Initialize the instance with the from and to addresses and subject + line of the email. To specify a non-standard SMTP port, use the + (host, port) tuple format for the mailhost argument. To specify + authentication credentials, supply a (username, password) tuple + for the credentials argument. To specify the use of a secure + protocol (TLS), pass in a tuple for the secure argument. This will + only be used when authentication credentials are supplied. The tuple + will be either an empty tuple, or a single-value tuple with the name + of a keyfile, or a 2-value tuple with the names of the keyfile and + certificate file. (This tuple is passed to the `starttls` method). + A timeout in seconds can be specified for the SMTP connection (the + default is one second). + 'u' + Initialize the handler. + + Initialize the instance with the from and to addresses and subject + line of the email. To specify a non-standard SMTP port, use the + (host, port) tuple format for the mailhost argument. 
To specify + authentication credentials, supply a (username, password) tuple + for the credentials argument. To specify the use of a secure + protocol (TLS), pass in a tuple for the secure argument. This will + only be used when authentication credentials are supplied. The tuple + will be either an empty tuple, or a single-value tuple with the name + of a keyfile, or a 2-value tuple with the names of the keyfile and + certificate file. (This tuple is passed to the `starttls` method). + A timeout in seconds can be specified for the SMTP connection (the + default is one second). + 'b' + Determine the subject for the email. + + If you want to specify a subject line which is record-dependent, + override this method. + 'u' + Determine the subject for the email. + + If you want to specify a subject line which is record-dependent, + override this method. + 'b' + Emit a record. + + Format the record and send it to the specified addressees. + 'u' + Emit a record. + + Format the record and send it to the specified addressees. + 'b'From'u'From'b'To'u'To'b'Subject'u'Subject'b'Date'u'Date'b' + A handler class which sends events to the NT Event Log. Adds a + registry entry for the specified application name. If no dllname is + provided, win32service.pyd (which contains some basic message + placeholders) is used. Note that use of these placeholders will make + your event logs big, as the entire message source is held in the log. + If you want slimmer logs, you have to pass in the name of your own DLL + which contains the message definitions you want to use in the event log. + 'u' + A handler class which sends events to the NT Event Log. Adds a + registry entry for the specified application name. If no dllname is + provided, win32service.pyd (which contains some basic message + placeholders) is used. Note that use of these placeholders will make + your event logs big, as the entire message source is held in the log. + If you want slimmer logs, you have to pass in the name of your own DLL + which contains the message definitions you want to use in the event log. + 'b'Application'u'Application'b'win32service.pyd'u'win32service.pyd'b'The Python Win32 extensions for NT (service, event logging) appear not to be available.'u'The Python Win32 extensions for NT (service, event logging) appear not to be available.'b' + Return the message ID for the event record. If you are using your + own messages, you could do this by having the msg passed to the + logger being an ID rather than a formatting string. Then, in here, + you could use a dictionary lookup to get the message ID. This + version returns 1, which is the base message ID in win32service.pyd. + 'u' + Return the message ID for the event record. If you are using your + own messages, you could do this by having the msg passed to the + logger being an ID rather than a formatting string. Then, in here, + you could use a dictionary lookup to get the message ID. This + version returns 1, which is the base message ID in win32service.pyd. + 'b' + Return the event category for the record. + + Override this if you want to specify your own categories. This version + returns 0. + 'u' + Return the event category for the record. + + Override this if you want to specify your own categories. This version + returns 0. + 'b' + Return the event type for the record. + + Override this if you want to specify your own types. 
This version does + a mapping using the handler's typemap attribute, which is set up in + __init__() to a dictionary which contains mappings for DEBUG, INFO, + WARNING, ERROR and CRITICAL. If you are using your own levels you will + either need to override this method or place a suitable dictionary in + the handler's typemap attribute. + 'u' + Return the event type for the record. + + Override this if you want to specify your own types. This version does + a mapping using the handler's typemap attribute, which is set up in + __init__() to a dictionary which contains mappings for DEBUG, INFO, + WARNING, ERROR and CRITICAL. If you are using your own levels you will + either need to override this method or place a suitable dictionary in + the handler's typemap attribute. + 'b' + Emit a record. + + Determine the message ID, event category and event type. Then + log the message in the NT event log. + 'u' + Emit a record. + + Determine the message ID, event category and event type. Then + log the message in the NT event log. + 'b' + Clean up this handler. + + You can remove the application name from the registry as a + source of event log entries. However, if you do this, you will + not be able to see the events as you intended in the Event Log + Viewer - it needs to be able to access the registry to get the + DLL name. + 'u' + Clean up this handler. + + You can remove the application name from the registry as a + source of event log entries. However, if you do this, you will + not be able to see the events as you intended in the Event Log + Viewer - it needs to be able to access the registry to get the + DLL name. + 'b' + A class which sends records to a Web server, using either GET or + POST semantics. + 'u' + A class which sends records to a Web server, using either GET or + POST semantics. + 'b'GET'u'GET'b' + Initialize the instance with the host, the request URL, and the method + ("GET" or "POST") + 'u' + Initialize the instance with the host, the request URL, and the method + ("GET" or "POST") + 'b'method must be GET or POST'u'method must be GET or POST'b'context parameter only makes sense with secure=True'u'context parameter only makes sense with secure=True'b' + Default implementation of mapping the log record into a dict + that is sent as the CGI data. Overwrite in your class. + Contributed by Franz Glasner. + 'u' + Default implementation of mapping the log record into a dict + that is sent as the CGI data. Overwrite in your class. + Contributed by Franz Glasner. + 'b' + Emit a record. + + Send the record to the Web server as a percent-encoded dictionary + 'u' + Emit a record. + + Send the record to the Web server as a percent-encoded dictionary + 'b'%c%s'u'%c%s'b'Content-type'u'Content-type'b'application/x-www-form-urlencoded'u'application/x-www-form-urlencoded'b'Content-length'u'Content-length'b' + A handler class which buffers logging records in memory. Whenever each + record is added to the buffer, a check is made to see if the buffer should + be flushed. If it should, then flush() is expected to do what's needed. + 'u' + A handler class which buffers logging records in memory. Whenever each + record is added to the buffer, a check is made to see if the buffer should + be flushed. If it should, then flush() is expected to do what's needed. + 'b' + Initialize the handler with the buffer size. + 'u' + Initialize the handler with the buffer size. + 'b' + Should the handler flush its buffer? + + Returns true if the buffer is up to capacity. 
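The strings above also carry the `logging.handlers.HTTPHandler` docstrings (GET or POST of a percent-encoded record dict). A small illustrative sketch; the endpoint is assumed for the example:

```python
import logging
import logging.handlers

# HTTPHandler sends each record as the percent-encoded dict returned by
# mapLogRecord(), using the chosen method ("GET" or "POST").
http_handler = logging.handlers.HTTPHandler(
    host="logs.example.com:8080",
    url="/ingest",
    method="POST",
)

logger = logging.getLogger("http-demo")
logger.addHandler(http_handler)
logger.warning("record shipped over HTTP")
```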
This method can be + overridden to implement custom flushing strategies. + 'u' + Should the handler flush its buffer? + + Returns true if the buffer is up to capacity. This method can be + overridden to implement custom flushing strategies. + 'b' + Emit a record. + + Append the record. If shouldFlush() tells us to, call flush() to process + the buffer. + 'u' + Emit a record. + + Append the record. If shouldFlush() tells us to, call flush() to process + the buffer. + 'b' + Override to implement custom flushing behaviour. + + This version just zaps the buffer to empty. + 'u' + Override to implement custom flushing behaviour. + + This version just zaps the buffer to empty. + 'b' + Close the handler. + + This version just flushes and chains to the parent class' close(). + 'u' + Close the handler. + + This version just flushes and chains to the parent class' close(). + 'b' + A handler class which buffers logging records in memory, periodically + flushing them to a target handler. Flushing occurs whenever the buffer + is full, or when an event of a certain severity or greater is seen. + 'u' + A handler class which buffers logging records in memory, periodically + flushing them to a target handler. Flushing occurs whenever the buffer + is full, or when an event of a certain severity or greater is seen. + 'b' + Initialize the handler with the buffer size, the level at which + flushing should occur and an optional target. + + Note that without a target being set either here or via setTarget(), + a MemoryHandler is no use to anyone! + + The ``flushOnClose`` argument is ``True`` for backward compatibility + reasons - the old behaviour is that when the handler is closed, the + buffer is flushed, even if the flush level hasn't been exceeded nor the + capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``. + 'u' + Initialize the handler with the buffer size, the level at which + flushing should occur and an optional target. + + Note that without a target being set either here or via setTarget(), + a MemoryHandler is no use to anyone! + + The ``flushOnClose`` argument is ``True`` for backward compatibility + reasons - the old behaviour is that when the handler is closed, the + buffer is flushed, even if the flush level hasn't been exceeded nor the + capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``. + 'b' + Check for buffer full or a record at the flushLevel or higher. + 'u' + Check for buffer full or a record at the flushLevel or higher. + 'b' + Set the target handler for this handler. + 'u' + Set the target handler for this handler. + 'b' + For a MemoryHandler, flushing means just sending the buffered + records to the target, if there is one. Override if you want + different behaviour. + + The record buffer is also cleared by this operation. + 'u' + For a MemoryHandler, flushing means just sending the buffered + records to the target, if there is one. Override if you want + different behaviour. + + The record buffer is also cleared by this operation. + 'b' + Flush, if appropriately configured, set the target to None and lose the + buffer. + 'u' + Flush, if appropriately configured, set the target to None and lose the + buffer. + 'b' + This handler sends events to a queue. Typically, it would be used together + with a multiprocessing Queue to centralise logging to file in one process + (in a multi-process application), so as to avoid file write contention + between processes. 
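The `BufferingHandler`/`MemoryHandler` docstrings embedded here describe buffering records and flushing on capacity or on a record at `flushLevel` or above. A minimal sketch of that behaviour (capacity and target are illustrative choices):

```python
import logging
import logging.handlers

stream_handler = logging.StreamHandler()

# Buffer up to 100 records in memory; flush to the target whenever the
# buffer fills or a record at ERROR level (or higher) is seen.
memory_handler = logging.handlers.MemoryHandler(
    capacity=100,
    flushLevel=logging.ERROR,
    target=stream_handler,
    flushOnClose=True,   # default: flush any remaining records on close()
)

logger = logging.getLogger("memory-demo")
logger.addHandler(memory_handler)

logger.info("buffered")          # held in memory
logger.error("flush trigger")    # ERROR >= flushLevel -> buffer is flushed
```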
+ + This code is new in Python 3.2, but this class can be copy pasted into + user code for use with earlier Python versions. + 'u' + This handler sends events to a queue. Typically, it would be used together + with a multiprocessing Queue to centralise logging to file in one process + (in a multi-process application), so as to avoid file write contention + between processes. + + This code is new in Python 3.2, but this class can be copy pasted into + user code for use with earlier Python versions. + 'b' + Initialise an instance, using the passed queue. + 'u' + Initialise an instance, using the passed queue. + 'b' + Enqueue a record. + + The base implementation uses put_nowait. You may want to override + this method if you want to use blocking, timeouts or custom queue + implementations. + 'u' + Enqueue a record. + + The base implementation uses put_nowait. You may want to override + this method if you want to use blocking, timeouts or custom queue + implementations. + 'b' + Prepares a record for queuing. The object returned by this method is + enqueued. + + The base implementation formats the record to merge the message + and arguments, and removes unpickleable items from the record + in-place. + + You might want to override this method if you want to convert + the record to a dict or JSON string, or send a modified copy + of the record while leaving the original intact. + 'u' + Prepares a record for queuing. The object returned by this method is + enqueued. + + The base implementation formats the record to merge the message + and arguments, and removes unpickleable items from the record + in-place. + + You might want to override this method if you want to convert + the record to a dict or JSON string, or send a modified copy + of the record while leaving the original intact. + 'b' + Emit a record. + + Writes the LogRecord to the queue, preparing it for pickling first. + 'u' + Emit a record. + + Writes the LogRecord to the queue, preparing it for pickling first. + 'b' + This class implements an internal threaded listener which watches for + LogRecords being added to a queue, removes them and passes them to a + list of handlers for processing. + 'u' + This class implements an internal threaded listener which watches for + LogRecords being added to a queue, removes them and passes them to a + list of handlers for processing. + 'b' + Initialise an instance with the specified queue and + handlers. + 'u' + Initialise an instance with the specified queue and + handlers. + 'b' + Dequeue a record and return it, optionally blocking. + + The base implementation uses get. You may want to override this method + if you want to use timeouts or work with custom queue implementations. + 'u' + Dequeue a record and return it, optionally blocking. + + The base implementation uses get. You may want to override this method + if you want to use timeouts or work with custom queue implementations. + 'b' + Start the listener. + + This starts up a background thread to monitor the queue for + LogRecords to process. + 'u' + Start the listener. + + This starts up a background thread to monitor the queue for + LogRecords to process. + 'b' + Prepare a record for handling. + + This method just returns the passed-in record. You may want to + override this method if you need to do any custom marshalling or + manipulation of the record before passing it to the handlers. + 'u' + Prepare a record for handling. + + This method just returns the passed-in record. 
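The `QueueHandler` docstring above explains its use with a multiprocessing queue to centralise logging in one process; the `QueueListener` strings that follow describe the consuming side. A minimal sketch of the pair working together (the log file path is an assumption):

```python
import logging
import logging.handlers
import multiprocessing

# Workers put LogRecords on a shared queue via QueueHandler; a single
# QueueListener thread writes them to the real handlers, avoiding
# file-write contention between processes.
queue = multiprocessing.Queue(-1)

file_handler = logging.FileHandler("app.log")   # illustrative target
listener = logging.handlers.QueueListener(queue, file_handler)
listener.start()

worker_logger = logging.getLogger("worker")
worker_logger.addHandler(logging.handlers.QueueHandler(queue))
worker_logger.warning("logged via the queue")

listener.stop()   # drain remaining records and join the listener thread
```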
You may want to + override this method if you need to do any custom marshalling or + manipulation of the record before passing it to the handlers. + 'b' + Handle a record. + + This just loops through the handlers offering them the record + to handle. + 'u' + Handle a record. + + This just loops through the handlers offering them the record + to handle. + 'b' + Monitor the queue for records, and ask the handler + to deal with them. + + This method runs on a separate, internal thread. + The thread will terminate if it sees a sentinel object in the queue. + 'u' + Monitor the queue for records, and ask the handler + to deal with them. + + This method runs on a separate, internal thread. + The thread will terminate if it sees a sentinel object in the queue. + 'b'task_done'u'task_done'b' + This is used to enqueue the sentinel record. + + The base implementation uses put_nowait. You may want to override this + method if you want to use timeouts or work with custom queue + implementations. + 'u' + This is used to enqueue the sentinel record. + + The base implementation uses put_nowait. You may want to override this + method if you want to use timeouts or work with custom queue + implementations. + 'b' + Stop the listener. + + This asks the thread to terminate, and then waits for it to do so. + Note that if you don't call this before your application exits, there + may be some records still left on the queue, which won't be processed. + 'u' + Stop the listener. + + This asks the thread to terminate, and then waits for it to do so. + Note that if you don't call this before your application exits, there + may be some records still left on the queue, which won't be processed. + 'u'logging.handlers'hashlib module - A common interface to many hash functions. + +new(name, data=b'', **kwargs) - returns a new hash object implementing the + given hash function; initializing the hash + using the given binary data. + +Named constructor functions are also available, these are faster +than using new(name): + +md5(), sha1(), sha224(), sha256(), sha384(), sha512(), blake2b(), blake2s(), +sha3_224, sha3_256, sha3_384, sha3_512, shake_128, and shake_256. + +More algorithms may be available on your platform but the above are guaranteed +to exist. See the algorithms_guaranteed and algorithms_available attributes +to find out what algorithm names can be passed to new(). + +NOTE: If you want the adler32 or crc32 hash functions they are available in +the zlib module. + +Choose your hash function wisely. Some have known collision weaknesses. +sha384 and sha512 will be slow on 32 bit platforms. + +Hash objects have these methods: + - update(data): Update the hash object with the bytes in data. Repeated calls + are equivalent to a single call with the concatenation of all + the arguments. + - digest(): Return the digest of the bytes passed to the update() method + so far as a bytes object. + - hexdigest(): Like digest() except the digest is returned as a string + of double length, containing only hexadecimal digits. + - copy(): Return a copy (clone) of the hash object. This can be used to + efficiently compute the digests of datas that share a common + initial substring. 
+ +For example, to obtain the digest of the byte string 'Nobody inspects the +spammish repetition': + + >>> import hashlib + >>> m = hashlib.md5() + >>> m.update(b"Nobody inspects") + >>> m.update(b" the spammish repetition") + >>> m.digest() + b'\xbbd\x9c\x83\xdd\x1e\xa5\xc9\xd9\xde\xc9\xa1\x8d\xf0\xff\xe9' + +More condensed: + + >>> hashlib.sha224(b"Nobody inspects the spammish repetition").hexdigest() + 'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2' + +__always_supportedalgorithms_guaranteedalgorithms_available__builtin_constructor_cache__block_openssl_constructor__get_builtin_constructorSHA1MD5SHA256SHA224SHA512SHA384unsupported hash type __get_openssl_constructoropenssl___py_newnew(name, data=b'', **kwargs) - Return a new hashing object using the + named algorithm; optionally initialized with data (which must be + a bytes-like object). + __hash_newnew(name, data=b'') - Return a new hashing object using the named algorithm; + optionally initialized with data (which must be a bytes-like object). + __get_hash0x5C_trans_5C0x36_trans_36hash_namesaltiterationsdklenPassword based key derivation function 2 (PKCS #5 v2.0) + + This Python implementations based on the hmac module about as fast + as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster + for long passwords. + outerprficpyocpydkeyrkey__func_namecode for hash %s was not found.#. Copyright (C) 2005-2010 Gregory P. Smith (greg@krypto.org)# Licensed to PSF under a Contributor Agreement.# This tuple and __get_builtin_constructor() must be modified if a new# always available algorithm is added.# no extension module, this hash is unsupported.# Prefer our blake2 and sha3 implementation.# Allow the C module to raise ValueError. The function will be# defined but the hash not actually available thanks to OpenSSL.# Use the C function directly (very fast)# Prefer our blake2 and sha3 implementation# OpenSSL 1.1.0 comes with a limited implementation of blake2b/s.# It does neither support keyed blake2 nor advanced features like# salt, personal, tree hashing or SSE.# If the _hashlib module (OpenSSL) doesn't support the named# hash, try using our builtin implementations.# This allows for SHA224/256 and SHA384/512 support even though# the OpenSSL library prior to 0.9.8 doesn't provide them.# OpenSSL's PKCS5_PBKDF2_HMAC requires OpenSSL 1.0+ with HMAC and SHA# Fast inline HMAC implementation# PBKDF2_HMAC uses the password as key. We can re-use the same# digest objects and just update copies to skip initialization.# endianness doesn't matter here as long to / from use the same# rkey = rkey ^ prev# OpenSSL's scrypt requires OpenSSL 1.1+# try them all, some may not work due to the OpenSSL# version not supporting that algorithm.# Cleanup locals()b'hashlib module - A common interface to many hash functions. + +new(name, data=b'', **kwargs) - returns a new hash object implementing the + given hash function; initializing the hash + using the given binary data. + +Named constructor functions are also available, these are faster +than using new(name): + +md5(), sha1(), sha224(), sha256(), sha384(), sha512(), blake2b(), blake2s(), +sha3_224, sha3_256, sha3_384, sha3_512, shake_128, and shake_256. + +More algorithms may be available on your platform but the above are guaranteed +to exist. See the algorithms_guaranteed and algorithms_available attributes +to find out what algorithm names can be passed to new(). + +NOTE: If you want the adler32 or crc32 hash functions they are available in +the zlib module. 
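The hashlib module docstring and the `pbkdf2_hmac` strings embedded above describe named constructors, `new()`, and PKCS #5 v2.0 key derivation. A short runnable sketch of that API (the salt and iteration count are illustrative, not recommendations):

```python
import hashlib

# Named constructors and new() are equivalent ways to build a hash object.
m = hashlib.sha256()
m.update(b"Nobody inspects")
m.update(b" the spammish repetition")
print(m.hexdigest())

print(hashlib.new("sha256", b"Nobody inspects the spammish repetition").hexdigest())

# Password-based key derivation (PKCS #5 v2.0) via pbkdf2_hmac.
key = hashlib.pbkdf2_hmac("sha256", b"password", b"salt", 100_000, dklen=32)
print(key.hex())
```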
+ +Choose your hash function wisely. Some have known collision weaknesses. +sha384 and sha512 will be slow on 32 bit platforms. + +Hash objects have these methods: + - update(data): Update the hash object with the bytes in data. Repeated calls + are equivalent to a single call with the concatenation of all + the arguments. + - digest(): Return the digest of the bytes passed to the update() method + so far as a bytes object. + - hexdigest(): Like digest() except the digest is returned as a string + of double length, containing only hexadecimal digits. + - copy(): Return a copy (clone) of the hash object. This can be used to + efficiently compute the digests of datas that share a common + initial substring. + +For example, to obtain the digest of the byte string 'Nobody inspects the +spammish repetition': + + >>> import hashlib + >>> m = hashlib.md5() + >>> m.update(b"Nobody inspects") + >>> m.update(b" the spammish repetition") + >>> m.digest() + b'\xbbd\x9c\x83\xdd\x1e\xa5\xc9\xd9\xde\xc9\xa1\x8d\xf0\xff\xe9' + +More condensed: + + >>> hashlib.sha224(b"Nobody inspects the spammish repetition").hexdigest() + 'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2' + +'u'hashlib module - A common interface to many hash functions. + +new(name, data=b'', **kwargs) - returns a new hash object implementing the + given hash function; initializing the hash + using the given binary data. + +Named constructor functions are also available, these are faster +than using new(name): + +md5(), sha1(), sha224(), sha256(), sha384(), sha512(), blake2b(), blake2s(), +sha3_224, sha3_256, sha3_384, sha3_512, shake_128, and shake_256. + +More algorithms may be available on your platform but the above are guaranteed +to exist. See the algorithms_guaranteed and algorithms_available attributes +to find out what algorithm names can be passed to new(). + +NOTE: If you want the adler32 or crc32 hash functions they are available in +the zlib module. + +Choose your hash function wisely. Some have known collision weaknesses. +sha384 and sha512 will be slow on 32 bit platforms. + +Hash objects have these methods: + - update(data): Update the hash object with the bytes in data. Repeated calls + are equivalent to a single call with the concatenation of all + the arguments. + - digest(): Return the digest of the bytes passed to the update() method + so far as a bytes object. + - hexdigest(): Like digest() except the digest is returned as a string + of double length, containing only hexadecimal digits. + - copy(): Return a copy (clone) of the hash object. This can be used to + efficiently compute the digests of datas that share a common + initial substring. 
+ +For example, to obtain the digest of the byte string 'Nobody inspects the +spammish repetition': + + >>> import hashlib + >>> m = hashlib.md5() + >>> m.update(b"Nobody inspects") + >>> m.update(b" the spammish repetition") + >>> m.digest() + b'\xbbd\x9c\x83\xdd\x1e\xa5\xc9\xd9\xde\xc9\xa1\x8d\xf0\xff\xe9' + +More condensed: + + >>> hashlib.sha224(b"Nobody inspects the spammish repetition").hexdigest() + 'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2' + +'b'sha1'u'sha1'b'sha224'u'sha224'b'sha256'u'sha256'b'sha384'u'sha384'b'sha512'u'sha512'b'blake2b'u'blake2b'b'blake2s'u'blake2s'b'sha3_224'u'sha3_224'b'sha3_256'u'sha3_256'b'sha3_384'u'sha3_384'b'sha3_512'u'sha3_512'b'shake_128'u'shake_128'b'shake_256'u'shake_256'b'new'u'new'b'algorithms_guaranteed'u'algorithms_guaranteed'b'algorithms_available'u'algorithms_available'b'pbkdf2_hmac'u'pbkdf2_hmac'b'SHA1'u'SHA1'b'MD5'u'MD5'b'SHA256'u'SHA256'b'SHA224'u'SHA224'b'SHA512'u'SHA512'b'SHA384'u'SHA384'b'unsupported hash type 'u'unsupported hash type 'b'openssl_'u'openssl_'b'new(name, data=b'', **kwargs) - Return a new hashing object using the + named algorithm; optionally initialized with data (which must be + a bytes-like object). + 'u'new(name, data=b'', **kwargs) - Return a new hashing object using the + named algorithm; optionally initialized with data (which must be + a bytes-like object). + 'b'new(name, data=b'') - Return a new hashing object using the named algorithm; + optionally initialized with data (which must be a bytes-like object). + 'u'new(name, data=b'') - Return a new hashing object using the named algorithm; + optionally initialized with data (which must be a bytes-like object). + 'b'Password based key derivation function 2 (PKCS #5 v2.0) + + This Python implementations based on the hmac module about as fast + as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster + for long passwords. + 'u'Password based key derivation function 2 (PKCS #5 v2.0) + + This Python implementations based on the hmac module about as fast + as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster + for long passwords. + 'b'block_size'u'block_size'b'code for hash %s was not found.'u'code for hash %s was not found.'u'hashlib'Header encoding and decoding functionality.decode_headermake_headeremail.errorsBSPACESPACE8MAXLINELENUSASCIIUTF8 + =\? # literal =? + (?P[^?]*?) # non-greedy up to the next ? is the charset + \? # literal ? + (?P[qQbB]) # either a "q" or a "b", case insensitive + \? # literal ? + (?P.*?) # non-greedy up to the next ?= is the encoded string + \?= # literal ?= + ecre[\041-\176]+:$fcre\n[^ \t]+:_embedded_header_max_appendDecode a message header value without converting charset. + + Returns a list of (string, charset) pairs containing each of the decoded + parts of the header. Charset is None for non-encoded parts of the header, + otherwise a lower-case string containing the name of the character set + specified in the encoded string. + + header may be a string that may or may not contain RFC2047 encoded words, + or it may be a Header object. + + An email.errors.HeaderParseError may be raised when certain decoding error + occurs (e.g. a base64 decoding exception). 
+ _chunksunencodeddroplistdecoded_wordsencoded_stringheader_decodepaderrBase64 decoding errorUnexpected encoding: collapsedlast_wordlast_charsetdecoded_seqcontinuation_wsCreate a Header from a sequence of pairs as returned by decode_header() + + decode_header() takes a header value string and returns a sequence of + pairs of the format (decoded_string, charset) where charset is the string + name of the character set. + + This function takes one of those sequence of pairs and returns a Header + instance. Optional maxlinelen, header_name, and continuation_ws are as in + the Header constructor. + Create a MIME-compliant header that can contain many character sets. + + Optional s is the initial header value. If None, the initial header + value is not set. You can later append to the header with .append() + method calls. s may be a byte string or a Unicode string, but see the + .append() documentation for semantics. + + Optional charset serves two purposes: it has the same meaning as the + charset argument to the .append() method. It also sets the default + character set for all subsequent .append() calls that omit the charset + argument. If charset is not provided in the constructor, the us-ascii + charset is used both as s's initial charset and as the default for + subsequent .append() calls. + + The maximum line length can be specified explicitly via maxlinelen. For + splitting the first line to a shorter value (to account for the field + header which isn't included in s, e.g. `Subject') pass in the name of + the field in header_name. The default maxlinelen is 78 as recommended + by RFC 2822. + + continuation_ws must be RFC 2822 compliant folding whitespace (usually + either a space or a hard tab) which will be prepended to continuation + lines. + + errors is passed through to the .append() call. + _continuation_ws_maxlinelen_headerlenReturn the string value of the header.uchunkslastcslastspacenextcsoriginal_bytes_nonctexthasspaceAppend a string to the MIME header. + + Optional charset, if given, should be a Charset instance or the name + of a character set (which will be converted to a Charset instance). A + value of None (the default) means that the charset given in the + constructor is used. + + s may be a byte string or a Unicode string. If it is a byte string + (i.e. isinstance(s, str) is false), then charset is the encoding of + that byte string, and a UnicodeError will be raised if the string + cannot be decoded with that charset. If s is a Unicode string, then + charset is a hint specifying the character set of the characters in + the string. In either case, when producing an RFC 2822 compliant + header using RFC 2047 rules, the string will be encoded using the + output codec of the charset. If the string cannot be encoded to the + output codec, a UnicodeError will be raised. + + Optional `errors' is passed as the errors argument to the decode + call if s is a byte string. + True if string s is not a ctext character of RFC822. + ;, splitcharsEncode a message header into an RFC-compliant format. + + There are many issues involved in converting a given string for use in + an email header. Only certain character sets are readable in most + email clients, and as header strings can only contain a subset of + 7-bit ASCII, care must be taken to properly convert and encode (with + Base64 or quoted-printable) header strings. In addition, there is a + 75-character length limit on any given encoded header field, so + line-wrapping must be performed, even with double-byte character sets. 
+ + Optional maxlinelen specifies the maximum length of each generated + line, exclusive of the linesep string. Individual lines may be longer + than maxlinelen if a folding point cannot be found. The first line + will be shorter by the length of the header name plus ": " if a header + name was specified at Header construction time. The default value for + maxlinelen is determined at header construction time. + + Optional splitchars is a string containing characters which should be + given extra weight by the splitting algorithm during normal header + wrapping. This is in very rough support of RFC 2822's `higher level + syntactic breaks': split points preceded by a splitchar are preferred + during line splitting, with the characters preferred in the order in + which they appear in the string. Space and tab may be included in the + string to indicate whether preference should be given to one over the + other as a split point when other split chars do not appear in the line + being split. Splitchars does not affect RFC 2047 encoded lines. + + Optional linesep is a string to be used to separate the lines of + the value. The default value is the most useful for typical + Python applications, but it can be set to \r\n to produce RFC-compliant + line separators when needed. + _ValueFormatteradd_transitionslinefwsheader value appears to contain an embedded header: {!r}"header value appears to contain ""an embedded header: {!r}"last_chunkheaderlen_maxlen_continuation_ws_len_splitchars_Accumulator_current_lineend_of_lineis_onlyws_ascii_split_maxlengthsencoded_lines_append_chunklast_line([]+)part_countprevpart_initial_sizepop_frominitial_sizepoppedstartval# Match encoded-word strings in the form =?charset?q?Hello_World?=# Field name regexp, including trailing colon, but not separating whitespace,# according to RFC 2822. Character range is from tilde to exclamation mark.# For use with .match()# Find a header embedded in a putative header value. Used to check for# header injection attack.# If it is a Header object, we can just return the encoded chunks.# If no encoding, just return the header with no charset.# First step is to parse all the encoded parts into triplets of the form# (encoded_string, encoding, charset). For unencoded strings, the last# two parts will be None.# Now loop over words and remove words that consist of whitespace# between two encoded strings.# The next step is to decode each encoded word by applying the reverse# base64 or quopri transformation. decoded_words is now a list of the# form (decoded_word, charset).# This is an unencoded word.# Postel's law: add missing padding# Now convert all words to bytes and collapse consecutive runs of# similarly encoded words.# None means us-ascii but we can simply pass it on to h.append()# Take the separating colon and space into account.# We must preserve spaces between encoded and non-encoded word# boundaries, which means for us we need to add a space when we go# from a charset to None/us-ascii, or from None/us-ascii to a# charset. Only do this for the second and subsequent chunks.# Don't add a space if the None/us-ascii string already has# a space (trailing or leading depending on transition)# Rich comparison operators for equality only. BAW: does it make sense to# have or explicitly disable <, <=, >, >= operators?# other may be a Header or a string. 
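The `email.header` docstrings above cover `decode_header()`, `make_header()`, and the `Header` class with its `append()`/`encode()` methods. A small sketch of the round trip they describe; the sample subject text is an assumption, and the exact encoded-word form (`?q?` vs `?b?`) depends on the charset's preferred header encoding:

```python
from email.header import Header, decode_header, make_header

# Encode a non-ASCII Subject value as RFC 2047 encoded words.
h = Header("Grüße aus Köln", charset="utf-8", header_name="Subject")
encoded = h.encode()
print(encoded)   # e.g. '=?utf-8?q?...?=' or '=?utf-8?b?...?='

# Decode back into (bytes-or-str, charset) pairs, then rebuild a Header.
parts = decode_header(encoded)
print(parts)
print(str(make_header(parts)))
```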
Both are fine so coerce# ourselves to a unicode (of the unencoded header value), swap the# args and do another comparison.# Ensure that the bytes we're storing can be decoded to the output# character set, otherwise an early error is raised.# A maxlinelen of 0 means don't wrap. For all practical purposes,# choosing a huge number here accomplishes that and makes the# _ValueFormatter algorithm much simpler.# Step 1: Normalize the chunks so that all runs of identical charsets# get collapsed into a single unicode string.# If the charset has no header encoding (i.e. it is an ASCII encoding)# then we must split the header at the "highest level syntactic break"# possible. Note that we don't have a lot of smarts about field# syntax; we just try to break on semi-colons, then commas, then# whitespace. Eventually, this should be pluggable.# Otherwise, we're doing either a Base64 or a quoted-printable# encoding which means we don't need to split the line on syntactic# breaks. We can basically just find enough characters to fit on the# current line, minus the RFC 2047 chrome. What makes this trickier# though is that we have to split at octet boundaries, not character# boundaries but it's only safe to split at character boundaries so at# best we can only get close.# The first element extends the current line, but if it's None then# nothing more fit on the current line so start a new line.# There are no encoded lines, so we're done.# There was only one line.# Everything else are full lines in themselves.# The first line's length.# The RFC 2822 header folding algorithm is simple in principle but# complex in practice. Lines may be folded any place where "folding# white space" appears by inserting a linesep character in front of the# FWS. The complication is that not all spaces or tabs qualify as FWS,# and we are also supposed to prefer to break at "higher level# syntactic breaks". We can't do either of these without intimate# knowledge of the structure of structured headers, which we don't have# here. So the best we can do here is prefer to break at the specified# splitchars, and hope that we don't choose any spaces or tabs that# aren't legal FWS. (This is at least better than the old algorithm,# where we would sometimes *introduce* FWS after a splitchar, or the# algorithm before that, where we would turn all white space runs into# single spaces or tabs.)# Find the best split point, working backward from the end.# There might be none, on a long first line.# There will be a header, so leave it on a line by itself.# We don't use continuation_ws here because the whitespace# after a header should always be a space.b'Header encoding and decoding functionality.'u'Header encoding and decoding functionality.'b'Header'u'Header'b'decode_header'u'decode_header'b'make_header'u'make_header'b' + =\? # literal =? + (?P[^?]*?) # non-greedy up to the next ? is the charset + \? # literal ? + (?P[qQbB]) # either a "q" or a "b", case insensitive + \? # literal ? + (?P.*?) # non-greedy up to the next ?= is the encoded string + \?= # literal ?= + 'u' + =\? # literal =? + (?P[^?]*?) # non-greedy up to the next ? is the charset + \? # literal ? + (?P[qQbB]) # either a "q" or a "b", case insensitive + \? # literal ? + (?P.*?) # non-greedy up to the next ?= is the encoded string + \?= # literal ?= + 'b'[\041-\176]+:$'u'[\041-\176]+:$'b'\n[^ \t]+:'u'\n[^ \t]+:'b'Decode a message header value without converting charset. + + Returns a list of (string, charset) pairs containing each of the decoded + parts of the header. 
Charset is None for non-encoded parts of the header, + otherwise a lower-case string containing the name of the character set + specified in the encoded string. + + header may be a string that may or may not contain RFC2047 encoded words, + or it may be a Header object. + + An email.errors.HeaderParseError may be raised when certain decoding error + occurs (e.g. a base64 decoding exception). + 'u'Decode a message header value without converting charset. + + Returns a list of (string, charset) pairs containing each of the decoded + parts of the header. Charset is None for non-encoded parts of the header, + otherwise a lower-case string containing the name of the character set + specified in the encoded string. + + header may be a string that may or may not contain RFC2047 encoded words, + or it may be a Header object. + + An email.errors.HeaderParseError may be raised when certain decoding error + occurs (e.g. a base64 decoding exception). + 'b'_chunks'u'_chunks'u'==='b'Base64 decoding error'u'Base64 decoding error'b'Unexpected encoding: 'u'Unexpected encoding: 'b'Create a Header from a sequence of pairs as returned by decode_header() + + decode_header() takes a header value string and returns a sequence of + pairs of the format (decoded_string, charset) where charset is the string + name of the character set. + + This function takes one of those sequence of pairs and returns a Header + instance. Optional maxlinelen, header_name, and continuation_ws are as in + the Header constructor. + 'u'Create a Header from a sequence of pairs as returned by decode_header() + + decode_header() takes a header value string and returns a sequence of + pairs of the format (decoded_string, charset) where charset is the string + name of the character set. + + This function takes one of those sequence of pairs and returns a Header + instance. Optional maxlinelen, header_name, and continuation_ws are as in + the Header constructor. + 'b'Create a MIME-compliant header that can contain many character sets. + + Optional s is the initial header value. If None, the initial header + value is not set. You can later append to the header with .append() + method calls. s may be a byte string or a Unicode string, but see the + .append() documentation for semantics. + + Optional charset serves two purposes: it has the same meaning as the + charset argument to the .append() method. It also sets the default + character set for all subsequent .append() calls that omit the charset + argument. If charset is not provided in the constructor, the us-ascii + charset is used both as s's initial charset and as the default for + subsequent .append() calls. + + The maximum line length can be specified explicitly via maxlinelen. For + splitting the first line to a shorter value (to account for the field + header which isn't included in s, e.g. `Subject') pass in the name of + the field in header_name. The default maxlinelen is 78 as recommended + by RFC 2822. + + continuation_ws must be RFC 2822 compliant folding whitespace (usually + either a space or a hard tab) which will be prepended to continuation + lines. + + errors is passed through to the .append() call. + 'u'Create a MIME-compliant header that can contain many character sets. + + Optional s is the initial header value. If None, the initial header + value is not set. You can later append to the header with .append() + method calls. s may be a byte string or a Unicode string, but see the + .append() documentation for semantics. 
+ + Optional charset serves two purposes: it has the same meaning as the + charset argument to the .append() method. It also sets the default + character set for all subsequent .append() calls that omit the charset + argument. If charset is not provided in the constructor, the us-ascii + charset is used both as s's initial charset and as the default for + subsequent .append() calls. + + The maximum line length can be specified explicitly via maxlinelen. For + splitting the first line to a shorter value (to account for the field + header which isn't included in s, e.g. `Subject') pass in the name of + the field in header_name. The default maxlinelen is 78 as recommended + by RFC 2822. + + continuation_ws must be RFC 2822 compliant folding whitespace (usually + either a space or a hard tab) which will be prepended to continuation + lines. + + errors is passed through to the .append() call. + 'b'Return the string value of the header.'u'Return the string value of the header.'b'Append a string to the MIME header. + + Optional charset, if given, should be a Charset instance or the name + of a character set (which will be converted to a Charset instance). A + value of None (the default) means that the charset given in the + constructor is used. + + s may be a byte string or a Unicode string. If it is a byte string + (i.e. isinstance(s, str) is false), then charset is the encoding of + that byte string, and a UnicodeError will be raised if the string + cannot be decoded with that charset. If s is a Unicode string, then + charset is a hint specifying the character set of the characters in + the string. In either case, when producing an RFC 2822 compliant + header using RFC 2047 rules, the string will be encoded using the + output codec of the charset. If the string cannot be encoded to the + output codec, a UnicodeError will be raised. + + Optional `errors' is passed as the errors argument to the decode + call if s is a byte string. + 'u'Append a string to the MIME header. + + Optional charset, if given, should be a Charset instance or the name + of a character set (which will be converted to a Charset instance). A + value of None (the default) means that the charset given in the + constructor is used. + + s may be a byte string or a Unicode string. If it is a byte string + (i.e. isinstance(s, str) is false), then charset is the encoding of + that byte string, and a UnicodeError will be raised if the string + cannot be decoded with that charset. If s is a Unicode string, then + charset is a hint specifying the character set of the characters in + the string. In either case, when producing an RFC 2822 compliant + header using RFC 2047 rules, the string will be encoded using the + output codec of the charset. If the string cannot be encoded to the + output codec, a UnicodeError will be raised. + + Optional `errors' is passed as the errors argument to the decode + call if s is a byte string. + 'b'True if string s is not a ctext character of RFC822. + 'u'True if string s is not a ctext character of RFC822. + 'b';, 'u';, 'b'Encode a message header into an RFC-compliant format. + + There are many issues involved in converting a given string for use in + an email header. Only certain character sets are readable in most + email clients, and as header strings can only contain a subset of + 7-bit ASCII, care must be taken to properly convert and encode (with + Base64 or quoted-printable) header strings. 
In addition, there is a + 75-character length limit on any given encoded header field, so + line-wrapping must be performed, even with double-byte character sets. + + Optional maxlinelen specifies the maximum length of each generated + line, exclusive of the linesep string. Individual lines may be longer + than maxlinelen if a folding point cannot be found. The first line + will be shorter by the length of the header name plus ": " if a header + name was specified at Header construction time. The default value for + maxlinelen is determined at header construction time. + + Optional splitchars is a string containing characters which should be + given extra weight by the splitting algorithm during normal header + wrapping. This is in very rough support of RFC 2822's `higher level + syntactic breaks': split points preceded by a splitchar are preferred + during line splitting, with the characters preferred in the order in + which they appear in the string. Space and tab may be included in the + string to indicate whether preference should be given to one over the + other as a split point when other split chars do not appear in the line + being split. Splitchars does not affect RFC 2047 encoded lines. + + Optional linesep is a string to be used to separate the lines of + the value. The default value is the most useful for typical + Python applications, but it can be set to \r\n to produce RFC-compliant + line separators when needed. + 'u'Encode a message header into an RFC-compliant format. + + There are many issues involved in converting a given string for use in + an email header. Only certain character sets are readable in most + email clients, and as header strings can only contain a subset of + 7-bit ASCII, care must be taken to properly convert and encode (with + Base64 or quoted-printable) header strings. In addition, there is a + 75-character length limit on any given encoded header field, so + line-wrapping must be performed, even with double-byte character sets. + + Optional maxlinelen specifies the maximum length of each generated + line, exclusive of the linesep string. Individual lines may be longer + than maxlinelen if a folding point cannot be found. The first line + will be shorter by the length of the header name plus ": " if a header + name was specified at Header construction time. The default value for + maxlinelen is determined at header construction time. + + Optional splitchars is a string containing characters which should be + given extra weight by the splitting algorithm during normal header + wrapping. This is in very rough support of RFC 2822's `higher level + syntactic breaks': split points preceded by a splitchar are preferred + during line splitting, with the characters preferred in the order in + which they appear in the string. Space and tab may be included in the + string to indicate whether preference should be given to one over the + other as a split point when other split chars do not appear in the line + being split. Splitchars does not affect RFC 2047 encoded lines. + + Optional linesep is a string to be used to separate the lines of + the value. The default value is the most useful for typical + Python applications, but it can be set to \r\n to produce RFC-compliant + line separators when needed. + 'b'header value appears to contain an embedded header: {!r}'u'header value appears to contain an embedded header: {!r}'b'(['u'(['b']+)'u']+)'u'email.header'Heap queue algorithm (a.k.a. priority queue). 
+ +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. + +Usage: + +heap = [] # creates an empty heap +heappush(heap, item) # pushes a new item on the heap +item = heappop(heap) # pops the smallest item from the heap +item = heap[0] # smallest item on the heap without popping it +heapify(x) # transforms list into a heap, in-place, in linear time +item = heapreplace(heap, item) # pops and returns smallest item, and adds + # new item; the heap size is unchanged + +Our API differs from textbook heap algorithms as follows: + +- We use 0-based indexing. This makes the relationship between the + index for a node and the indexes for its children slightly less + obvious, but is more suitable since Python uses 0-based indexing. + +- Our heappop() method returns the smallest item, not the largest. + +These two make it possible to view the heap as a regular Python list +without surprises: heap[0] is the smallest item, and heap.sort() +maintains the heap invariant! +Heap queues + +[explanation by François Pinard] + +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. + +The strange invariant above is meant to be an efficient memory +representation for a tournament. The numbers below are `k', not a[k]: + + 0 + + 1 2 + + 3 4 5 6 + + 7 8 9 10 11 12 13 14 + + 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 + + +In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In +a usual binary tournament we see in sports, each cell is the winner +over the two cells it tops, and we can trace the winner down the tree +to see all opponents s/he had. However, in many computer applications +of such tournaments, we do not need to trace the history of a winner. +To be more memory efficient, when a winner is promoted, we try to +replace it by something else at a lower level, and the rule becomes +that a cell and the two cells it tops contain three different items, +but the top cell "wins" over the two topped cells. + +If this heap invariant is protected at all time, index 0 is clearly +the overall winner. The simplest algorithmic way to remove it and +find the "next" winner is to move some loser (let's say cell 30 in the +diagram above) into the 0 position, and then percolate this new 0 down +the tree, exchanging values, until the invariant is re-established. +This is clearly logarithmic on the total number of items in the tree. +By iterating over all items, you get an O(n ln n) sort. + +A nice feature of this sort is that you can efficiently insert new +items while the sort is going on, provided that the inserted items are +not "better" than the last 0'th element you extracted. This is +especially useful in simulation contexts, where the tree holds all +incoming events, and the "win" condition means the smallest scheduled +time. When an event schedule other events for execution, they are +scheduled into the future, so they can easily go into the heap. So, a +heap is a good structure for implementing schedulers (this is what I +used for my MIDI sequencer :-). 
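The heapq module docstring embedded above lists the basic operations (`heappush`, `heappop`, `heapify`, `heapreplace`) and the 0-based min-heap invariant. A minimal runnable sketch of that usage:

```python
import heapq

heap = []
for value in [5, 1, 4, 2, 3]:
    heapq.heappush(heap, value)     # maintain the heap invariant on insert

print(heap[0])                      # 1 -- smallest item without popping
print(heapq.heappop(heap))          # 1

data = [9, 7, 8, 6]
heapq.heapify(data)                 # in-place, linear time
print(heapq.heapreplace(data, 10))  # pops 6 and pushes 10 in one step
```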
+ +Various structures for implementing schedulers have been extensively +studied, and heaps are good for this, as they are reasonably speedy, +the speed is almost constant, and the worst case is not much different +than the average case. However, there are other representations which +are more efficient overall, yet the worst cases might be terrible. + +Heaps are also very useful in big disk sorts. You most probably all +know that a big sort implies producing "runs" (which are pre-sorted +sequences, which size is usually related to the amount of CPU memory), +followed by a merging passes for these runs, which merging is often +very cleverly organised[1]. It is very important that the initial +sort produces the longest runs possible. Tournaments are a good way +to that. If, using all the memory available to hold a tournament, you +replace and percolate items that happen to fit the current run, you'll +produce runs which are twice the size of the memory for random input, +and much better for input fuzzily ordered. + +Moreover, if you output the 0'th item on disk and get an input which +may not fit in the current tournament (because the value "wins" over +the last output value), it cannot fit in the heap, so the size of the +heap decreases. The freed memory could be cleverly reused immediately +for progressively building a second heap, which grows at exactly the +same rate the first heap is melting. When the first heap completely +vanishes, you switch heaps and start a new run. Clever and quite +effective! + +In a word, heaps are useful memory structures to know. I use them in +a few applications, and I think it is good to keep a `heap' module +around. :-) + +-------------------- +[1] The disk balancing algorithms which are current, nowadays, are +more annoying than clever, and this is a consequence of the seeking +capabilities of the disks. On devices which cannot seek, like big +tape drives, the story was quite different, and one had to be very +clever to ensure (far in advance) that each tape movement will be the +most effective possible (that is, will best participate at +"progressing" the merge). Some tapes were even able to read +backwards, and this was also used to avoid the rewinding time. +Believe me, real good tape sorts were quite spectacular to watch! +From all times, sorting has always been a Great Art! :-) +nsmallestheapPush item onto heap, maintaining the heap invariant._siftdownPop the smallest item off the heap, maintaining the heap invariant.lasteltreturnitem_siftupPop and return the current smallest value, and add the new item. + + This is more efficient than heappop() followed by heappush(), and can be + more appropriate when using a fixed-size heap. Note that the value + returned may be larger than item! That constrains reasonable uses of + this routine unless written as part of a conditional replacement: + + if item > heap[0]: + item = heapreplace(heap, item) + Fast version of a heappush followed by a heappop.Transform list into a heap, in-place, in O(len(x)) time.Maxheap version of a heappop._siftup_maxMaxheap version of a heappop followed by a heappush.Transform list into a maxheap, in-place, in O(len(x)) time.newitemparentposchildposrightpos_siftdown_maxMaxheap variant of _siftdownMaxheap variant of _siftupMerge multiple sorted inputs into a single sorted output. 
+ + Similar to sorted(itertools.chain(*iterables)) but returns a generator, + does not pull the data into memory all at once, and assumes that each of + the input streams is already sorted (smallest to largest). + + >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) + [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] + + If *key* is not None, applies a key function to each element to determine + its sort order. + + >>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len)) + ['dog', 'cat', 'fish', 'horse', 'kangaroo'] + + h_append_heapify_heappop_heapreplaceorderkey_valueFind the n smallest elements in a dataset. + + Equivalent to: sorted(iterable, key=key)[:n] + _orderFind the n largest elements in a dataset. + + Equivalent to: sorted(iterable, key=key, reverse=True)[:n] + # Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger# raises appropriate IndexError if heap is empty# Transform bottom-up. The largest index there's any point to looking at# is the largest with a child index in-range, so must have 2*i + 1 < n,# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos# is the index of a leaf with a possibly out-of-order value. Restore the# heap invariant.# Follow the path to the root, moving parents down until finding a place# newitem fits.# The child indices of heap index pos are already heaps, and we want to make# a heap at index pos too. We do this by bubbling the smaller child of# pos up (and so on with that child's children, etc) until hitting a leaf,# then using _siftdown to move the oddball originally at index pos into place.# We *could* break out of the loop as soon as we find a pos where newitem <=# both its children, but turns out that's not a good idea, and despite that# many books write the algorithm that way. During a heap pop, the last array# element is sifted in, and that tends to be large, so that comparing it# against values starting from the root usually doesn't pay (= usually doesn't# get us out of the loop early). See Knuth, Volume 3, where this is# explained and quantified in an exercise.# Cutting the # of comparisons is important, since these routines have no# way to extract "the priority" from an array element, so that intelligence# is likely to be hiding in custom comparison methods, or in array elements# storing (priority, record) tuples. Comparisons are thus potentially# expensive.# On random arrays of length 1000, making this change cut the number of# comparisons made by heapify() a little, and those made by exhaustive# heappop() a lot, in accord with theory. Here are typical results from 3# runs (3 just to demonstrate how small the variance is):# Compares needed by heapify Compares needed by 1000 heappops# -------------------------- --------------------------------# 1837 cut to 1663 14996 cut to 8680# 1855 cut to 1659 14966 cut to 8678# 1847 cut to 1660 15024 cut to 8703# Building the heap by using heappush() 1000 times instead required# 2198, 2148, and 2219 compares: heapify() is more efficient, when# you can use it.# The total compares needed by list.sort() on the same lists were 8627,# 8627, and 8632 (this should be compared to the sum of heapify() and# heappop() compares): list.sort() is (unsurprisingly!) 
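The `merge()`, `nsmallest()`, and `nlargest()` docstrings embedded above describe lazy merging of sorted inputs and bounded-heap selection of the k extreme values. A short illustrative sketch (the sample data is made up):

```python
import heapq

# merge() lazily combines inputs that are already sorted (by the key, if given).
print(list(heapq.merge([1, 3, 5], [0, 2, 4])))
print(list(heapq.merge(["dog", "horse"], ["cat", "fish"], key=len)))

# nsmallest/nlargest keep only k extreme values in a small heap.
scores = [("ann", 82), ("bob", 95), ("cid", 67), ("dee", 90)]
print(heapq.nsmallest(2, scores, key=lambda s: s[1]))
print(heapq.nlargest(2, scores, key=lambda s: s[1]))
```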
more efficient# for sorting.# Bubble up the smaller child until hitting a leaf.# leftmost child position# Set childpos to index of smaller child.# Move the smaller child up.# The leaf at pos is empty now. Put newitem there, and bubble it up# to its final resting place (by sifting its parents down).# Bubble up the larger child until hitting a leaf.# Set childpos to index of larger child.# Move the larger child up.# raises StopIteration when exhausted# restore heap condition# remove empty iterator# fast case when only a single iterator remains# Algorithm notes for nlargest() and nsmallest()# ==============================================# Make a single pass over the data while keeping the k most extreme values# in a heap. Memory consumption is limited to keeping k values in a list.# Measured performance for random inputs:# number of comparisons# n inputs k-extreme values (average of 5 trials) % more than min()# ------------- ---------------- --------------------- -----------------# 1,000 100 3,317 231.7%# 10,000 100 14,046 40.5%# 100,000 100 105,749 5.7%# 1,000,000 100 1,007,751 0.8%# 10,000,000 100 10,009,401 0.1%# Theoretical number of comparisons for k smallest of n random inputs:# Step Comparisons Action# ---- -------------------------- ---------------------------# 1 1.66 * k heapify the first k-inputs# 2 n - k compare remaining elements to top of heap# 3 k * (1 + lg2(k)) * ln(n/k) replace the topmost value on the heap# 4 k * lg2(k) - (k/2) final sort of the k most extreme values# Combining and simplifying for a rough estimate gives:# comparisons = n + k * (log(k, 2) * log(n/k) + log(k, 2) + log(n/k))# Computing the number of comparisons for step 3:# -----------------------------------------------# * For the i-th new value from the iterable, the probability of being in the# k most extreme values is k/i. For example, the probability of the 101st# value seen being in the 100 most extreme values is 100/101.# * If the value is a new extreme value, the cost of inserting it into the# heap is 1 + log(k, 2).# * The probability times the cost gives:# (k/i) * (1 + log(k, 2))# * Summing across the remaining n-k elements gives:# sum((k/i) * (1 + log(k, 2)) for i in range(k+1, n+1))# * This reduces to:# (H(n) - H(k)) * k * (1 + log(k, 2))# * Where H(n) is the n-th harmonic number estimated by:# gamma = 0.5772156649# H(n) = log(n, e) + gamma + 1 / (2 * n)# http://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#Rate_of_divergence# * Substituting the H(n) formula:# comparisons = k * (1 + log(k, 2)) * (log(n/k, e) + (1/n - 1/k) / 2)# Worst-case for step 3:# ----------------------# In the worst case, the input data is reversed sorted so that every new element# must be inserted in the heap:# comparisons = 1.66 * k + log(k, 2) * (n - k)# Alternative Algorithms# Other algorithms were not used because they:# 1) Took much more auxiliary memory,# 2) Made multiple passes over the data.# 3) Made more comparisons in common cases (small k, large n, semi-random input).# See the more detailed comparison of approach at:# http://code.activestate.com/recipes/577573-compare-algorithms-for-heapqsmallest# Short-cut for n==1 is to use min()# When n>=size, it's faster to use sorted()# When key is none, use simpler decoration# put the range(n) first so that zip() doesn't# consume one too many elements from the iterator# General case, slowest method# Short-cut for n==1 is to use max()# If available, use C implementationb'Heap queue algorithm (a.k.a. priority queue). 
+ +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. + +Usage: + +heap = [] # creates an empty heap +heappush(heap, item) # pushes a new item on the heap +item = heappop(heap) # pops the smallest item from the heap +item = heap[0] # smallest item on the heap without popping it +heapify(x) # transforms list into a heap, in-place, in linear time +item = heapreplace(heap, item) # pops and returns smallest item, and adds + # new item; the heap size is unchanged + +Our API differs from textbook heap algorithms as follows: + +- We use 0-based indexing. This makes the relationship between the + index for a node and the indexes for its children slightly less + obvious, but is more suitable since Python uses 0-based indexing. + +- Our heappop() method returns the smallest item, not the largest. + +These two make it possible to view the heap as a regular Python list +without surprises: heap[0] is the smallest item, and heap.sort() +maintains the heap invariant! +'b'Heap queues + +[explanation by François Pinard] + +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. + +The strange invariant above is meant to be an efficient memory +representation for a tournament. The numbers below are `k', not a[k]: + + 0 + + 1 2 + + 3 4 5 6 + + 7 8 9 10 11 12 13 14 + + 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 + + +In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In +a usual binary tournament we see in sports, each cell is the winner +over the two cells it tops, and we can trace the winner down the tree +to see all opponents s/he had. However, in many computer applications +of such tournaments, we do not need to trace the history of a winner. +To be more memory efficient, when a winner is promoted, we try to +replace it by something else at a lower level, and the rule becomes +that a cell and the two cells it tops contain three different items, +but the top cell "wins" over the two topped cells. + +If this heap invariant is protected at all time, index 0 is clearly +the overall winner. The simplest algorithmic way to remove it and +find the "next" winner is to move some loser (let's say cell 30 in the +diagram above) into the 0 position, and then percolate this new 0 down +the tree, exchanging values, until the invariant is re-established. +This is clearly logarithmic on the total number of items in the tree. +By iterating over all items, you get an O(n ln n) sort. + +A nice feature of this sort is that you can efficiently insert new +items while the sort is going on, provided that the inserted items are +not "better" than the last 0'th element you extracted. This is +especially useful in simulation contexts, where the tree holds all +incoming events, and the "win" condition means the smallest scheduled +time. When an event schedule other events for execution, they are +scheduled into the future, so they can easily go into the heap. So, a +heap is a good structure for implementing schedulers (this is what I +used for my MIDI sequencer :-). 
+ +Various structures for implementing schedulers have been extensively +studied, and heaps are good for this, as they are reasonably speedy, +the speed is almost constant, and the worst case is not much different +than the average case. However, there are other representations which +are more efficient overall, yet the worst cases might be terrible. + +Heaps are also very useful in big disk sorts. You most probably all +know that a big sort implies producing "runs" (which are pre-sorted +sequences, which size is usually related to the amount of CPU memory), +followed by a merging passes for these runs, which merging is often +very cleverly organised[1]. It is very important that the initial +sort produces the longest runs possible. Tournaments are a good way +to that. If, using all the memory available to hold a tournament, you +replace and percolate items that happen to fit the current run, you'll +produce runs which are twice the size of the memory for random input, +and much better for input fuzzily ordered. + +Moreover, if you output the 0'th item on disk and get an input which +may not fit in the current tournament (because the value "wins" over +the last output value), it cannot fit in the heap, so the size of the +heap decreases. The freed memory could be cleverly reused immediately +for progressively building a second heap, which grows at exactly the +same rate the first heap is melting. When the first heap completely +vanishes, you switch heaps and start a new run. Clever and quite +effective! + +In a word, heaps are useful memory structures to know. I use them in +a few applications, and I think it is good to keep a `heap' module +around. :-) + +-------------------- +[1] The disk balancing algorithms which are current, nowadays, are +more annoying than clever, and this is a consequence of the seeking +capabilities of the disks. On devices which cannot seek, like big +tape drives, the story was quite different, and one had to be very +clever to ensure (far in advance) that each tape movement will be the +most effective possible (that is, will best participate at +"progressing" the merge). Some tapes were even able to read +backwards, and this was also used to avoid the rewinding time. +Believe me, real good tape sorts were quite spectacular to watch! +From all times, sorting has always been a Great Art! :-) +'b'heappush'u'heappush'b'heappop'u'heappop'b'heapify'u'heapify'b'heapreplace'u'heapreplace'b'merge'u'merge'b'nlargest'u'nlargest'b'nsmallest'u'nsmallest'b'heappushpop'u'heappushpop'b'Push item onto heap, maintaining the heap invariant.'u'Push item onto heap, maintaining the heap invariant.'b'Pop the smallest item off the heap, maintaining the heap invariant.'u'Pop the smallest item off the heap, maintaining the heap invariant.'b'Pop and return the current smallest value, and add the new item. + + This is more efficient than heappop() followed by heappush(), and can be + more appropriate when using a fixed-size heap. Note that the value + returned may be larger than item! That constrains reasonable uses of + this routine unless written as part of a conditional replacement: + + if item > heap[0]: + item = heapreplace(heap, item) + 'u'Pop and return the current smallest value, and add the new item. + + This is more efficient than heappop() followed by heappush(), and can be + more appropriate when using a fixed-size heap. Note that the value + returned may be larger than item! 
That constrains reasonable uses of + this routine unless written as part of a conditional replacement: + + if item > heap[0]: + item = heapreplace(heap, item) + 'b'Fast version of a heappush followed by a heappop.'u'Fast version of a heappush followed by a heappop.'b'Transform list into a heap, in-place, in O(len(x)) time.'u'Transform list into a heap, in-place, in O(len(x)) time.'b'Maxheap version of a heappop.'u'Maxheap version of a heappop.'b'Maxheap version of a heappop followed by a heappush.'u'Maxheap version of a heappop followed by a heappush.'b'Transform list into a maxheap, in-place, in O(len(x)) time.'u'Transform list into a maxheap, in-place, in O(len(x)) time.'b'Maxheap variant of _siftdown'u'Maxheap variant of _siftdown'b'Maxheap variant of _siftup'u'Maxheap variant of _siftup'b'Merge multiple sorted inputs into a single sorted output. + + Similar to sorted(itertools.chain(*iterables)) but returns a generator, + does not pull the data into memory all at once, and assumes that each of + the input streams is already sorted (smallest to largest). + + >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) + [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] + + If *key* is not None, applies a key function to each element to determine + its sort order. + + >>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len)) + ['dog', 'cat', 'fish', 'horse', 'kangaroo'] + + 'u'Merge multiple sorted inputs into a single sorted output. + + Similar to sorted(itertools.chain(*iterables)) but returns a generator, + does not pull the data into memory all at once, and assumes that each of + the input streams is already sorted (smallest to largest). + + >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) + [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] + + If *key* is not None, applies a key function to each element to determine + its sort order. + + >>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len)) + ['dog', 'cat', 'fish', 'horse', 'kangaroo'] + + 'b'Find the n smallest elements in a dataset. + + Equivalent to: sorted(iterable, key=key)[:n] + 'u'Find the n smallest elements in a dataset. + + Equivalent to: sorted(iterable, key=key)[:n] + 'b'Find the n largest elements in a dataset. + + Equivalent to: sorted(iterable, key=key, reverse=True)[:n] + 'u'Find the n largest elements in a dataset. + + Equivalent to: sorted(iterable, key=key, reverse=True)[:n] + 'u'heapq'HMAC (Keyed-Hashing for Message Authentication) module. + +Implements the HMAC algorithm as described by RFC 2104. +compare_digest_hashopenssl_openssl_md_methstrans_5Ctrans_36HMACRFC 2104 HMAC class. Also complies with RFC 4231. + + This supports the API for Cryptographic Hash Functions (PEP 247). + digestmodCreate a new HMAC object. + + key: bytes or buffer, key for the keyed hash object. + msg: bytes or buffer, Initial input for the hash or None. + digestmod: A hash name suitable for hashlib.new(). *OR* + A hashlib constructor returning a new hash object. *OR* + A module supporting PEP 247. + + Required as of 3.8, despite its position after the optional + msg argument. Passing it as a keyword argument is + recommended, though not required for legacy API reasons. 
+ key: expected bytes or bytearray, but got %rMissing required parameter 'digestmod'.digest_consblock_size of %d seems too small; using our default of %d.'block_size of %d seems too small; using our ''default of %d.'No block_size attribute on given digest object; Assuming %d.'No block_size attribute on given digest object; ''Assuming %d.'hmac-Feed data from msg into this hashing object.Return a separate copy of this hashing object. + + An update to this copy won't affect the original object. + _currentReturn a hash object for the current state. + + To be used only internally with digest() and hexdigest(). + Return the hash value of this hashing object. + + This returns the hmac value as bytes. The object is + not altered in any way by this function; you can continue + updating the object after calling this function. + Like digest(), but returns a string of hexadecimal digits instead. + Create a new hashing object and return it. + + key: bytes or buffer, The starting key for the hash. + msg: bytes or buffer, Initial input for the hash, or None. + digestmod: A hash name suitable for hashlib.new(). *OR* + A hashlib constructor returning a new hash object. *OR* + A module supporting PEP 247. + + Required as of 3.8, despite its position after the optional + msg argument. Passing it as a keyword argument is + recommended, though not required for legacy API reasons. + + You can now feed arbitrary bytes into the object using its update() + method, and can ask for the hash value at any time by calling its digest() + or hexdigest() methods. + Fast inline implementation of HMAC. + + key: bytes or buffer, The key for the keyed hash object. + msg: bytes or buffer, Input message. + digest: A hash name suitable for hashlib.new() for best performance. *OR* + A hashlib constructor returning a new hash object. *OR* + A module supporting PEP 247. + # The size of the digests returned by HMAC depends on the underlying# hashing module used. Use digest_size from the instance of HMAC instead.# 512-bit HMAC; can be changed in subclasses.# self.blocksize is the default blocksize. self.block_size is# effective block size as well as the public API attribute.# Call __new__ directly to avoid the expensive __init__.b'HMAC (Keyed-Hashing for Message Authentication) module. + +Implements the HMAC algorithm as described by RFC 2104. +'u'HMAC (Keyed-Hashing for Message Authentication) module. + +Implements the HMAC algorithm as described by RFC 2104. +'b'RFC 2104 HMAC class. Also complies with RFC 4231. + + This supports the API for Cryptographic Hash Functions (PEP 247). + 'u'RFC 2104 HMAC class. Also complies with RFC 4231. + + This supports the API for Cryptographic Hash Functions (PEP 247). + 'b'Create a new HMAC object. + + key: bytes or buffer, key for the keyed hash object. + msg: bytes or buffer, Initial input for the hash or None. + digestmod: A hash name suitable for hashlib.new(). *OR* + A hashlib constructor returning a new hash object. *OR* + A module supporting PEP 247. + + Required as of 3.8, despite its position after the optional + msg argument. Passing it as a keyword argument is + recommended, though not required for legacy API reasons. + 'u'Create a new HMAC object. + + key: bytes or buffer, key for the keyed hash object. + msg: bytes or buffer, Initial input for the hash or None. + digestmod: A hash name suitable for hashlib.new(). *OR* + A hashlib constructor returning a new hash object. *OR* + A module supporting PEP 247. + + Required as of 3.8, despite its position after the optional + msg argument. 
Passing it as a keyword argument is + recommended, though not required for legacy API reasons. + 'b'key: expected bytes or bytearray, but got %r'u'key: expected bytes or bytearray, but got %r'b'Missing required parameter 'digestmod'.'u'Missing required parameter 'digestmod'.'b'block_size of %d seems too small; using our default of %d.'u'block_size of %d seems too small; using our default of %d.'b'No block_size attribute on given digest object; Assuming %d.'u'No block_size attribute on given digest object; Assuming %d.'b'hmac-'u'hmac-'b'Feed data from msg into this hashing object.'u'Feed data from msg into this hashing object.'b'Return a separate copy of this hashing object. + + An update to this copy won't affect the original object. + 'u'Return a separate copy of this hashing object. + + An update to this copy won't affect the original object. + 'b'Return a hash object for the current state. + + To be used only internally with digest() and hexdigest(). + 'u'Return a hash object for the current state. + + To be used only internally with digest() and hexdigest(). + 'b'Return the hash value of this hashing object. + + This returns the hmac value as bytes. The object is + not altered in any way by this function; you can continue + updating the object after calling this function. + 'u'Return the hash value of this hashing object. + + This returns the hmac value as bytes. The object is + not altered in any way by this function; you can continue + updating the object after calling this function. + 'b'Like digest(), but returns a string of hexadecimal digits instead. + 'u'Like digest(), but returns a string of hexadecimal digits instead. + 'b'Create a new hashing object and return it. + + key: bytes or buffer, The starting key for the hash. + msg: bytes or buffer, Initial input for the hash, or None. + digestmod: A hash name suitable for hashlib.new(). *OR* + A hashlib constructor returning a new hash object. *OR* + A module supporting PEP 247. + + Required as of 3.8, despite its position after the optional + msg argument. Passing it as a keyword argument is + recommended, though not required for legacy API reasons. + + You can now feed arbitrary bytes into the object using its update() + method, and can ask for the hash value at any time by calling its digest() + or hexdigest() methods. + 'u'Create a new hashing object and return it. + + key: bytes or buffer, The starting key for the hash. + msg: bytes or buffer, Initial input for the hash, or None. + digestmod: A hash name suitable for hashlib.new(). *OR* + A hashlib constructor returning a new hash object. *OR* + A module supporting PEP 247. + + Required as of 3.8, despite its position after the optional + msg argument. Passing it as a keyword argument is + recommended, though not required for legacy API reasons. + + You can now feed arbitrary bytes into the object using its update() + method, and can ask for the hash value at any time by calling its digest() + or hexdigest() methods. + 'b'Fast inline implementation of HMAC. + + key: bytes or buffer, The key for the keyed hash object. + msg: bytes or buffer, Input message. + digest: A hash name suitable for hashlib.new() for best performance. *OR* + A hashlib constructor returning a new hash object. *OR* + A module supporting PEP 247. + 'u'Fast inline implementation of HMAC. + + key: bytes or buffer, The key for the keyed hash object. + msg: bytes or buffer, Input message. + digest: A hash name suitable for hashlib.new() for best performance. 
*OR* + A hashlib constructor returning a new hash object. *OR* + A module supporting PEP 247. + 'u'hmac'Get useful information from live Python objects. + +This module encapsulates the interface provided by the internal special +attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion. +It also provides some help for examining source code and class layout. + +Here are some of the useful functions provided by this module: + + ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(), + isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(), + isroutine() - check object types + getmembers() - get members of an object that satisfy a given condition + + getfile(), getsourcefile(), getsource() - find an object's source code + getdoc(), getcomments() - get documentation on an object + getmodule() - determine the module that an object came from + getclasstree() - arrange classes so as to represent their hierarchy + + getargvalues(), getcallargs() - get info about function arguments + getfullargspec() - same, with support for Python 3 features + formatargvalues() - format an argument spec + getouterframes(), getinnerframes() - get info about frames + currentframe() - get the current stack frame + stack(), trace() - get info about frames on the stack or in a traceback + + signature() - get a Signature object for the callable +Ka-Ping Yee Yury Selivanov importlib.machinerymod_dictCO_TPFLAGS_IS_ABSTRACTReturn true if the object is a module. + + Module objects provide these attributes: + __cached__ pathname to byte compiled file + __doc__ documentation string + __file__ filename (missing for built-in modules)Return true if the object is a class. + + Class objects provide these attributes: + __doc__ documentation string + __module__ name of module in which this class was definedReturn true if the object is an instance method. + + Instance method objects provide these attributes: + __doc__ documentation string + __name__ name with which this method was defined + __func__ function object containing implementation of method + __self__ instance to which this method is boundReturn true if the object is a method descriptor. + + But not if ismethod() or isclass() or isfunction() are true. + + This is new in Python 2.2, and, for example, is true of int.__add__. + An object passing this test has a __get__ attribute but not a __set__ + attribute, but beyond that the set of attributes varies. __name__ is + usually sensible, and __doc__ often is. + + Methods implemented via descriptors that also pass one of the other + tests return false from the ismethoddescriptor() test, simply because + the other tests promise more -- you can, e.g., count on having the + __func__ attribute (etc) when an object passes ismethod().tpisdatadescriptorReturn true if the object is a data descriptor. + + Data descriptors have a __set__ or a __delete__ attribute. Examples are + properties (defined in Python) and getsets and members (defined in C). + Typically, data descriptors will also have __name__ and __doc__ attributes + (properties, getsets, and members have both of these attributes), but this + is not guaranteed.MemberDescriptorTypeismemberdescriptorReturn true if the object is a member descriptor. + + Member descriptors are specialized descriptors defined in extension + modules.GetSetDescriptorTypeisgetsetdescriptorReturn true if the object is a getset descriptor. + + getset descriptors are specialized descriptors defined in extension + modules.Return true if the object is a user-defined function. 
+ + Function objects provide these attributes: + __doc__ documentation string + __name__ name with which this function was defined + __code__ code object containing compiled function bytecode + __defaults__ tuple of any default values for arguments + __globals__ global namespace in which this function was defined + __annotations__ dict of parameter annotations + __kwdefaults__ dict of keyword only parameters with defaults_has_code_flagReturn true if ``f`` is a function (or a method or functools.partial + wrapper wrapping a function) whose code object has the given ``flag`` + set in its flags.Return true if the object is a user-defined generator function. + + Generator function objects provide the same attributes as functions. + See help(isfunction) for a list of attributes.Return true if the object is a coroutine function. + + Coroutine functions are defined with "async def" syntax. + isasyncgenfunctionReturn true if the object is an asynchronous generator function. + + Asynchronous generator functions are defined with "async def" + syntax and have "yield" expressions in their body. + isasyncgenReturn true if the object is an asynchronous generator.AsyncGeneratorTypeReturn true if the object is a generator. + + Generator objects provide these attributes: + __iter__ defined to support iteration over container + close raises a new GeneratorExit exception inside the + generator to terminate the iteration + gi_code code object + gi_frame frame object or possibly None once the generator has + been exhausted + gi_running set to 1 when generator is executing, 0 otherwise + next return the next item from the container + send resumes the generator and "sends" a value that becomes + the result of the current yield-expression + throw used to raise an exception inside the generatorReturn true if the object is a coroutine.Return true if object can be passed to an ``await`` expression.CO_ITERABLE_COROUTINEReturn true if the object is a traceback. + + Traceback objects provide these attributes: + tb_frame frame object at this level + tb_lasti index of last attempted instruction in bytecode + tb_lineno current line number in Python source code + tb_next next inner traceback object (called by this level)TracebackTypeReturn true if the object is a frame object. + + Frame objects provide these attributes: + f_back next outer frame object (this frame's caller) + f_builtins built-in namespace seen by this frame + f_code code object being executed in this frame + f_globals global namespace seen by this frame + f_lasti index of last attempted instruction in bytecode + f_lineno current line number in Python source code + f_locals local namespace seen by this frame + f_trace tracing function for this frame, or NoneFrameTypeReturn true if the object is a code object. 
+ + Code objects provide these attributes: + co_argcount number of arguments (not including *, ** args + or keyword only arguments) + co_code string of raw compiled bytecode + co_cellvars tuple of names of cell variables + co_consts tuple of constants used in the bytecode + co_filename name of file in which this code object was created + co_firstlineno number of first line in Python source code + co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg + | 16=nested | 32=generator | 64=nofree | 128=coroutine + | 256=iterable_coroutine | 512=async_generator + co_freevars tuple of names of free variables + co_posonlyargcount number of positional only arguments + co_kwonlyargcount number of keyword only arguments (not including ** arg) + co_lnotab encoded mapping of line numbers to bytecode indices + co_name name with which this code object was defined + co_names tuple of names of local variables + co_nlocals number of local variables + co_stacksize virtual machine stack space required + co_varnames tuple of names of arguments and local variablesisbuiltinReturn true if the object is a built-in function or method. + + Built-in functions and methods provide these attributes: + __doc__ documentation string + __name__ original name of this function or method + __self__ instance to which a method is bound, or NoneReturn true if the object is any kind of function or method.isabstractReturn true if the object is an abstract base class (ABC).getmembersReturn all members of an object as (name, value) pairs sorted by name. + Optionally, only return members that satisfy a given predicate.getmroprocessedname kind defining_class objectclassify_class_attrsReturn list of attribute-descriptor tuples. + + For each name in dir(cls), the return list contains a 4-tuple + with these elements: + + 0. The name (a string). + + 1. The kind of attribute this is, one of these strings: + 'class method' created via classmethod() + 'static method' created via staticmethod() + 'property' created via property() + 'method' any other flavor of method or descriptor + 'data' not a method + + 2. The class which defined this attribute (a class). + + 3. The object as obtained by calling getattr; if this fails, or if the + resulting object does not live anywhere in the class' mro (including + metaclasses) then the object is looked up in the defining class's + dict (found by walking the mro). + + If one of the items in dir(cls) is stored in the metaclass it will now + be discovered and not have None be listed as the class in which it was + defined. Any items whose home class cannot be discovered are skipped. + metamroclass_basesall_baseshomeclsget_objdict_obj__dict__ is special, don't want the proxylast_clssrch_clssrch_objBuiltinMethodTypestatic methodClassMethodDescriptorTypeclass methodReturn tuple of base classes (including cls) in method resolution order.Get the object wrapped by *func*. + + Follows the chain of :attr:`__wrapped__` attributes returning the last + object in the chain. + + *stop* is an optional callback accepting an object in the wrapper chain + as its sole argument that allows the unwrapping to be terminated early if + the callback returns a true value. If the callback never returns a true + value, the last object in the chain is returned as usual. For example, + :func:`signature` uses this to stop unwrapping if any object in the + chain has a ``__signature__`` attribute defined. + + :exc:`ValueError` is raised if a cycle is encountered. 
+ + _is_wrapperrecursion_limitid_funcwrapper loop when unwrapping {!r}indentsizeReturn the indent size, in spaces, at the start of a line of text.expline_findclass_finddocgetdocGet the documentation string for an object. + + All tabs are expanded to spaces. To clean up docstrings that are + indented to line up with blocks of code, any whitespace than can be + uniformly removed from the second line onwards is removed.Clean up indentation from docstrings. + + Any whitespace that can be uniformly removed from the second line + onwards is removed.marginWork out which source or compiled file an object was defined in.{!r} is a built-in module{!r} is a built-in classmodule, class, method, function, traceback, frame, or code object was expected, got {}'module, class, method, function, traceback, frame, or ''code object was expected, got {}'getmodulenameReturn the module name for a given file, or None.all_suffixesneglenReturn the filename that can be used to locate an object's source. + Return None if no way can be identified to get the source. + all_bytecode_suffixesgetabsfile_filenameReturn an absolute path to the source or compiled file for an object. + + The idea is for each object to have a unique origin, so this routine + normalizes the result as much as possible.modulesbyfile_filesbymodnameReturn the module an object was defined in, or None if not found.mainobjectbuiltinbuiltinobjectfindsourceReturn the entire source file and starting line number for an object. + + The argument may be a module, class, method, function, traceback, frame, + or code object. The source code is returned as a list of all the lines + in the file and the line number indexes a line in that list. An OSError + is raised if the source code cannot be retrieved.source code not availablecould not get source code^(\s*)class\s*\bcandidatescould not find class definitioncould not find function definitionlnum^(\s*def\s)|(\s*async\s+def\s)|(.*(?getsourceReturn the text of the source code for an object. + + The argument may be a module, class, method, function, traceback, frame, + or code object. The source code is returned as a single string. An + OSError is raised if the source code cannot be retrieved.walktreeRecursive helper function for getclasstree().getclasstreeArrange the given list of classes into a hierarchy of nested lists. + + Where a nested list appears, it contains classes derived from the class + whose entry immediately precedes the list. Each entry is a 2-tuple + containing a class and a tuple of its base classes. If the 'unique' + argument is true, exactly one entry appears in the returned structure + for each class in the given list. Otherwise, classes using multiple + inheritance and their descendants will appear multiple times.Argumentsargs, varargs, varkwgetargsGet information about the arguments accepted by a code object. + + Three things are returned: (args, varargs, varkw), where + 'args' is the list of argument names. Keyword-only arguments are + appended. 'varargs' and 'varkw' are the names of the * and ** + arguments or None.{!r} is not a code objectnkwargskwonlyargsvarargsCO_VARARGSvarkwCO_VARKEYWORDSArgSpecargs varargs keywords defaultsgetargspecGet the names and default values of a function's parameters. + + A tuple of four things is returned: (args, varargs, keywords, defaults). + 'args' is a list of the argument names, including keyword-only argument names. + 'varargs' and 'keywords' are the names of the * and ** parameters or None. 
+ 'defaults' is an n-tuple of the default values of the last n parameters. + + This function is deprecated, as it does not support annotations or + keyword-only parameters and will raise ValueError if either is present + on the supplied callable. + + For a more structured introspection API, use inspect.signature() instead. + + Alternatively, use getfullargspec() for an API with a similar namedtuple + based interface, but full support for annotations and keyword-only + parameters. + + Deprecated since Python 3.5, use `inspect.getfullargspec()`. + inspect.getargspec() is deprecated since Python 3.0, use inspect.signature() or inspect.getfullargspec()"inspect.getargspec() is deprecated since Python 3.0, ""use inspect.signature() or inspect.getfullargspec()"getfullargspeckwonlydefaultsFunction has keyword-only parameters or annotations, use inspect.signature() API which can support them"Function has keyword-only parameters or annotations"", use inspect.signature() API which can support them"FullArgSpecargs, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotationsGet the names and default values of a callable object's parameters. + + A tuple of seven things is returned: + (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations). + 'args' is a list of the parameter names. + 'varargs' and 'varkw' are the names of the * and ** parameters or None. + 'defaults' is an n-tuple of the default values of the last n parameters. + 'kwonlyargs' is a list of keyword-only parameter names. + 'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults. + 'annotations' is a dictionary mapping parameter names to annotations. + + Notable differences from inspect.signature(): + - the "self" parameter is always reported, even for bound methods + - wrapper chains defined by __wrapped__ *not* unwrapped automatically + _signature_from_callablefollow_wrapper_chainsskip_bound_argSignaturesigclsunsupported callableposonlyargsreturn_annotation_POSITIONAL_ONLY_POSITIONAL_OR_KEYWORD_VAR_POSITIONAL_KEYWORD_ONLY_VAR_KEYWORDannotationArgInfoargs varargs keywords localsgetargvaluesGet information about arguments passed into a particular frame. + + A tuple of four things is returned: (args, varargs, varkw, locals). + 'args' is a list of the argument names. + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + 'locals' is the locals dictionary of the given frame.formatannotationbase_moduletyping.formatannotationrelativeto_formatannotationformatargspec -> formatargformatvarargsformatvarkwformatvalueformatreturnsFormat an argument spec from the values returned by getfullargspec. + + The first seven arguments are (args, varargs, varkw, defaults, + kwonlyargs, kwonlydefaults, annotations). The other five arguments + are the corresponding optional formatting functions that are called to + turn names and values into strings. The last argument is an optional + function to format the sequence of arguments. + + Deprecated since Python 3.5: use the `signature` function and `Signature` + objects. + `formatargspec` is deprecated since Python 3.5. Use `signature` and the `Signature` object directly"`formatargspec` is deprecated since Python 3.5. Use `signature` and ""the `Signature` object directly"formatargandannotationfirstdefaultkwonlyargformatargvaluesFormat an argument spec from the 4 values returned by getargvalues. + + The first four arguments are (args, varargs, varkw, locals). 
The + next four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. The ninth + argument is an optional function to format the sequence of arguments._missing_argumentsf_nameargnames{} and {}, {} and {}%s() missing %i required %s argument%s: %spositionalkeyword-only_too_manykwonlydefcountgivenatleastkwonly_givenat least %dfrom %d to %dkwonly_sig positional argument%s (and %d keyword-only argument%s)%s() takes %s positional argument%s but %d%s %s givenwasweregetcallargsGet the mapping of arguments to values. + + A dict is returned, with keys the function argument names (including the + names of the * and ** arguments, if any), and values the respective bound + values from 'positional' and 'named'.arg2valuenum_posnum_argsnum_defaultspossible_kwargs%s() got an unexpected keyword argument %r%s() got multiple values for argument %rreqkwargClosureVarsnonlocals globals builtins unboundgetclosurevars + Get the mapping of free variables to their current values. + + Returns a named tuple of dicts mapping the current nonlocal, global + and builtin references as seen by the body of the function. A final + set of unbound names that could not be resolved is also provided. + {!r} is not a Python functionnonlocal_varscellcell_contentsglobal_ns__builtins__builtin_nsglobal_varsbuiltin_varsunbound_namesTracebackfilename lineno function code_context indexgetframeinfoGet information about a frame or traceback object. + + A tuple of five things is returned: the filename, the line number of + the current line, the function name, a list of lines of context from + the source code, and the index of the current line within that list. + The optional second argument specifies the number of lines of context + to return, which are centered around the current line.{!r} is not a frame or traceback objectgetlinenoGet the line number from a frame object, allowing for optimization.FrameInfogetouterframesGet a list of records for a frame and all higher (calling) frames. + + Each record contains a frame object, filename, line number, function + name, a list of lines of context, and index within the context.framelistframeinfogetinnerframesGet a list of records for a traceback's frame and all lower frames. + + Each record contains a frame object, filename, line number, function + name, a list of lines of context, and index within the context.Return the frame of the caller or None if this is not possible.Return a list of records for the stack above the caller's frame.Return a list of records for the stack below the current exception._static_getmro_check_instanceinstance_dict_check_class_shadowed_dict_is_typeclass_dictgetattr_staticRetrieve attributes without triggering dynamic lookup via the + descriptor protocol, __getattr__ or __getattribute__. + + Note: this function may not be able to retrieve all attributes + that getattr can fetch (like dynamically created attributes) + and may find attributes that getattr can't (like descriptors + that raise AttributeError). It can also return descriptor objects + instead of instance members in some cases. See the + documentation for details. + instance_resultklass_resultGEN_CREATEDGEN_RUNNINGGEN_SUSPENDEDGEN_CLOSEDgetgeneratorstateGet current state of a generator-iterator. + + Possible states are: + GEN_CREATED: Waiting to start execution. + GEN_RUNNING: Currently being executed by the interpreter. + GEN_SUSPENDED: Currently suspended at a yield expression. + GEN_CLOSED: Execution has completed. 
+ getgeneratorlocals + Get the mapping of generator local variables to their current values. + + A dict is returned, with the keys the local variable names and values the + bound values.{!r} is not a Python generatorCORO_CREATEDCORO_RUNNINGCORO_SUSPENDEDCORO_CLOSEDgetcoroutinestateGet current state of a coroutine object. + + Possible states are: + CORO_CREATED: Waiting to start execution. + CORO_RUNNING: Currently being executed by the interpreter. + CORO_SUSPENDED: Currently suspended at an await expression. + CORO_CLOSED: Execution has completed. + getcoroutinelocals + Get the mapping of coroutine local variables to their current values. + + A dict is returned, with the keys the local variable names and values the + bound values._WrapperDescriptor_MethodWrapper_ClassMethodWrapper_NonUserDefinedCallables_signature_get_user_defined_methodmethod_namePrivate helper. Checks if ``cls`` has an attribute + named ``method_name`` and returns it only if it is a + pure python function. + meth_signature_get_partialwrapped_sigextra_argsPrivate helper to calculate how 'wrapped_sig' signature will + look like after applying a 'functools.partial' object (or alike) + on it. + old_paramsnew_paramspartial_argspartial_keywordsbind_partialbapartial object {!r} has incorrect argumentstransform_to_kwonlyparam_namearg_valuenew_param_signature_bound_methodPrivate helper to transform signatures for unbound + functions to bound methods. + invalid method signatureinvalid argument type_signature_is_builtinPrivate helper to test if `obj` is a callable that might + support Argument Clinic's __text_signature__ protocol. + _signature_is_functionlikePrivate helper to test if `obj` is a duck type of FunctionType. + A good example of such objects are functions compiled with + Cython, which have all attributes that a pure Python function + would have, but have their code statically compiled. + _void_signature_get_bound_param Private helper to get first parameter name from a + __text_signature__ of a builtin method, which should + be in the following format: '($param1, ...)'. + Assumptions are that the first argument won't have + a default value or an annotation. + ($cpos_signature_strip_non_python_syntax + Private helper function. Takes a signature in Argument Clinic's + extended signature format. + + Returns a tuple of three things: + * that signature re-rendered in standard Python syntax, + * the index of the "self" parameter (generally 0), or None if + the function does not have a "self" parameter, and + * the index of the last "positional only" parameter, + or None if the signature has no positional-only parameters. + self_parameterlast_positional_onlytoken_streamdelayed_commaskip_next_commacurrent_parameterERRORTOKENENCODINGclean_signature_signature_fromstrPrivate helper to parse content of '__text_signature__' + and return a Signature based on it. + ast_parameter_clsParameterdef foo: pass{!r} builtin has invalid signatureinvalidmodule_dictsys_module_dictparse_nameAnnotations are not currently supportedwrap_valueRewriteSymbolicsvisit_Attributevisit_Namename_nodedefault_node_emptyfillvaluePOSITIONAL_ONLYPOSITIONAL_OR_KEYWORDvarargVAR_POSITIONALKEYWORD_ONLYkw_defaultsVAR_KEYWORD_selfself_isboundself_ismodule_signature_from_builtinPrivate helper function to get signature for + builtin callables. 
+ {!r} is not a Python builtin function"{!r} is not a Python builtin ""function"no signature found for builtin {!r}_signature_from_functionPrivate helper: constructs Signature for the given python function.is_duck_functionfunc_codepos_countarg_namesposonly_countkeyword_only_countkeyword_onlypos_default_countnon_default_countposonly_left__validate_parameters__Private helper function to get signature for arbitrary + callable objects. + {!r} is not a callable object__signature__unexpected object {!r} in __signature__ attribute'unexpected object {!r} in __signature__ ''attribute'first_wrapped_paramsig_paramstext_sigfrom_callableno signature found for builtin type {!r}no signature found for {!r}no signature found for builtin function {!r}callable {!r} is not supported by signatureA private marker - used in Parameter & Signature.Marker object for Signature.empty and Parameter.empty._ParameterKind_PARAM_NAME_MAPPINGpositional-onlypositional or keywordvariadic positionalvariadic keywordRepresents a parameter in a function signature. + + Has the following public attributes: + + * name : str + The name of the parameter as a string. + * default : object + The default value for the parameter if specified. If the + parameter has no default value, this attribute is set to + `Parameter.empty`. + * annotation + The annotation for the parameter if specified. If the + parameter has no annotation, this attribute is set to + `Parameter.empty`. + * kind : str + Describes how argument values are bound to the parameter. + Possible values: `Parameter.POSITIONAL_ONLY`, + `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`, + `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`. + _kind_annotationvalue is not a valid Parameter.kind{} parameters cannot have default valuesname is a required attribute for Parametername must be a str, not a {}implicit arguments must be passed as positional or keyword arguments, not {}'implicit arguments must be passed as ''positional or keyword arguments, not {}'implicit{}{!r} is not a valid parameter nameCreates a customized copy of the Parameter.formatted{}: {}{} = {}{}={}<{} "{}">BoundArgumentsResult of `Signature.bind` call. Holds the mapping of arguments + to the function's parameters. + + Has the following public attributes: + + * arguments : OrderedDict + An ordered mutable mapping of parameters' names to arguments' values. + Does not contain arguments' default values. + * signature : Signature + The Signature object that created this instance. + * args : tuple + Tuple of positional arguments values. + * kwargs : dict + Dict of keyword arguments values. + _signaturekwargs_startedapply_defaultsSet default values for missing arguments. + + For variable-positional arguments (*args) the default is an + empty tuple. + + For variable-keyword arguments (**kwargs) the default is an + empty dict. + new_arguments<{} ({})>A Signature object represents the overall signature of a function. + It stores a Parameter object for each parameter accepted by the + function, as well as information specific to the function itself. + + A Signature object has the following public attributes and methods: + + * parameters : OrderedDict + An ordered mapping of parameters' names to the corresponding + Parameter objects (keyword-only arguments are in the same order + as listed in `code.co_varnames`). + * return_annotation : object + The annotation for the return type of the function if specified. + If the function has no annotation for its return type, this + attribute is set to `Signature.empty`. 
+ * bind(*args, **kwargs) -> BoundArguments + Creates a mapping from positional and keyword arguments to + parameters. + * bind_partial(*args, **kwargs) -> BoundArguments + Creates a partial mapping from positional and keyword arguments + to parameters (simulating 'functools.partial' behavior.) + _return_annotation_parameters_bound_arguments_clsConstructs Signature from the given list of Parameter + objects and 'return_annotation'. All arguments are optional. + top_kindkind_defaultswrong parameter order: {} parameter before {} parameter'wrong parameter order: {} parameter before {} ''parameter'non-default argument follows default argument'non-default argument follows default ''argument'duplicate parameter name: {!r}from_functionConstructs Signature for the given python function. + + Deprecated since Python 3.5, use `Signature.from_callable()`. + inspect.Signature.from_function() is deprecated since Python 3.5, use Signature.from_callable()"inspect.Signature.from_function() is deprecated since ""Python 3.5, use Signature.from_callable()"from_builtinConstructs Signature for the given builtin function. + + Deprecated since Python 3.5, use `Signature.from_callable()`. + inspect.Signature.from_builtin() is deprecated since Python 3.5, use Signature.from_callable()"inspect.Signature.from_builtin() is deprecated since "follow_wrappedConstructs Signature for the given callable object.Creates a customized copy of the Signature. + Pass 'parameters' and/or 'return_annotation' arguments + to override them in the new copy. + _hash_basiskwo_paramsPrivate method. Don't use directly.parameters_exarg_valsarg_valtoo many positional argumentsmultiple values for argument {arg!r}{arg!r} parameter is positional only, but was passed as a keyword'{arg!r} parameter is positional only, ''but was passed as a keyword'missing a required argument: {arg!r}kwargs_paramgot an unexpected keyword argument {arg!r}Get a BoundArguments object, that maps the passed `args` + and `kwargs` to the function's signature. Raises `TypeError` + if the passed arguments can not be bound. + Get a BoundArguments object, that partially maps the + passed `args` and `kwargs` to the function's signature. + Raises `TypeError` if the passed arguments can not be bound. + render_pos_only_separatorrender_kw_only_separatorrenderedanno -> {}Get a signature object for the passed callable. Logic for inspecting an object given at command line The object to be analysed. It supports the 'module:qualname' syntax"The object to be analysed. ""It supports the 'module:qualname' syntax"--detailsDisplay info about the module rather than its source codemod_namehas_attrsFailed to import {} ({}: {})Can't get info for builtin modules.detailsTarget: {}Origin: {}Cached: {}Loader: {}Submodule search path: {}Line: {}# This module is in the public domain. No warranties.# Create constants for the compiler flags in Include/code.h# We try to get them from dis to avoid duplication# See Include/object.h# ----------------------------------------------------------- type-checking# mutual exclusion# CPython and equivalent# Other implementations# It looks like ABCMeta.__new__ has finished running;# TPFLAGS_IS_ABSTRACT should have been accurate.# It looks like ABCMeta.__new__ has not finished running yet; we're# probably in __init_subclass__. 
We'll look for abstractmethods manually.# :dd any DynamicClassAttributes to the list of names if object is a class;# this may result in duplicate entries if, for example, a virtual# attribute with the same name as a DynamicClassAttribute exists# First try to get the value via getattr. Some descriptors don't# like calling their __get__ (see bug #1785), so fall back to# looking in the __dict__.# handle the duplicate key# could be a (currently) missing slot member, or a buggy# __dir__; discard and move on# for attributes stored in the metaclass# :dd any DynamicClassAttributes to the list of names;# attribute with the same name as a DynamicClassAttribute exists.# Get the object associated with the name, and where it was defined.# Normal objects will be looked up with both getattr and directly in# its class' dict (in case getattr fails [bug #1785], and also to look# for a docstring).# For DynamicClassAttributes on the second pass we only look in the# class's dict.# Getting an obj from the __dict__ sometimes reveals more than# using getattr. Static and class methods are dramatic examples.# if the resulting object does not live somewhere in the# mro, drop it and search the mro manually# first look in the classes# then check the metaclasses# unable to locate the attribute anywhere, most likely due to# buggy custom __dir__; discard and move on# Classify the object or its descriptor.# ----------------------------------------------------------- class helpers# -------------------------------------------------------- function helpers# remember the original func for error reporting# Memoise by id to tolerate non-hashable objects, but store objects to# ensure they aren't destroyed, which would allow their IDs to be reused.# -------------------------------------------------- source code extraction# classmethod# Should be tested before isdatadescriptor().# Find minimum indentation of any non-blank lines after first line.# Remove indentation.# Remove any trailing or leading blank lines.# Check for paths that look like an actual module file# try longest suffixes first, in case they overlap# only return a non-existent filename if the module has a PEP 302 loader# or it is in the linecache# Try the filename to modulename cache# Try the cache again with the absolute file name# Update the filename to module name cache and check yet again# Copy sys.modules in order to cope with changes while iterating# Have already mapped this module, so skip it# Always map to the name the module knows itself by# Check the main module# Check builtins# Invalidate cache if needed.# Allow filenames in form of "" to pass through.# `doctest` monkeypatches `linecache` module to enable# inspection, so let `linecache.getlines` to be called.# make some effort to find the best matching class definition:# use the one with the least indentation, which is the one# that's most probably not inside a function definition.# if it's at toplevel, it's already the best one# else add whitespace to candidate list# this will sort by whitespace, and by line number,# less whitespace first# Look for a comment block at the top of the file.# Look for a preceding block of comments at the same indentation.# skip any decorators# look for the first "def", "class" or "lambda"# skip to the end of the line# stop skipping when a NEWLINE is seen# lambdas always end at the first NEWLINE# hitting a NEWLINE when in a decorator without args# ends the decorator# the end of matching indent/dedent pairs end a block# (note that this only works for "def"/"class" blocks,# not 
[CodeQL string-pool data: duplicated byte/unicode string pairs containing comments and docstrings extracted from CPython standard-library sources (inspect.py, io.py, itertools); raw pool contents omitted.]
+ +product('ab', range(3)) --> ('a',0) ('a',1) ('a',2) ('b',0) ('b',1) ('b',2) +product((0,1), (0,1), (0,1)) --> (0,0,0) (0,0,1) (0,1,0) (0,1,1) (1,0,0) ...'itertools.productu'repeat(object [,times]) -> create an iterator which returns the object +for the specified number of times. If not specified, returns the object +endlessly.'itertools.repeatu'Return an iterator whose values are returned from the function evaluated with an argument tuple taken from the given sequence.'itertools.starmapu'Return successive entries from an iterable as long as the predicate evaluates to true for each entry.'itertools.takewhiletakewhileteeu'zip_longest(iter1 [,iter2 [...]], [fillvalue=None]) --> zip_longest object + +Return a zip_longest object whose .__next__() method returns a tuple where +the i-th element comes from the i-th iterable argument. The .__next__() +method continues until the longest iterable in the argument sequence +is exhausted and then it raises StopIteration. When the shorter iterables +are exhausted, the fillvalue is substituted in their place. The fillvalue +defaults to None or can be specified by a keyword argument. +'itertools.zip_longestKeywords (from "Grammar/Grammar") + +This file is automatically generated; please don't muck it up! + +To update the symbols in this file, 'cd' to the top directory of +the python source tree and run: + + python3 -m Parser.pgen.keywordgen Grammar/Grammar Grammar/Tokens Lib/keyword.py + +Alternatively, you can run 'make regen-keyword'. +kwlistassertasyncawaitbreakcontinuedelelifelseexceptfinallyglobalisnonlocaltrywhilewithyieldb'Keywords (from "Grammar/Grammar") + +This file is automatically generated; please don't muck it up! + +To update the symbols in this file, 'cd' to the top directory of +the python source tree and run: + + python3 -m Parser.pgen.keywordgen Grammar/Grammar Grammar/Tokens Lib/keyword.py + +Alternatively, you can run 'make regen-keyword'. +'u'Keywords (from "Grammar/Grammar") + +This file is automatically generated; please don't muck it up! + +To update the symbols in this file, 'cd' to the top directory of +the python source tree and run: + + python3 -m Parser.pgen.keywordgen Grammar/Grammar Grammar/Tokens Lib/keyword.py + +Alternatively, you can run 'make regen-keyword'. +'b'iskeyword'u'iskeyword'b'kwlist'u'kwlist'b'assert'u'assert'b'async'u'async'b'await'u'await'b'break'u'break'b'continue'u'continue'b'del'u'del'b'elif'u'elif'b'else'u'else'b'except'u'except'b'finally'u'finally'b'global'u'global'b'is'u'is'b'nonlocal'u'nonlocal'b'try'u'try'b'while'u'while'b'with'u'with'b'yield'u'yield'u'keyword'Cache lines from Python source files. + +This is intended to read lines from modules imported -- hence if a filename +is not found, it will look down the module search path for a file by +that name. +clearcacheClear the cache entirely.Get the lines for a Python source file from the cache. + Update the cache if it doesn't contain an entry for this file already.updatecacheDiscard cache entries that are out of date. + (This is not checked upon each call!)filenamesUpdate a cache entry and return its list of lines. + If something's wrong, print a message, discard the cache entry, + and return an empty list.lazycacheSeed the cache for filename with module_globals. + + The module loader will be asked for the source only when getlines is + called, not immediately. + + If there is an entry in the cache already, it is not altered. + + :return: True if a lazy load is registered in the cache, + otherwise False. 
To register such a load a module loader with a + get_source method must be found, the filename must be a cachable + filename, and the filename must not be already cached. + # The cache# The cache. Maps filenames to either a thunk which will provide source code,# or a tuple (size, mtime, lines, fullname) once loaded.# lazy cache entry, leave it lazy.# no-op for files loaded via a __loader__# Realise a lazy loader based lookup if there is one# otherwise try to lookup right now.# No luck, the PEP302 loader cannot find the source# for this module.# Try looking through the module search path, which is only useful# when handling a relative filename.# Not sufficiently string-like to do anything useful with.# Try for a __loader__, if availableb'Cache lines from Python source files. + +This is intended to read lines from modules imported -- hence if a filename +is not found, it will look down the module search path for a file by +that name. +'u'Cache lines from Python source files. + +This is intended to read lines from modules imported -- hence if a filename +is not found, it will look down the module search path for a file by +that name. +'b'getline'u'getline'b'clearcache'u'clearcache'b'checkcache'u'checkcache'b'Clear the cache entirely.'u'Clear the cache entirely.'b'Get the lines for a Python source file from the cache. + Update the cache if it doesn't contain an entry for this file already.'u'Get the lines for a Python source file from the cache. + Update the cache if it doesn't contain an entry for this file already.'b'Discard cache entries that are out of date. + (This is not checked upon each call!)'u'Discard cache entries that are out of date. + (This is not checked upon each call!)'b'Update a cache entry and return its list of lines. + If something's wrong, print a message, discard the cache entry, + and return an empty list.'u'Update a cache entry and return its list of lines. + If something's wrong, print a message, discard the cache entry, + and return an empty list.'b'Seed the cache for filename with module_globals. + + The module loader will be asked for the source only when getlines is + called, not immediately. + + If there is an entry in the cache already, it is not altered. + + :return: True if a lazy load is registered in the cache, + otherwise False. To register such a load a module loader with a + get_source method must be found, the filename must be a cachable + filename, and the filename must not be already cached. + 'u'Seed the cache for filename with module_globals. + + The module loader will be asked for the source only when getlines is + called, not immediately. + + If there is an entry in the cache already, it is not altered. + + :return: True if a lazy load is registered in the cache, + otherwise False. To register such a load a module loader with a + get_source method must be found, the filename must be a cachable + filename, and the filename must not be already cached. + 'b'get_source'u'get_source'u'linecache'Safely evaluate Python string literals without using eval(). 
simple_escapeseschexesinvalid hex string escape ('\%s')invalid octal string escape ('\%s')evalString\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})b'Safely evaluate Python string literals without using eval().'u'Safely evaluate Python string literals without using eval().'b''u''u''b' 'u' 'b' 'u' 'b'v'u'v'b'invalid hex string escape ('\%s')'u'invalid hex string escape ('\%s')'b'invalid octal string escape ('\%s')'u'invalid octal string escape ('\%s')'b'\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})'u'\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})'u'lib2to3.pgen2.literals'u'pgen2.literals'u'literals'Loading unittests.[_a-z]\w*\.py$VALID_MODULE_NAME_FailedTesttestFailure_make_failed_import_testsuiteClassFailed to import test module: %s +%sformat_exc_make_failed_test_make_failed_load_testsFailed to call load_tests: +%s_make_skipped_testtestSkippedModuleSkippedTestClass_jython_aware_splitext$py.class + This class is responsible for loading tests according to various criteria + and returning them wrapped in a TestSuite + testMethodPrefixthree_way_cmpsortTestMethodsUsingtestNamePatterns_top_level_dir_loading_packagesloadTestsFromTestCasetestCaseClassReturn a suite of all test cases contained in testCaseClassTest cases should not be derived from TestSuite. Maybe you meant to derive from TestCase?"Test cases should not be derived from ""TestSuite. Maybe you meant to derive from ""TestCase?"testCaseNamesloaded_suiteloadTestsFromModuleReturn a suite of all test cases contained in the given moduleuse_load_testsuse_load_tests is deprecated and ignoredcomplaintloadTestsFromModule() takes 1 positional argument but {} were givenloadTestsFromModule() got an unexpected keyword argument '{}'error_caseerror_messageloadTestsFromNameReturn a suite of all test cases given a string specifier. + + The name may resolve either to a module, a test case class, a + test method within a test case class, or a callable object which + returns a TestCase or TestSuite instance. + + The method optionally resolves the names relative to a given module. + parts_copynext_attributeFailed to access attribute: +%scalling %s returned %s, not a testdon't know how to make test from: %sloadTestsFromNamesReturn a suite of all test cases found using the given sequence + of string specifiers. See 'loadTestsFromName()'. + suitesReturn a sorted sequence of method names found within testCaseClass + shouldIncludeMethod%s.%s.%sfullNametestFnNamestest*.pyFind and return all test modules from the specified start + directory, recursing into subdirectories to find them and return all + tests found within them. Only test files that match the pattern will + be loaded. (Using shell style pattern matching.) + + All test modules must be importable from the top level of the project. + If the start directory is not the top level directory then the top + level directory must be specified separately. + + If a test package name (directory with '__init__.py') matches the + pattern then the package will be checked for a 'load_tests' function. If + this exists then it will be called with (loader, tests, pattern) unless + the package has already had load_tests called from the same discovery + invocation, in which case the package module object is not scanned for + tests - this ensures that when a package uses discover to further + discover child tests that infinite recursion does not happen. + + If load_tests exists then discovery does *not* recurse into the package, + load_tests is responsible for loading all tests in the package. 
+ + The pattern is deliberately not stored as a loader attribute so that + packages can continue discovery themselves. top_level_dir is stored so + load_tests does not need to pass this argument in to loader.discover(). + + Paths are sorted before being imported to ensure reproducible execution + order even on filesystems with non-alphabetical ordering like ext3/4. + set_implicit_topis_not_importablethe_moduletop_part_find_testsCan not use builtin modules as dotted module names'Can not use builtin modules ''as dotted module names'don't know how to discover from {!r}_get_directory_containing_moduleStart directory is not importable: %r_get_name_from_path_relpathPath must be within the project_get_module_from_name_match_pathUsed by discovery. Yields test suites it loads._find_test_pathshould_recurseUsed by discovery. + + Loads tests from a single file, or a directories' __init__.py when + passed the directory. + + Returns a tuple (None_or_tests_from_file, should_recurse). + mod_filefullpath_noextmodule_direxpected_dir%r module incorrectly imported from %r. Expected %r. Is this module globally installed?"%r module incorrectly imported from %r. Expected ""%r. Is this module globally installed?"_makeLoadersortUsing# what about .pyc (etc)# we would need to avoid loading the same tests multiple times# from '.py', *and* '.pyc'# Tracks packages which we have called into via load_tests, to# avoid infinite re-entrancy.# XXX After Python 3.5, remove backward compatibility hacks for# use_load_tests deprecation via *args and **kws. See issue 16662.# This method used to take an undocumented and unofficial# use_load_tests argument. For backward compatibility, we still# accept the argument (which can also be the first position) but we# ignore it and issue a deprecation warning if it's present.# Complain about the number of arguments, but don't forget the# required `module` argument.# Since the keyword arguments are unsorted (see PEP 468), just# pick the alphabetically sorted first argument to complain about,# if multiple were given. At least the error message will be# predictable.# Last error so we can give it to the user if needed.# Even the top level import failed: report that error.# We can't traverse some part of the name.# This is a package (no __path__ per importlib docs), and we# encountered an error importing something. We cannot tell# the difference between package.WrongNameTestClass and# package.wrong_module_name so we just report the# ImportError - it is more informative.# Otherwise, we signal that an AttributeError has occurred.# static methods follow a different path# make top_level_dir optional if called from load_tests in a package# all test modules must be importable from the top level directory# should we *unconditionally* put the start directory in first# in sys.path to minimise likelihood of conflicts between installed# modules and development versions?# support for discovery from dotted module names# look for namespace packages# builtin module# here we have been given a module rather than a package - so# all we can do is search the *same* directory the module is in# should an exception be raised instead# override this method to use alternative matching strategy# Handle the __init__ in this package# name is '.' 
when start_dir == top_level_dir (and top_level_dir is by# definition not a package).# name is in self._loading_packages while we have called into# loadTestsFromModule with name.# Either an error occurred, or load_tests was used by the# package.# Handle the contents.# we found a package that didn't use load_tests.# valid Python identifiers only# if the test file matches, load it# Mark this package as being in load_tests (possibly ;))# loadTestsFromModule(package) has loaded tests for us.b'Loading unittests.'u'Loading unittests.'b'[_a-z]\w*\.py$'u'[_a-z]\w*\.py$'b'Failed to import test module: %s +%s'u'Failed to import test module: %s +%s'b'Failed to call load_tests: +%s'u'Failed to call load_tests: +%s'b'ModuleSkipped'u'ModuleSkipped'b'$py.class'u'$py.class'b' + This class is responsible for loading tests according to various criteria + and returning them wrapped in a TestSuite + 'u' + This class is responsible for loading tests according to various criteria + and returning them wrapped in a TestSuite + 'b'test'b'Return a suite of all test cases contained in testCaseClass'u'Return a suite of all test cases contained in testCaseClass'b'Test cases should not be derived from TestSuite. Maybe you meant to derive from TestCase?'u'Test cases should not be derived from TestSuite. Maybe you meant to derive from TestCase?'b'Return a suite of all test cases contained in the given module'u'Return a suite of all test cases contained in the given module'b'use_load_tests'u'use_load_tests'b'use_load_tests is deprecated and ignored'u'use_load_tests is deprecated and ignored'b'loadTestsFromModule() takes 1 positional argument but {} were given'u'loadTestsFromModule() takes 1 positional argument but {} were given'b'loadTestsFromModule() got an unexpected keyword argument '{}''u'loadTestsFromModule() got an unexpected keyword argument '{}''b'load_tests'u'load_tests'b'Return a suite of all test cases given a string specifier. + + The name may resolve either to a module, a test case class, a + test method within a test case class, or a callable object which + returns a TestCase or TestSuite instance. + + The method optionally resolves the names relative to a given module. + 'u'Return a suite of all test cases given a string specifier. + + The name may resolve either to a module, a test case class, a + test method within a test case class, or a callable object which + returns a TestCase or TestSuite instance. + + The method optionally resolves the names relative to a given module. + 'b'Failed to access attribute: +%s'u'Failed to access attribute: +%s'b'calling %s returned %s, not a test'u'calling %s returned %s, not a test'b'don't know how to make test from: %s'u'don't know how to make test from: %s'b'Return a suite of all test cases found using the given sequence + of string specifiers. See 'loadTestsFromName()'. + 'u'Return a suite of all test cases found using the given sequence + of string specifiers. See 'loadTestsFromName()'. + 'b'Return a sorted sequence of method names found within testCaseClass + 'u'Return a sorted sequence of method names found within testCaseClass + 'b'%s.%s.%s'u'%s.%s.%s'b'test*.py'u'test*.py'b'Find and return all test modules from the specified start + directory, recursing into subdirectories to find them and return all + tests found within them. Only test files that match the pattern will + be loaded. (Using shell style pattern matching.) + + All test modules must be importable from the top level of the project. 
+ If the start directory is not the top level directory then the top + level directory must be specified separately. + + If a test package name (directory with '__init__.py') matches the + pattern then the package will be checked for a 'load_tests' function. If + this exists then it will be called with (loader, tests, pattern) unless + the package has already had load_tests called from the same discovery + invocation, in which case the package module object is not scanned for + tests - this ensures that when a package uses discover to further + discover child tests that infinite recursion does not happen. + + If load_tests exists then discovery does *not* recurse into the package, + load_tests is responsible for loading all tests in the package. + + The pattern is deliberately not stored as a loader attribute so that + packages can continue discovery themselves. top_level_dir is stored so + load_tests does not need to pass this argument in to loader.discover(). + + Paths are sorted before being imported to ensure reproducible execution + order even on filesystems with non-alphabetical ordering like ext3/4. + 'u'Find and return all test modules from the specified start + directory, recursing into subdirectories to find them and return all + tests found within them. Only test files that match the pattern will + be loaded. (Using shell style pattern matching.) + + All test modules must be importable from the top level of the project. + If the start directory is not the top level directory then the top + level directory must be specified separately. + + If a test package name (directory with '__init__.py') matches the + pattern then the package will be checked for a 'load_tests' function. If + this exists then it will be called with (loader, tests, pattern) unless + the package has already had load_tests called from the same discovery + invocation, in which case the package module object is not scanned for + tests - this ensures that when a package uses discover to further + discover child tests that infinite recursion does not happen. + + If load_tests exists then discovery does *not* recurse into the package, + load_tests is responsible for loading all tests in the package. + + The pattern is deliberately not stored as a loader attribute so that + packages can continue discovery themselves. top_level_dir is stored so + load_tests does not need to pass this argument in to loader.discover(). + + Paths are sorted before being imported to ensure reproducible execution + order even on filesystems with non-alphabetical ordering like ext3/4. + 'b'Can not use builtin modules as dotted module names'u'Can not use builtin modules as dotted module names'b'don't know how to discover from {!r}'u'don't know how to discover from {!r}'b'Start directory is not importable: %r'u'Start directory is not importable: %r'b'Path must be within the project'u'Path must be within the project'b'Used by discovery. Yields test suites it loads.'u'Used by discovery. Yields test suites it loads.'b'Used by discovery. + + Loads tests from a single file, or a directories' __init__.py when + passed the directory. + + Returns a tuple (None_or_tests_from_file, should_recurse). + 'u'Used by discovery. + + Loads tests from a single file, or a directories' __init__.py when + passed the directory. + + Returns a tuple (None_or_tests_from_file, should_recurse). + 'b'%r module incorrectly imported from %r. Expected %r. Is this module globally installed?'u'%r module incorrectly imported from %r. Expected %r. 
Is this module globally installed?'u'unittest.loader'u'loader'Locale support module. + +The module provides low-level access to the C lib's locale APIs and adds high +level number formatting APIs as well as a locale aliasing engine to complement +these. + +The aliasing engine includes support for many commonly used locale names and +maps them to values suitable for passing to the C lib's setlocale() function. It +also includes default encodings for all supported locale names. + +encodings.aliases_builtin_strresetlocaleatofatoicurrency_strcoll strcoll(string,string) -> int. + Compares two strings according to the locale. + _strxfrm strxfrm(string) -> string. + Returns a string that behaves for cmp locale-aware. + localeconv() -> dict. + Returns numeric and monetary locale-specific parameters. + currency_symboln_sign_posnp_cs_precedesn_cs_precedesmon_groupingn_sep_by_spacenegative_signpositive_signp_sep_by_spaceint_curr_symbolp_sign_posnmon_thousands_sepfrac_digitsmon_decimal_pointint_frac_digits setlocale(integer,string=None) -> string. + Activates/queries locale processing. + _locale emulation only supports "C" locale_override_localeconv_grouping_intervalslast_intervalinvalid grouping_groupmonetaryright_spacesleft_spaces0123456789_strip_paddingamountlposrpos%(?:\((?P.*?)\))?(?P[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]r'%(?:\((?P.*?)\))?'r'(?P[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]'_percent_repercentadditionaleEfFgGsepsdiuFormats a string in the same way that the % formatting would use, + but takes the current locale into account. + + Grouping is applied if the third parameter is true. + Conversion uses monetary thousands separator and grouping strings if + forth parameter monetary is true.percentsnew_fpercmodifiersstarcountDeprecated, use format_string instead.This method will be removed in a future version of Python. Use 'locale.format_string()' instead."This method will be removed in a future version of Python. ""Use 'locale.format_string()' instead."format() must be given exactly one %%char format specifier, %s not valid"format() must be given exactly one %%char ""format specifier, %s not valid"internationalFormats val according to the currency settings + in the current locale.Currency formatting is not possible using the 'C' locale."Currency formatting is not possible using ""the 'C' locale."%%.%ifsmbprecedesseparatedsign_posConvert float to string, taking the locale into account.%.12gdelocalizeParses a string as a normalized number according to the locale settings.Parses a string as a float according to the locale settings.Converts a string to an integer according to the locale settings.1234567893.14_setlocale_replace_encodinglangnamelocale_encoding_alias_append_modifier.ISO8859-15ISO8859-15ISO8859-1localename Returns a normalized locale code for the given locale + name. + + The returned locale code is formatted for use with + setlocale(). + + If normalization fails, the original name is returned + unchanged. + + If the given encoding is not known, the function defaults to + the default encoding for the locale code just like setlocale() + does. + + lang_enclookup_namelocale_aliasdefmod_parse_localename Parses the locale code for localename and returns the + result as tuple (language code, encoding). + + The localename is normalized and passed through the locale + alias engine. A ValueError is raised in case the locale name + cannot be parsed. + + The language code corresponds to RFC 1766. 
code and encoding + can be None in case the values cannot be determined or are + unknown to this implementation. + + unknown locale: %s_build_localenamelocaletuple Builds a locale code from the given tuple (language code, + encoding). + + No aliasing or normalizing takes place. + + Locale must be None, a string, or an iterable of two strings -- language code, encoding.'Locale must be None, a string, or an iterable of ''two strings -- language code, encoding.'envvars Tries to determine the default locale settings and returns + them as tuple (language code, encoding). + + According to POSIX, a program which has not called + setlocale(LC_ALL, "") runs using the portable 'C' locale. + Calling setlocale(LC_ALL, "") lets it use the default locale as + defined by the LANG variable. Since we don't want to interfere + with the current locale setting we thus emulate the behavior + in the way described above. + + To maintain compatibility with other platforms, not only the + LANG variable is tested, but a list of variables given as + envvars parameter. The first found to be defined will be + used. envvars defaults to the search path used in GNU gettext; + it must always contain the variable name 'LANG'. + + Except for the code 'C', the language code corresponds to RFC + 1766. code and encoding can be None in case the values cannot + be determined. + + 0xwindows_locale Returns the current setting for the given locale category as + tuple (language code, encoding). + + category may be one of the LC_* value except LC_ALL. It + defaults to LC_CTYPE. + + Except for the code 'C', the language code corresponds to RFC + 1766. code and encoding can be None in case the values cannot + be determined. + + category LC_ALL is not supported Set the locale for the given category. The locale can be + a string, an iterable of two strings (language code and encoding), + or None. + + Iterables are converted to strings using the locale aliasing + engine. Locale strings are passed directly to the C lib. + + category may be given as one of the LC_* values. + + Sets the locale for category to the default setting. + + The default setting is determined by calling + getdefaultlocale(). category defaults to LC_ALL. 
+ + Return the charset that the user is likely using._bootlocaleReturn the charset that the user is likely using, + according to the system configuration.oldlocReturn the charset that the user is likely using, + by looking at environment variables.enJIS7jisjis7eucJPajecKOI8-Ckoi8cCP1251microsoftcp1251CP1255microsoftcp1255CP1256microsoftcp125688591ISO8859-288592ISO8859-588595885915ISO8859-10ISO8859-11ISO8859-13ISO8859-14ISO8859-16ISO8859-3ISO8859-4ISO8859-6ISO8859-7ISO8859-8ISO8859-9SJISTACTISeucKRKOI8-RKOI8-Tkoi8_tKOI8-Ukoi8_uRK1048az_AZ.KOI8-Ca3a3_aza3_az.koicaa_DJ.ISO8859-1aa_djaa_ER.UTF-8aa_eraa_ET.UTF-8aa_etaf_ZA.ISO8859-1af_zaagr_PE.UTF-8agr_peak_GH.UTF-8ak_gham_ET.UTF-8amam_eten_US.ISO8859-1americanan_ES.ISO8859-15an_esanp_IN.UTF-8anp_inar_AA.ISO8859-6arar_aaar_AE.ISO8859-6ar_aear_BH.ISO8859-6ar_bhar_DZ.ISO8859-6ar_dzar_EG.ISO8859-6ar_egar_IN.UTF-8ar_inar_IQ.ISO8859-6ar_iqar_JO.ISO8859-6ar_joar_KW.ISO8859-6ar_kwar_LB.ISO8859-6ar_lbar_LY.ISO8859-6ar_lyar_MA.ISO8859-6ar_maar_OM.ISO8859-6ar_omar_QA.ISO8859-6ar_qaar_SA.ISO8859-6ar_saar_SD.ISO8859-6ar_sdar_SS.UTF-8ar_ssar_SY.ISO8859-6ar_syar_TN.ISO8859-6ar_tnar_YE.ISO8859-6ar_yeas_IN.UTF-8as_inast_ES.ISO8859-15ast_esayc_PE.UTF-8ayc_peaz_AZ.ISO8859-9Eazaz_azaz_az.iso88599eaz_IR.UTF-8az_irbe_BY.CP1251bebe_BY.UTF-8@latinbe@latinbg_BG.UTF-8be_bg.utf8be_bybe_by@latinbem_ZM.UTF-8bem_zmber_DZ.UTF-8ber_dzber_MA.UTF-8ber_mabg_BG.CP1251bgbg_bgbhb_IN.UTF-8bhb_in.utf8bho_IN.UTF-8bho_inbho_NP.UTF-8bho_npbi_VU.UTF-8bi_vubn_BD.UTF-8bn_bdbn_IN.UTF-8bn_inbo_CN.UTF-8bo_cnbo_IN.UTF-8bo_innb_NO.ISO8859-1bokmalbokmålbr_FR.ISO8859-1br_frbrx_IN.UTF-8brx_inbs_BA.ISO8859-2bsbs_babulgarianbyn_ER.UTF-8byn_erfr_CA.ISO8859-1c-frenchc.asciic.enc.iso88591en_US.UTF-8c.utf8c_cc_c.cca_ES.ISO8859-1ca_AD.ISO8859-1ca_adca_esca_ES.UTF-8@valenciaca_es@valenciaca_FR.ISO8859-1ca_frca_IT.ISO8859-1ca_itcatalance_RU.UTF-8ce_rucextendzh_CN.eucCNchinese-szh_TW.eucTWchinese-tchr_US.UTF-8chr_usckb_IQ.UTF-8ckb_iqcmn_TW.UTF-8cmn_twcrh_UA.UTF-8crh_uahr_HR.ISO8859-2croatiancs_CZ.ISO8859-2cscs_cscs_czcsb_PL.UTF-8csb_plcv_RU.UTF-8cv_rucy_GB.ISO8859-1cycy_gbczcz_czczechda_DK.ISO8859-1dada_dkdanishdanskde_DE.ISO8859-1dede_AT.ISO8859-1de_atde_BE.ISO8859-1de_bede_CH.ISO8859-1de_chde_dede_IT.ISO8859-1de_itde_LI.UTF-8de_li.utf8de_LU.ISO8859-1de_ludeutschdoi_IN.UTF-8doi_innl_NL.ISO8859-1dutchnl_BE.ISO8859-1dutch.iso88591dv_MV.UTF-8dv_mvdz_BT.UTF-8dz_btee_EE.ISO8859-4eeee_eeet_EE.ISO8859-1eestiel_GR.ISO8859-7elel_CY.ISO8859-7el_cyel_grel_GR.ISO8859-15el_gr@euroen_AG.UTF-8en_agen_AU.ISO8859-1en_auen_BE.ISO8859-1en_been_BW.ISO8859-1en_bwen_CA.ISO8859-1en_caen_DK.ISO8859-1en_dken_DL.UTF-8en_dl.utf8en_GB.ISO8859-1en_gben_HK.ISO8859-1en_hken_IE.ISO8859-1en_ieen_IL.UTF-8en_ilen_IN.ISO8859-1en_inen_NG.UTF-8en_ngen_NZ.ISO8859-1en_nzen_PH.ISO8859-1en_phen_SC.UTF-8en_sc.utf8en_SG.ISO8859-1en_sgen_uken_usen_US.ISO8859-15en_us@euro@euroen_ZA.ISO8859-1en_zaen_ZM.UTF-8en_zmen_ZW.ISO8859-1en_zwen_ZS.UTF-8en_zw.utf8eng_gben_EN.ISO8859-1englishenglish.iso88591english_ukenglish_united-statesenglish_united-states.437english_useo_XX.ISO8859-3eoeo.UTF-8eo.utf8eo_EO.ISO8859-3eo_eoeo_US.UTF-8eo_us.utf8eo_xxes_ES.ISO8859-1eses_AR.ISO8859-1es_ares_BO.ISO8859-1es_boes_CL.ISO8859-1es_cles_CO.ISO8859-1es_coes_CR.ISO8859-1es_cres_CU.UTF-8es_cues_DO.ISO8859-1es_does_EC.ISO8859-1es_eces_eses_GT.ISO8859-1es_gtes_HN.ISO8859-1es_hnes_MX.ISO8859-1es_mxes_NI.ISO8859-1es_nies_PA.ISO8859-1es_paes_PE.ISO8859-1es_pees_PR.ISO8859-1es_pres_PY.ISO8859-1es_pyes_SV.ISO8859-1es_sves_US.ISO8859-1es_uses_UY.ISO8859-1es_uyes_VE.ISO8859-1es_veestoni
anet_EE.ISO8859-15etet_eeeu_ES.ISO8859-1eueu_eseu_FR.ISO8859-1eu_frfa_IR.UTF-8fafa_irfa_IR.ISIRI-3342fa_ir.isiri3342ff_SN.UTF-8ff_snfi_FI.ISO8859-15fifi_fifil_PH.UTF-8fil_phfi_FI.ISO8859-1finnishfo_FO.ISO8859-1fofo_fofr_FR.ISO8859-1frfr_BE.ISO8859-1fr_befr_cafr_CH.ISO8859-1fr_chfr_frfr_LU.ISO8859-1fr_lufrançaisfre_frfrenchfrench.iso88591french_francefur_IT.UTF-8fur_itfy_DE.UTF-8fy_defy_NL.UTF-8fy_nlga_IE.ISO8859-1gaga_iegl_ES.ISO8859-1galegogaliciangd_GB.ISO8859-1gdgd_gbger_degermangerman.iso88591german_germanygez_ER.UTF-8gez_ergez_ET.UTF-8gez_etglgl_esgu_IN.UTF-8gu_ingv_GB.ISO8859-1gvgv_gbha_NG.UTF-8ha_nghak_TW.UTF-8hak_twhe_IL.ISO8859-8hehe_ilhi_IN.ISCII-DEVhi_inhi_in.isciidevhif_FJ.UTF-8hif_fjhne_IN.UTF-8hnehne_inhr_hrhrvatskihsb_DE.ISO8859-2hsb_deht_HT.UTF-8ht_hthu_HU.ISO8859-2huhu_huhungarianhy_AM.UTF-8hy_amhy_AM.ARMSCII_8hy_am.armscii8ia.UTF-8iaia_FR.UTF-8ia_fris_IS.ISO8859-1icelandicid_ID.ISO8859-1id_idig_NG.UTF-8ig_ngik_CA.UTF-8ik_cain_idis_isiso8859-1iso8859-15it_IT.ISO8859-1it_CH.ISO8859-1it_chit_ititalianiu_CA.NUNACOM-8iuiu_caiu_ca.nunacom8iwiw_iliw_IL.UTF-8iw_il.utf8ja_JP.eucJPjaja_jpja_jp.eucja_JP.SJISja_jp.mscodeja_jp.pckjapanjapanesejapanese-eucjapanese.eucjp_jpka_GE.GEORGIAN-ACADEMYkaka_geka_ge.georgianacademyka_GE.GEORGIAN-PSka_ge.georgianpska_ge.georgianrskab_DZ.UTF-8kab_dzkk_KZ.ptcp154kk_kzkl_GL.ISO8859-1klkl_glkm_KH.UTF-8km_khkn_IN.UTF-8knkn_inko_KR.eucKRkoko_krko_kr.euckok_IN.UTF-8kok_inkorean.eucks_IN.UTF-8ksks_inks_IN.UTF-8@devanagariks_in@devanagari.utf8ku_TR.ISO8859-9ku_trkw_GB.ISO8859-1kw_gbky_KG.UTF-8kyky_kglb_LU.UTF-8lb_lulg_UG.ISO8859-10lg_ugli_BE.UTF-8li_beli_NL.UTF-8li_nllij_IT.UTF-8lij_itlt_LT.ISO8859-13lithuanianln_CD.UTF-8ln_cdlo_LA.MULELAO-1lo_lalo_LA.IBM-CP1133lo_la.cp1133lo_la.ibmcp1133lo_la.mulelao1lt_ltlv_LV.ISO8859-13lvlv_lvlzh_TW.UTF-8lzh_twmag_IN.UTF-8mag_inmai_IN.UTF-8maimai_inmai_NP.UTF-8mai_npmfe_MU.UTF-8mfe_mumg_MG.ISO8859-15mg_mgmhr_RU.UTF-8mhr_rumi_NZ.ISO8859-1mimi_nzmiq_NI.UTF-8miq_nimjw_IN.UTF-8mjw_inmk_MK.ISO8859-5mkmk_mkml_IN.UTF-8mlml_inmn_MN.UTF-8mn_mnmni_IN.UTF-8mni_inmr_IN.UTF-8mrmr_inms_MY.ISO8859-1ms_mymt_MT.ISO8859-3mtmt_mtmy_MM.UTF-8my_mmnan_TW.UTF-8nan_twnbnb_nonds_DE.UTF-8nds_dends_NL.UTF-8nds_nlne_NP.UTF-8ne_npnhn_MX.UTF-8nhn_mxniu_NU.UTF-8niu_nuniu_NZ.UTF-8niu_nznlnl_AW.UTF-8nl_awnl_benl_nlnn_NO.ISO8859-1nn_nono_NO.ISO8859-1nony_NO.ISO8859-1no@nynorskno_nono_no.iso88591@bokmalno_no.iso88591@nynorsknorwegiannr_ZA.ISO8859-1nrnr_zanso_ZA.ISO8859-15nsonso_zanyny_nonynorskoc_FR.ISO8859-1ococ_from_ET.UTF-8om_etom_KE.ISO8859-1om_keor_IN.UTF-8or_inos_RU.UTF-8os_rupa_IN.UTF-8papa_inpa_PK.UTF-8pa_pkpap_AN.UTF-8pap_anpap_AW.UTF-8pap_awpap_CW.UTF-8pap_cwpd_US.ISO8859-1pdpd_DE.ISO8859-1pd_depd_usph_PH.ISO8859-1ph_phpl_PL.ISO8859-2plpl_plpolishpt_PT.ISO8859-1portuguesept_BR.ISO8859-1portuguese_brazilposix-utf2pp_AN.ISO8859-1pppp_anps_AF.UTF-8ps_afptpt_brpt_ptquz_PE.UTF-8quz_peraj_IN.UTF-8raj_inro_RO.ISO8859-2roro_roromanianru_RU.UTF-8ruru_ruru_UA.KOI8-Uru_uarumanianru_RU.KOI8-Rrussianrw_RW.ISO8859-1rwrw_rwsa_IN.UTF-8sa_insat_IN.UTF-8sat_insc_IT.UTF-8sc_itsd_IN.UTF-8sdsd_insd_IN.UTF-8@devanagarisd_in@devanagari.utf8sd_PK.UTF-8sd_pkse_NO.UTF-8se_nosr_RS.UTF-8@latinserbocroatiansgs_LT.UTF-8sgs_ltshsr_CS.ISO8859-2sh_ba.iso88592@bosniash_HR.ISO8859-2sh_hrsh_hr.iso88592sh_spsh_yushn_MM.UTF-8shn_mmshs_CA.UTF-8shs_casi_LK.UTF-8sisi_lksid_ET.UTF-8sid_etsinhalask_SK.ISO8859-2sksk_sksl_SI.ISO8859-2sl_CS.ISO8859-2sl_cssl_sislovakslovenesloveniansm_WS.UTF-8sm_wsso_DJ.ISO8859-1so_djso_ET.UTF-8so_etso_KE.ISO8859-1so_keso_SO.ISO8859-1so_sosr_CS.ISO8859-5sps
p_yuspanishspanish_spainsq_AL.ISO8859-2sqsq_alsq_MK.UTF-8sq_mksr_RS.UTF-8sr@cyrillicsr_CS.UTF-8@latinsr@latnsr_CS.UTF-8sr_cssr_cs.iso88592@latnsr_cs@latnsr_ME.UTF-8sr_mesr_rssr_rs@latnsr_spsr_yusr_CS.CP1251sr_yu.cp1251@cyrillicsr_yu.iso88592sr_yu.iso88595sr_yu.iso88595@cyrillicsr_yu.microsoftcp1251@cyrillicsr_yu.utf8sr_yu.utf8@cyrillicsr_yu@cyrillicss_ZA.ISO8859-1ss_zast_ZA.ISO8859-1st_zasv_SE.ISO8859-1svsv_FI.ISO8859-1sv_fisv_sesw_KE.UTF-8sw_kesw_TZ.UTF-8sw_tzswedishszl_PL.UTF-8szl_plta_IN.TSCII-0tata_inta_in.tsciita_in.tscii0ta_LK.UTF-8ta_lktcy_IN.UTF-8tcy_in.utf8te_IN.UTF-8te_intg_TJ.KOI8-Ctgtg_tjth_TH.ISO8859-11thth_thth_TH.TIS620th_th.tactisth_th.tis620the_NP.UTF-8the_npti_ER.UTF-8ti_erti_ET.UTF-8ti_ettig_ER.UTF-8tig_ertk_TM.UTF-8tk_tmtl_PH.ISO8859-1tltl_phtn_ZA.ISO8859-15tntn_zato_TO.UTF-8to_totpi_PG.UTF-8tpi_pgtr_TR.ISO8859-9trtr_CY.ISO8859-9tr_cytr_trts_ZA.ISO8859-1ts_zatt_RU.TATAR-CYRtttt_rutt_ru.tatarcyrtt_RU.UTF-8@iqteliftt_ru@iqtelifturkishug_CN.UTF-8ug_cnuk_UA.KOI8-Uukuk_uaen_US.utfunivuniversal.utf8@ucs4unm_US.UTF-8unm_usur_PK.CP1256urur_IN.UTF-8ur_inur_pkuz_UZ.UTF-8uzuz_uzuz_uz@cyrillicve_ZA.UTF-8veve_zavi_VN.TCVNvivi_vnvi_vn.tcvnvi_vn.tcvn5712vi_VN.VISCIIvi_vn.visciivi_vn.viscii111wa_BE.ISO8859-1wawa_bewae_CH.UTF-8wae_chwal_ET.UTF-8wal_etwo_SN.UTF-8wo_snxh_ZA.ISO8859-1xhxh_zayi_US.CP1255yiyi_usyo_NG.UTF-8yo_ngyue_HK.UTF-8yue_hkyuw_PG.UTF-8yuw_pgzhzh_CN.gb2312zh_cnzh_TW.big5zh_cn.big5zh_cn.euczh_HK.big5hkscszh_hkzh_hk.big5hkzh_SG.GB2312zh_sgzh_SG.GBKzh_sg.gbkzh_twzh_tw.euczh_tw.euctwzu_ZA.ISO8859-1zuzu_zaaf_ZA10780x0436sq_AL10520x041cgsw_FR11560x0484am_ET11180x045ear_SA0x0401ar_IQ20490x0801ar_EG30730x0c01ar_LY0x1001ar_DZ51210x1401ar_MA61450x1801ar_TN71690x1c01ar_OM81930x2001ar_YE92170x2401ar_SY102410x2801ar_JO112650x2c01ar_LB122890x3001ar_KW133130x3401ar_AE143370x3801ar_BH153610x3c01ar_QA163850x4001hy_AM10670x042bas_IN11010x044daz_AZ10680x042c20920x082cba_RU11330x046deu_ES10690x042dbe_BY10590x0423bn_IN10930x0445bs_BA51460x141abr_FR11500x047ebg_BG0x0402ca_ES10270x0403zh_CHS0x0004zh_TW10280x0404zh_CN20520x0804zh_HK30760x0c04zh_SG0x1004zh_MO51240x1404zh_CHT317480x7c04co_FR11550x0483hr_HR10500x041ahr_BA41220x101acs_CZ10290x0405da_DK10300x0406gbz_AF11640x048cdiv_MV0x0465nl_NL10430x0413nl_BE20670x0813en_US10330x0409en_GB20570x0809en_AU30810x0c09en_CA41050x1009en_NZ51290x1409en_IE61530x1809en_ZA71770x1c09en_JAen_CB92250x2409en_BZ102490x2809en_TT112730x2c09en_ZW122970x3009en_PH133210x3409en_IN163930x4009en_MY174170x4409184410x4809et_EE10610x0425fo_FO10800x0438fil_PH11240x0464fi_FI10350x040bfr_FR10360x040cfr_BE20600x080cfr_CA30840x0c0cfr_CH41080x100cfr_LU51320x140cfr_MC61560x180cfy_NL11220x0462gl_ES11100x0456ka_GE10790x0437de_DE10310x0407de_CH20550x0807de_AT30790x0c07de_LU0x1007de_LI0x1407el_GR10320x0408kl_GL11350x046fgu_IN10950x0447ha_NG11280x0468he_IL10370x040dhi_IN10810x0439hu_HU10380x040eis_IS10390x040fid_ID10570x0421iu_CA11170x045d21410x085dga_IE21080x083cit_IT10400x0410it_CH20640x0810ja_JP10410x0411kn_IN10990x044bkk_KZ10870x043fkh_KH11070x0453qut_GT11580x0486rw_RW11590x0487kok_IN11110x0457ko_KR10420x0412ky_KG10880x0440lo_LA11080x0454lv_LV10620x0426lt_LT10630x0427dsb_DE20940x082elb_LU11340x046emk_MK10710x042fms_MY10860x043ems_BN21100x083eml_IN11000x044cmt_MT10820x043ami_NZ11530x0481arn_CL11460x047amr_IN11020x044emoh_CA11480x047cmn_MN11040x0450mn_CN21280x0850ne_NP11210x0461nb_NO10440x0414nn_NO20680x0814oc_FR11540x0482or_IN10960x0448ps_AF11230x0463fa_IR10650x0429pl_PL10450x0415pt_BR10460x0416pt_PT20700x0816pa_IN10940x0446quz_BO11310x046bquz_EC21550x086bquz_PE31790x0c6bro_RO10480x04
18rm_CH10470x0417ru_RU10490x0419smn_FI92750x243bsmj_NO41550x103bsmj_SE51790x143bse_NO10830x043bse_SE21070x083bse_FI31310x0c3bsms_FI82510x203bsma_NO62030x183bsma_SE72270x1c3bsa_IN11030x044fsr_SP30980x0c1asr_BA71940x1c1a20740x081a61700x181asi_LK11150x045bns_ZA11320x046ctn_ZA10740x0432sk_SK10510x041bsl_SI10600x0424es_ES10340x040aes_MX20580x080a30820x0c0aes_GT41060x100aes_CR51300x140aes_PA61540x180aes_DO71780x1c0aes_VE82020x200aes_CO92260x240aes_PE102500x280aes_AR112740x2c0aes_EC122980x300aes_CL133220x340aes_UR143460x380aes_PY153700x3c0aes_BO163940x400aes_SV174180x440aes_HN184420x480aes_NI194660x4c0aes_PR204900x500aes_US215140x540asw_KE10890x0441sv_SE10530x041dsv_FI20770x081dsyr_SY11140x045atg_TJ10640x0428tmz_DZ21430x085fta_IN10970x0449tt_RU10920x0444te_IN10980x044ath_TH10540x041ebo_BT21290x0851bo_CN11050x0451tr_TR10550x041ftk_TM10900x0442ug_CN11520x0480uk_UA10580x0422wen_DE10700x042eur_PK10560x0420ur_IN20800x0820uz_UZ10910x044321150x0843vi_VN10660x042acy_GB11060x0452wo_SN11600x0488xh_ZA10760x0434sah_RU11570x0485ii_CN11440x0478yo_NG11300x046azu_ZA10770x0435_print_locale Test function. + categories_init_categoriesLC_Locale defaults as determined by getdefaultlocale():Language: (undefined)Encoding: Locale settings on startup: Language: Encoding: Locale settings after calling resetlocale():Locale settings after calling setlocale(LC_ALL, ""):NOTE:setlocale(LC_ALL, "") does not support the default localegiven in the OS environment variables.Locale aliasing:Number formatting:# Try importing the _locale module.# If this fails, fall back on a basic 'C' locale emulation.# Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before# trying the import. So __all__ is also fiddled at the end of the file.# Locale emulation# 'C' locale default values# These may or may not exist in _locale, so be sure to set them.# With this dict, you can override some items of localeconv's return value.# This is useful for testing purposes.### Number formatting APIs# Author: Martin von Loewis# improved by Georg Brandl# Iterate over grouping intervals# if grouping is -1, we are done# 0: re-use last group ad infinitum#perform the grouping from right to left# only non-digit characters remain (sign, spaces)# Strip a given amount of excess padding from the given string# floats and decimal ints need special action!# check for illegal values# '<' and '>' are markers if the sign must be inserted between symbol and value# the default if nothing specified;# this should be the most fitting sign position#First, get rid of the grouping#next, replace the decimal point with a dot#do grouping#standard formatting### Locale name aliasing engine# Author: Marc-Andre Lemburg, mal@lemburg.com# Various tweaks by Fredrik Lundh # store away the low-level version of setlocale (it's# overridden below)# Convert the encoding to a C lib compatible encoding string#print('norm encoding: %r' % norm_encoding)#print('aliased encoding: %r' % norm_encoding)#print('found encoding %r' % encoding)# Normalize the locale name and extract the encoding and modifier# ':' is sometimes used as encoding delimiter.# First lookup: fullname (possibly with encoding and modifier)#print('first lookup failed')# Second try: fullname without modifier (possibly with encoding)#print('lookup without modifier succeeded')#print('second lookup failed')# Third try: langname (without encoding, possibly with modifier)#print('lookup without encoding succeeded')# Fourth try: langname (without encoding and modifier)#print('lookup without modifier and encoding succeeded')# Deal with 
locale modifiers# Assume Latin-9 for @euro locales. This is bogus,# since some systems may use other encodings for these# locales. Also, we ignore other modifiers.# On macOS "LC_CTYPE=UTF-8" is a valid locale setting# for getting UTF-8 handling for text.# check if it's supported by the _locale module# make sure the code/encoding values are valid# map windows language identifier to language name# ...add other platform-specific processing here, if# necessary...# fall back on POSIX behaviour# convert to string# On Win32, this will return the ANSI code page# On Unix, if CODESET is available, use that.# Fall back to parsing environment variables :-(# LANG not set, default conservatively to ASCII### Database# The following data was extracted from the locale.alias file which# comes with X11 and then hand edited removing the explicit encoding# definitions and adding some more aliases. The file is usually# available as /usr/lib/X11/locale/locale.alias.# The local_encoding_alias table maps lowercase encoding alias names# to C locale encoding names (case-sensitive). Note that normalize()# first looks up the encoding in the encodings.aliases dictionary and# then applies this mapping to find the correct C lib name for the# encoding.# Mappings for non-standard encoding names used in locale names# Mappings from Python codec names to C lib encoding names# XXX This list is still incomplete. If you know more# mappings, please file a bug report. Thanks.# The locale_alias table maps lowercase alias names to C locale names# (case-sensitive). Encodings are always separated from the locale# name using a dot ('.'); they should only be given in case the# language name is needed to interpret the given encoding alias# correctly (CJK codes often have this need).# Note that the normalize() function which uses this tables# removes '_' and '-' characters from the encoding part of the# locale name before doing the lookup. 
This saves a lot of# space in the table.# MAL 2004-12-10:# Updated alias mapping to most recent locale.alias file# from X.org distribution using makelocalealias.py.# These are the differences compared to the old mapping (Python 2.4# and older):# updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'# updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'# updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'# updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'# updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'# updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'# updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1'# updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'# updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'# updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'# updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'# updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'# updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'# updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP'# updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13'# updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13'# updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'# updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'# updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11'# updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312'# updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5'# updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5'# MAL 2008-05-30:# These are the differences compared to the old mapping (Python 2.5# updated 'cs_cs.iso88592' -> 'cs_CZ.ISO8859-2' to 'cs_CS.ISO8859-2'# updated 'serbocroatian' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'# updated 'sh' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'# updated 'sh_hr.iso88592' -> 'sh_HR.ISO8859-2' to 'hr_HR.ISO8859-2'# updated 'sh_sp' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'# updated 'sh_yu' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'# updated 'sp' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5'# updated 'sp_yu' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5'# updated 'sr' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'# updated 'sr@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'# updated 'sr_sp' -> 'sr_SP.ISO8859-2' to 'sr_CS.ISO8859-2'# updated 'sr_yu' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'# updated 'sr_yu.cp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251'# updated 'sr_yu.iso88592' -> 'sr_YU.ISO8859-2' to 'sr_CS.ISO8859-2'# updated 'sr_yu.iso88595' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'# updated 'sr_yu.iso88595@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'# updated 'sr_yu.microsoftcp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251'# updated 'sr_yu.utf8@cyrillic' -> 'sr_YU.UTF-8' to 'sr_CS.UTF-8'# updated 'sr_yu@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'# AP 2010-04-12:# These are the differences compared to the old mapping (Python 2.6.5# updated 'ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8'# updated 'ru_ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8'# updated 'serbocroatian' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'# updated 'sh' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'# updated 'sh_yu' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'# updated 'sr' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'# updated 'sr@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'# updated 'sr@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'# updated 'sr_cs.utf8@latn' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8@latin'# updated 'sr_cs@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'# updated 'sr_yu' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8@latin'# 
updated 'sr_yu.utf8@cyrillic' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8'# updated 'sr_yu@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'# SS 2013-12-20:# These are the differences compared to the old mapping (Python 3.3.3# updated 'a3' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'# updated 'a3_az' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'# updated 'a3_az.koi8c' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'# updated 'cs_cs.iso88592' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'# updated 'hebrew' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'# updated 'hebrew.iso88598' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'# updated 'sd' -> 'sd_IN@devanagari.UTF-8' to 'sd_IN.UTF-8'# updated 'sr@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'# updated 'sr_cs' -> 'sr_RS.UTF-8' to 'sr_CS.UTF-8'# updated 'sr_cs.utf8@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'# updated 'sr_cs@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'# SS 2014-10-01:# Updated alias mapping with glibc 2.19 supported locales.# SS 2018-05-05:# Updated alias mapping with glibc 2.27 supported locales.# These are the differences compared to the old mapping (Python 3.6.5# updated 'ca_es@valencia' -> 'ca_ES.ISO8859-15@valencia' to 'ca_ES.UTF-8@valencia'# updated 'kk_kz' -> 'kk_KZ.RK1048' to 'kk_KZ.ptcp154'# updated 'russian' -> 'ru_RU.ISO8859-5' to 'ru_RU.KOI8-R'# This maps Windows language identifiers to locale strings.# This list has been updated from# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp# to include every locale up to Windows Vista.# NOTE: this mapping is incomplete. If your language is missing, please# submit a bug report to the Python bug tracker at http://bugs.python.org/# Make sure you include the missing language identifier and the suggested# locale code.# Afrikaans# Albanian# Alsatian - France# Amharic - Ethiopia# Arabic - Saudi Arabia# Arabic - Iraq# Arabic - Egypt# Arabic - Libya# Arabic - Algeria# Arabic - Morocco# Arabic - Tunisia# Arabic - Oman# Arabic - Yemen# Arabic - Syria# Arabic - Jordan# Arabic - Lebanon# Arabic - Kuwait# Arabic - United Arab Emirates# Arabic - Bahrain# Arabic - Qatar# Armenian# Assamese - India# Azeri - Latin# Azeri - Cyrillic# Bashkir# Basque - Russia# Belarusian# Begali# Bosnian - Cyrillic# Bosnian - Latin# Breton - France# Bulgarian# 0x0455: "my_MM", # Burmese - Not supported# Catalan# Chinese - Simplified# Chinese - Taiwan# Chinese - PRC# Chinese - Hong Kong S.A.R.# Chinese - Singapore# Chinese - Macao S.A.R.# Chinese - Traditional# Corsican - France# Croatian# Croatian - Bosnia# Czech# Danish# Dari - Afghanistan# Divehi - Maldives# Dutch - The Netherlands# Dutch - Belgium# English - United States# English - United Kingdom# English - Australia# English - Canada# English - New Zealand# English - Ireland# English - South Africa# English - Jamaica# English - Caribbean# English - Belize# English - Trinidad# English - Zimbabwe# English - Philippines# English - India# English - Malaysia# English - Singapore# Estonian# Faroese# Filipino# Finnish# French - France# French - Belgium# French - Canada# French - Switzerland# French - Luxembourg# French - Monaco# Frisian - Netherlands# Galician# Georgian# German - Germany# German - Switzerland# German - Austria# German - Luxembourg# German - Liechtenstein# Greek# Greenlandic - Greenland# Gujarati# Hausa - Latin# Hebrew# Hindi# Hungarian# Icelandic# Indonesian# Inuktitut - Syllabics# Inuktitut - Latin# Irish - Ireland# Italian - Italy# Italian - Switzerland# Japanese# Kannada - India# Kazakh# Khmer - Cambodia# K'iche - Guatemala# Kinyarwanda - Rwanda# 
Konkani# Korean# Kyrgyz# Lao - Lao PDR# Latvian# Lithuanian# Lower Sorbian - Germany# Luxembourgish# FYROM Macedonian# Malay - Malaysia# Malay - Brunei Darussalam# Malayalam - India# Maltese# Maori# Mapudungun# Marathi# Mohawk - Canada# Mongolian - Cyrillic# Mongolian - PRC# Nepali# Norwegian - Bokmal# Norwegian - Nynorsk# Occitan - France# Oriya - India# Pashto - Afghanistan# Persian# Polish# Portuguese - Brazil# Portuguese - Portugal# Punjabi# Quechua (Bolivia)# Quechua (Ecuador)# Quechua (Peru)# Romanian - Romania# Romansh# Russian# Sami Finland# Sami Norway# Sami Sweden# Sami Northern Norway# Sami Northern Sweden# Sami Northern Finland# Sami Skolt# Sami Southern Norway# Sami Southern Sweden# Sanskrit# Serbian - Cyrillic# Serbian - Bosnia Cyrillic# Serbian - Latin# Serbian - Bosnia Latin# Sinhala - Sri Lanka# Northern Sotho# Setswana - Southern Africa# Slovak# Slovenian# Spanish - Spain# Spanish - Mexico# Spanish - Spain (Modern)# Spanish - Guatemala# Spanish - Costa Rica# Spanish - Panama# Spanish - Dominican Republic# Spanish - Venezuela# Spanish - Colombia# Spanish - Peru# Spanish - Argentina# Spanish - Ecuador# Spanish - Chile# Spanish - Uruguay# Spanish - Paraguay# Spanish - Bolivia# Spanish - El Salvador# Spanish - Honduras# Spanish - Nicaragua# Spanish - Puerto Rico# Spanish - United States# 0x0430: "", # Sutu - Not supported# Swahili# Swedish - Sweden# Swedish - Finland# Syriac# Tajik - Cyrillic# Tamazight - Latin# Tamil# Tatar# Telugu# Thai# Tibetan - Bhutan# Tibetan - PRC# Turkish# Turkmen - Cyrillic# Uighur - Arabic# Ukrainian# Upper Sorbian - Germany# Urdu# Urdu - India# Uzbek - Latin# Uzbek - Cyrillic# Vietnamese# Welsh# Wolof - Senegal# Xhosa - South Africa# Yakut - Cyrillic# Yi - PRC# Yoruba - Nigeria# Zulub'Locale support module. + +The module provides low-level access to the C lib's locale APIs and adds high +level number formatting APIs as well as a locale aliasing engine to complement +these. + +The aliasing engine includes support for many commonly used locale names and +maps them to values suitable for passing to the C lib's setlocale() function. It +also includes default encodings for all supported locale names. + +'u'Locale support module. + +The module provides low-level access to the C lib's locale APIs and adds high +level number formatting APIs as well as a locale aliasing engine to complement +these. + +The aliasing engine includes support for many commonly used locale names and +maps them to values suitable for passing to the C lib's setlocale() function. It +also includes default encodings for all supported locale names. + +'b'getlocale'u'getlocale'b'getdefaultlocale'u'getdefaultlocale'b'getpreferredencoding'u'getpreferredencoding'b'setlocale'u'setlocale'b'resetlocale'u'resetlocale'b'localeconv'u'localeconv'b'strcoll'u'strcoll'b'strxfrm'u'strxfrm'b'atof'u'atof'b'atoi'u'atoi'b'format_string'u'format_string'b'currency'u'currency'b'normalize'u'normalize'b'LC_CTYPE'u'LC_CTYPE'b'LC_COLLATE'u'LC_COLLATE'b'LC_TIME'u'LC_TIME'b'LC_MONETARY'u'LC_MONETARY'b'LC_NUMERIC'u'LC_NUMERIC'b'CHAR_MAX'u'CHAR_MAX'b' strcoll(string,string) -> int. + Compares two strings according to the locale. + 'u' strcoll(string,string) -> int. + Compares two strings according to the locale. + 'b' strxfrm(string) -> string. + Returns a string that behaves for cmp locale-aware. + 'u' strxfrm(string) -> string. + Returns a string that behaves for cmp locale-aware. + 'b' localeconv() -> dict. + Returns numeric and monetary locale-specific parameters. + 'u' localeconv() -> dict. 
+ Returns numeric and monetary locale-specific parameters. + 'b'currency_symbol'u'currency_symbol'b'n_sign_posn'u'n_sign_posn'b'p_cs_precedes'u'p_cs_precedes'b'n_cs_precedes'u'n_cs_precedes'b'mon_grouping'u'mon_grouping'b'n_sep_by_space'u'n_sep_by_space'b'negative_sign'u'negative_sign'b'positive_sign'u'positive_sign'b'p_sep_by_space'u'p_sep_by_space'b'int_curr_symbol'u'int_curr_symbol'b'p_sign_posn'u'p_sign_posn'b'mon_thousands_sep'u'mon_thousands_sep'b'frac_digits'u'frac_digits'b'mon_decimal_point'u'mon_decimal_point'b'int_frac_digits'u'int_frac_digits'b' setlocale(integer,string=None) -> string. + Activates/queries locale processing. + 'u' setlocale(integer,string=None) -> string. + Activates/queries locale processing. + 'b'_locale emulation only supports "C" locale'u'_locale emulation only supports "C" locale'b'invalid grouping'u'invalid grouping'b'0123456789'u'0123456789'b'%(?:\((?P.*?)\))?(?P[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]'u'%(?:\((?P.*?)\))?(?P[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]'b'eEfFgG'u'eEfFgG'b'diu'u'diu'b'Formats a string in the same way that the % formatting would use, + but takes the current locale into account. + + Grouping is applied if the third parameter is true. + Conversion uses monetary thousands separator and grouping strings if + forth parameter monetary is true.'u'Formats a string in the same way that the % formatting would use, + but takes the current locale into account. + + Grouping is applied if the third parameter is true. + Conversion uses monetary thousands separator and grouping strings if + forth parameter monetary is true.'b'modifiers'u'modifiers'b'Deprecated, use format_string instead.'u'Deprecated, use format_string instead.'b'This method will be removed in a future version of Python. Use 'locale.format_string()' instead.'u'This method will be removed in a future version of Python. Use 'locale.format_string()' instead.'b'format() must be given exactly one %%char format specifier, %s not valid'u'format() must be given exactly one %%char format specifier, %s not valid'b'Formats val according to the currency settings + in the current locale.'u'Formats val according to the currency settings + in the current locale.'b'Currency formatting is not possible using the 'C' locale.'u'Currency formatting is not possible using the 'C' locale.'b'%%.%if'u'%%.%if'b'Convert float to string, taking the locale into account.'u'Convert float to string, taking the locale into account.'b'%.12g'u'%.12g'b'Parses a string as a normalized number according to the locale settings.'u'Parses a string as a normalized number according to the locale settings.'b'Parses a string as a float according to the locale settings.'u'Parses a string as a float according to the locale settings.'b'Converts a string to an integer according to the locale settings.'u'Converts a string to an integer according to the locale settings.'b'.ISO8859-15'u'.ISO8859-15'b'ISO8859-15'u'ISO8859-15'b'ISO8859-1'u'ISO8859-1'b' Returns a normalized locale code for the given locale + name. + + The returned locale code is formatted for use with + setlocale(). + + If normalization fails, the original name is returned + unchanged. + + If the given encoding is not known, the function defaults to + the default encoding for the locale code just like setlocale() + does. + + 'u' Returns a normalized locale code for the given locale + name. + + The returned locale code is formatted for use with + setlocale(). + + If normalization fails, the original name is returned + unchanged. 
+ + If the given encoding is not known, the function defaults to + the default encoding for the locale code just like setlocale() + does. + + 'b' Parses the locale code for localename and returns the + result as tuple (language code, encoding). + + The localename is normalized and passed through the locale + alias engine. A ValueError is raised in case the locale name + cannot be parsed. + + The language code corresponds to RFC 1766. code and encoding + can be None in case the values cannot be determined or are + unknown to this implementation. + + 'u' Parses the locale code for localename and returns the + result as tuple (language code, encoding). + + The localename is normalized and passed through the locale + alias engine. A ValueError is raised in case the locale name + cannot be parsed. + + The language code corresponds to RFC 1766. code and encoding + can be None in case the values cannot be determined or are + unknown to this implementation. + + 'b'unknown locale: %s'u'unknown locale: %s'b' Builds a locale code from the given tuple (language code, + encoding). + + No aliasing or normalizing takes place. + + 'u' Builds a locale code from the given tuple (language code, + encoding). + + No aliasing or normalizing takes place. + + 'b'Locale must be None, a string, or an iterable of two strings -- language code, encoding.'u'Locale must be None, a string, or an iterable of two strings -- language code, encoding.'b' Tries to determine the default locale settings and returns + them as tuple (language code, encoding). + + According to POSIX, a program which has not called + setlocale(LC_ALL, "") runs using the portable 'C' locale. + Calling setlocale(LC_ALL, "") lets it use the default locale as + defined by the LANG variable. Since we don't want to interfere + with the current locale setting we thus emulate the behavior + in the way described above. + + To maintain compatibility with other platforms, not only the + LANG variable is tested, but a list of variables given as + envvars parameter. The first found to be defined will be + used. envvars defaults to the search path used in GNU gettext; + it must always contain the variable name 'LANG'. + + Except for the code 'C', the language code corresponds to RFC + 1766. code and encoding can be None in case the values cannot + be determined. + + 'u' Tries to determine the default locale settings and returns + them as tuple (language code, encoding). + + According to POSIX, a program which has not called + setlocale(LC_ALL, "") runs using the portable 'C' locale. + Calling setlocale(LC_ALL, "") lets it use the default locale as + defined by the LANG variable. Since we don't want to interfere + with the current locale setting we thus emulate the behavior + in the way described above. + + To maintain compatibility with other platforms, not only the + LANG variable is tested, but a list of variables given as + envvars parameter. The first found to be defined will be + used. envvars defaults to the search path used in GNU gettext; + it must always contain the variable name 'LANG'. + + Except for the code 'C', the language code corresponds to RFC + 1766. code and encoding can be None in case the values cannot + be determined. + + 'b'0x'u'0x'b' Returns the current setting for the given locale category as + tuple (language code, encoding). + + category may be one of the LC_* value except LC_ALL. It + defaults to LC_CTYPE. + + Except for the code 'C', the language code corresponds to RFC + 1766. 
code and encoding can be None in case the values cannot + be determined. + + 'u' Returns the current setting for the given locale category as + tuple (language code, encoding). + + category may be one of the LC_* value except LC_ALL. It + defaults to LC_CTYPE. + + Except for the code 'C', the language code corresponds to RFC + 1766. code and encoding can be None in case the values cannot + be determined. + + 'b'category LC_ALL is not supported'u'category LC_ALL is not supported'b' Set the locale for the given category. The locale can be + a string, an iterable of two strings (language code and encoding), + or None. + + Iterables are converted to strings using the locale aliasing + engine. Locale strings are passed directly to the C lib. + + category may be given as one of the LC_* values. + + 'u' Set the locale for the given category. The locale can be + a string, an iterable of two strings (language code and encoding), + or None. + + Iterables are converted to strings using the locale aliasing + engine. Locale strings are passed directly to the C lib. + + category may be given as one of the LC_* values. + + 'b' Sets the locale for category to the default setting. + + The default setting is determined by calling + getdefaultlocale(). category defaults to LC_ALL. + + 'u' Sets the locale for category to the default setting. + + The default setting is determined by calling + getdefaultlocale(). category defaults to LC_ALL. + + 'b'Return the charset that the user is likely using.'u'Return the charset that the user is likely using.'b'Return the charset that the user is likely using, + according to the system configuration.'u'Return the charset that the user is likely using, + according to the system configuration.'b'Return the charset that the user is likely using, + by looking at environment variables.'u'Return the charset that the user is likely using, + by looking at environment 
variables.'b'en'u'en'b'JIS7'u'JIS7'b'jis'u'jis'b'jis7'u'jis7'b'eucJP'u'eucJP'b'ajec'u'ajec'b'KOI8-C'u'KOI8-C'b'koi8c'u'koi8c'b'CP1251'u'CP1251'b'microsoftcp1251'u'microsoftcp1251'b'CP1255'u'CP1255'b'microsoftcp1255'u'microsoftcp1255'b'CP1256'u'CP1256'b'microsoftcp1256'u'microsoftcp1256'b'88591'u'88591'b'ISO8859-2'u'ISO8859-2'b'88592'u'88592'b'ISO8859-5'u'ISO8859-5'b'88595'u'88595'b'885915'u'885915'b'ISO8859-10'u'ISO8859-10'b'ISO8859-11'u'ISO8859-11'b'ISO8859-13'u'ISO8859-13'b'ISO8859-14'u'ISO8859-14'b'ISO8859-16'u'ISO8859-16'b'ISO8859-3'u'ISO8859-3'b'ISO8859-4'u'ISO8859-4'b'ISO8859-6'u'ISO8859-6'b'ISO8859-7'u'ISO8859-7'b'ISO8859-8'u'ISO8859-8'b'ISO8859-9'u'ISO8859-9'b'SJIS'u'SJIS'b'TACTIS'u'TACTIS'b'eucKR'u'eucKR'b'KOI8-R'u'KOI8-R'b'KOI8-T'u'KOI8-T'b'koi8_t'u'koi8_t'b'KOI8-U'u'KOI8-U'b'koi8_u'u'koi8_u'b'RK1048'u'RK1048'b'az_AZ.KOI8-C'u'az_AZ.KOI8-C'b'a3'u'a3'b'a3_az'u'a3_az'b'a3_az.koic'u'a3_az.koic'b'aa_DJ.ISO8859-1'u'aa_DJ.ISO8859-1'b'aa_dj'u'aa_dj'b'aa_ER.UTF-8'u'aa_ER.UTF-8'b'aa_er'u'aa_er'b'aa_ET.UTF-8'u'aa_ET.UTF-8'b'aa_et'u'aa_et'b'af_ZA.ISO8859-1'u'af_ZA.ISO8859-1'b'af'u'af'b'af_za'u'af_za'b'agr_PE.UTF-8'u'agr_PE.UTF-8'b'agr_pe'u'agr_pe'b'ak_GH.UTF-8'u'ak_GH.UTF-8'b'ak_gh'u'ak_gh'b'am_ET.UTF-8'u'am_ET.UTF-8'b'am'u'am'b'am_et'u'am_et'b'en_US.ISO8859-1'u'en_US.ISO8859-1'b'american'u'american'b'an_ES.ISO8859-15'u'an_ES.ISO8859-15'b'an_es'u'an_es'b'anp_IN.UTF-8'u'anp_IN.UTF-8'b'anp_in'u'anp_in'b'ar_AA.ISO8859-6'u'ar_AA.ISO8859-6'b'ar'u'ar'b'ar_aa'u'ar_aa'b'ar_AE.ISO8859-6'u'ar_AE.ISO8859-6'b'ar_ae'u'ar_ae'b'ar_BH.ISO8859-6'u'ar_BH.ISO8859-6'b'ar_bh'u'ar_bh'b'ar_DZ.ISO8859-6'u'ar_DZ.ISO8859-6'b'ar_dz'u'ar_dz'b'ar_EG.ISO8859-6'u'ar_EG.ISO8859-6'b'ar_eg'u'ar_eg'b'ar_IN.UTF-8'u'ar_IN.UTF-8'b'ar_in'u'ar_in'b'ar_IQ.ISO8859-6'u'ar_IQ.ISO8859-6'b'ar_iq'u'ar_iq'b'ar_JO.ISO8859-6'u'ar_JO.ISO8859-6'b'ar_jo'u'ar_jo'b'ar_KW.ISO8859-6'u'ar_KW.ISO8859-6'b'ar_kw'u'ar_kw'b'ar_LB.ISO8859-6'u'ar_LB.ISO8859-6'b'ar_lb'u'ar_lb'b'ar_LY.ISO8859-6'u'ar_LY.ISO8859-6'b'ar_ly'u'ar_ly'b'ar_MA.ISO8859-6'u'ar_MA.ISO8859-6'b'ar_ma'u'ar_ma'b'ar_OM.ISO8859-6'u'ar_OM.ISO8859-6'b'ar_om'u'ar_om'b'ar_QA.ISO8859-6'u'ar_QA.ISO8859-6'b'ar_qa'u'ar_qa'b'ar_SA.ISO8859-6'u'ar_SA.ISO8859-6'b'ar_sa'u'ar_sa'b'ar_SD.ISO8859-6'u'ar_SD.ISO8859-6'b'ar_sd'u'ar_sd'b'ar_SS.UTF-8'u'ar_SS.UTF-8'b'ar_ss'u'ar_ss'b'ar_SY.ISO8859-6'u'ar_SY.ISO8859-6'b'ar_sy'u'ar_sy'b'ar_TN.ISO8859-6'u'ar_TN.ISO8859-6'b'ar_tn'u'ar_tn'b'ar_YE.ISO8859-6'u'ar_YE.ISO8859-6'b'ar_ye'u'ar_ye'b'as_IN.UTF-8'u'as_IN.UTF-8'b'as_in'u'as_in'b'ast_ES.ISO8859-15'u'ast_ES.ISO8859-15'b'ast_es'u'ast_es'b'ayc_PE.UTF-8'u'ayc_PE.UTF-8'b'ayc_pe'u'ayc_pe'b'az_AZ.ISO8859-9E'u'az_AZ.ISO8859-9E'b'az'u'az'b'az_az'u'az_az'b'az_az.iso88599e'u'az_az.iso88599e'b'az_IR.UTF-8'u'az_IR.UTF-8'b'az_ir'u'az_ir'b'be_BY.CP1251'u'be_BY.CP1251'b'be'u'be'b'be_BY.UTF-8@latin'u'be_BY.UTF-8@latin'b'be@latin'u'be@latin'b'bg_BG.UTF-8'u'bg_BG.UTF-8'b'be_bg.utf8'u'be_bg.utf8'b'be_by'u'be_by'b'be_by@latin'u'be_by@latin'b'bem_ZM.UTF-8'u'bem_ZM.UTF-8'b'bem_zm'u'bem_zm'b'ber_DZ.UTF-8'u'ber_DZ.UTF-8'b'ber_dz'u'ber_dz'b'ber_MA.UTF-8'u'ber_MA.UTF-8'b'ber_ma'u'ber_ma'b'bg_BG.CP1251'u'bg_BG.CP1251'b'bg'u'bg'b'bg_bg'u'bg_bg'b'bhb_IN.UTF-8'u'bhb_IN.UTF-8'b'bhb_in.utf8'u'bhb_in.utf8'b'bho_IN.UTF-8'u'bho_IN.UTF-8'b'bho_in'u'bho_in'b'bho_NP.UTF-8'u'bho_NP.UTF-8'b'bho_np'u'bho_np'b'bi_VU.UTF-8'u'bi_VU.UTF-8'b'bi_vu'u'bi_vu'b'bn_BD.UTF-8'u'bn_BD.UTF-8'b'bn_bd'u'bn_bd'b'bn_IN.UTF-8'u'bn_IN.UTF-8'b'bn_in'u'bn_in'b'bo_CN.UTF-8'u'bo_CN.UTF-8'b'bo_cn'u'bo_cn'b'bo_IN.UTF-8'u'bo_IN.UTF-8'b'bo_in'u'bo_in'b'nb_NO.ISO8859-1'u'nb_NO.ISO885
9-1'b'bokmal'u'bokmal'b'bokmål'u'bokmål'b'br_FR.ISO8859-1'u'br_FR.ISO8859-1'b'br_fr'u'br_fr'b'brx_IN.UTF-8'u'brx_IN.UTF-8'b'brx_in'u'brx_in'b'bs_BA.ISO8859-2'u'bs_BA.ISO8859-2'b'bs'u'bs'b'bs_ba'u'bs_ba'b'bulgarian'u'bulgarian'b'byn_ER.UTF-8'u'byn_ER.UTF-8'b'byn_er'u'byn_er'b'fr_CA.ISO8859-1'u'fr_CA.ISO8859-1'b'c-french'u'c-french'b'c.ascii'u'c.ascii'b'c.en'u'c.en'b'c.iso88591'u'c.iso88591'b'en_US.UTF-8'u'en_US.UTF-8'b'c.utf8'u'c.utf8'b'c_c'u'c_c'b'c_c.c'u'c_c.c'b'ca_ES.ISO8859-1'u'ca_ES.ISO8859-1'b'ca'u'ca'b'ca_AD.ISO8859-1'u'ca_AD.ISO8859-1'b'ca_ad'u'ca_ad'b'ca_es'u'ca_es'b'ca_ES.UTF-8@valencia'u'ca_ES.UTF-8@valencia'b'ca_es@valencia'u'ca_es@valencia'b'ca_FR.ISO8859-1'u'ca_FR.ISO8859-1'b'ca_fr'u'ca_fr'b'ca_IT.ISO8859-1'u'ca_IT.ISO8859-1'b'ca_it'u'ca_it'b'catalan'u'catalan'b'ce_RU.UTF-8'u'ce_RU.UTF-8'b'ce_ru'u'ce_ru'b'cextend'u'cextend'b'zh_CN.eucCN'u'zh_CN.eucCN'b'chinese-s'u'chinese-s'b'zh_TW.eucTW'u'zh_TW.eucTW'b'chinese-t'u'chinese-t'b'chr_US.UTF-8'u'chr_US.UTF-8'b'chr_us'u'chr_us'b'ckb_IQ.UTF-8'u'ckb_IQ.UTF-8'b'ckb_iq'u'ckb_iq'b'cmn_TW.UTF-8'u'cmn_TW.UTF-8'b'cmn_tw'u'cmn_tw'b'crh_UA.UTF-8'u'crh_UA.UTF-8'b'crh_ua'u'crh_ua'b'hr_HR.ISO8859-2'u'hr_HR.ISO8859-2'b'croatian'u'croatian'b'cs_CZ.ISO8859-2'u'cs_CZ.ISO8859-2'b'cs'u'cs'b'cs_cs'u'cs_cs'b'cs_cz'u'cs_cz'b'csb_PL.UTF-8'u'csb_PL.UTF-8'b'csb_pl'u'csb_pl'b'cv_RU.UTF-8'u'cv_RU.UTF-8'b'cv_ru'u'cv_ru'b'cy_GB.ISO8859-1'u'cy_GB.ISO8859-1'b'cy'u'cy'b'cy_gb'u'cy_gb'b'cz'u'cz'b'cz_cz'u'cz_cz'b'czech'u'czech'b'da_DK.ISO8859-1'u'da_DK.ISO8859-1'b'da'u'da'b'da_dk'u'da_dk'b'danish'u'danish'b'dansk'u'dansk'b'de_DE.ISO8859-1'u'de_DE.ISO8859-1'b'de'u'de'b'de_AT.ISO8859-1'u'de_AT.ISO8859-1'b'de_at'u'de_at'b'de_BE.ISO8859-1'u'de_BE.ISO8859-1'b'de_be'u'de_be'b'de_CH.ISO8859-1'u'de_CH.ISO8859-1'b'de_ch'u'de_ch'b'de_de'u'de_de'b'de_IT.ISO8859-1'u'de_IT.ISO8859-1'b'de_it'u'de_it'b'de_LI.UTF-8'u'de_LI.UTF-8'b'de_li.utf8'u'de_li.utf8'b'de_LU.ISO8859-1'u'de_LU.ISO8859-1'b'de_lu'u'de_lu'b'deutsch'u'deutsch'b'doi_IN.UTF-8'u'doi_IN.UTF-8'b'doi_in'u'doi_in'b'nl_NL.ISO8859-1'u'nl_NL.ISO8859-1'b'dutch'u'dutch'b'nl_BE.ISO8859-1'u'nl_BE.ISO8859-1'b'dutch.iso88591'u'dutch.iso88591'b'dv_MV.UTF-8'u'dv_MV.UTF-8'b'dv_mv'u'dv_mv'b'dz_BT.UTF-8'u'dz_BT.UTF-8'b'dz_bt'u'dz_bt'b'ee_EE.ISO8859-4'u'ee_EE.ISO8859-4'b'ee'u'ee'b'ee_ee'u'ee_ee'b'et_EE.ISO8859-1'u'et_EE.ISO8859-1'b'eesti'u'eesti'b'el_GR.ISO8859-7'u'el_GR.ISO8859-7'b'el'u'el'b'el_CY.ISO8859-7'u'el_CY.ISO8859-7'b'el_cy'u'el_cy'b'el_gr'u'el_gr'b'el_GR.ISO8859-15'u'el_GR.ISO8859-15'b'el_gr@euro'u'el_gr@euro'b'en_AG.UTF-8'u'en_AG.UTF-8'b'en_ag'u'en_ag'b'en_AU.ISO8859-1'u'en_AU.ISO8859-1'b'en_au'u'en_au'b'en_BE.ISO8859-1'u'en_BE.ISO8859-1'b'en_be'u'en_be'b'en_BW.ISO8859-1'u'en_BW.ISO8859-1'b'en_bw'u'en_bw'b'en_CA.ISO8859-1'u'en_CA.ISO8859-1'b'en_ca'u'en_ca'b'en_DK.ISO8859-1'u'en_DK.ISO8859-1'b'en_dk'u'en_dk'b'en_DL.UTF-8'u'en_DL.UTF-8'b'en_dl.utf8'u'en_dl.utf8'b'en_GB.ISO8859-1'u'en_GB.ISO8859-1'b'en_gb'u'en_gb'b'en_HK.ISO8859-1'u'en_HK.ISO8859-1'b'en_hk'u'en_hk'b'en_IE.ISO8859-1'u'en_IE.ISO8859-1'b'en_ie'u'en_ie'b'en_IL.UTF-8'u'en_IL.UTF-8'b'en_il'u'en_il'b'en_IN.ISO8859-1'u'en_IN.ISO8859-1'b'en_in'u'en_in'b'en_NG.UTF-8'u'en_NG.UTF-8'b'en_ng'u'en_ng'b'en_NZ.ISO8859-1'u'en_NZ.ISO8859-1'b'en_nz'u'en_nz'b'en_PH.ISO8859-1'u'en_PH.ISO8859-1'b'en_ph'u'en_ph'b'en_SC.UTF-8'u'en_SC.UTF-8'b'en_sc.utf8'u'en_sc.utf8'b'en_SG.ISO8859-1'u'en_SG.ISO8859-1'b'en_sg'u'en_sg'b'en_uk'u'en_uk'b'en_us'u'en_us'b'en_US.ISO8859-15'u'en_US.ISO8859-15'b'en_us@euro@euro'u'en_us@euro@euro'b'en_ZA.ISO8859-1'u'en_ZA.ISO8859-1'b'en_za'u'en_za'b'en_ZM.UTF-8'u
'en_ZM.UTF-8'b'en_zm'u'en_zm'b'en_ZW.ISO8859-1'u'en_ZW.ISO8859-1'b'en_zw'u'en_zw'b'en_ZS.UTF-8'u'en_ZS.UTF-8'b'en_zw.utf8'u'en_zw.utf8'b'eng_gb'u'eng_gb'b'en_EN.ISO8859-1'u'en_EN.ISO8859-1'b'english'u'english'b'english.iso88591'u'english.iso88591'b'english_uk'u'english_uk'b'english_united-states'u'english_united-states'b'english_united-states.437'u'english_united-states.437'b'english_us'u'english_us'b'eo_XX.ISO8859-3'u'eo_XX.ISO8859-3'b'eo'u'eo'b'eo.UTF-8'u'eo.UTF-8'b'eo.utf8'u'eo.utf8'b'eo_EO.ISO8859-3'u'eo_EO.ISO8859-3'b'eo_eo'u'eo_eo'b'eo_US.UTF-8'u'eo_US.UTF-8'b'eo_us.utf8'u'eo_us.utf8'b'eo_xx'u'eo_xx'b'es_ES.ISO8859-1'u'es_ES.ISO8859-1'b'es'u'es'b'es_AR.ISO8859-1'u'es_AR.ISO8859-1'b'es_ar'u'es_ar'b'es_BO.ISO8859-1'u'es_BO.ISO8859-1'b'es_bo'u'es_bo'b'es_CL.ISO8859-1'u'es_CL.ISO8859-1'b'es_cl'u'es_cl'b'es_CO.ISO8859-1'u'es_CO.ISO8859-1'b'es_co'u'es_co'b'es_CR.ISO8859-1'u'es_CR.ISO8859-1'b'es_cr'u'es_cr'b'es_CU.UTF-8'u'es_CU.UTF-8'b'es_cu'u'es_cu'b'es_DO.ISO8859-1'u'es_DO.ISO8859-1'b'es_do'u'es_do'b'es_EC.ISO8859-1'u'es_EC.ISO8859-1'b'es_ec'u'es_ec'b'es_es'u'es_es'b'es_GT.ISO8859-1'u'es_GT.ISO8859-1'b'es_gt'u'es_gt'b'es_HN.ISO8859-1'u'es_HN.ISO8859-1'b'es_hn'u'es_hn'b'es_MX.ISO8859-1'u'es_MX.ISO8859-1'b'es_mx'u'es_mx'b'es_NI.ISO8859-1'u'es_NI.ISO8859-1'b'es_ni'u'es_ni'b'es_PA.ISO8859-1'u'es_PA.ISO8859-1'b'es_pa'u'es_pa'b'es_PE.ISO8859-1'u'es_PE.ISO8859-1'b'es_pe'u'es_pe'b'es_PR.ISO8859-1'u'es_PR.ISO8859-1'b'es_pr'u'es_pr'b'es_PY.ISO8859-1'u'es_PY.ISO8859-1'b'es_py'u'es_py'b'es_SV.ISO8859-1'u'es_SV.ISO8859-1'b'es_sv'u'es_sv'b'es_US.ISO8859-1'u'es_US.ISO8859-1'b'es_us'u'es_us'b'es_UY.ISO8859-1'u'es_UY.ISO8859-1'b'es_uy'u'es_uy'b'es_VE.ISO8859-1'u'es_VE.ISO8859-1'b'es_ve'u'es_ve'b'estonian'u'estonian'b'et_EE.ISO8859-15'u'et_EE.ISO8859-15'b'et'u'et'b'et_ee'u'et_ee'b'eu_ES.ISO8859-1'u'eu_ES.ISO8859-1'b'eu'u'eu'b'eu_es'u'eu_es'b'eu_FR.ISO8859-1'u'eu_FR.ISO8859-1'b'eu_fr'u'eu_fr'b'fa_IR.UTF-8'u'fa_IR.UTF-8'b'fa'u'fa'b'fa_ir'u'fa_ir'b'fa_IR.ISIRI-3342'u'fa_IR.ISIRI-3342'b'fa_ir.isiri3342'u'fa_ir.isiri3342'b'ff_SN.UTF-8'u'ff_SN.UTF-8'b'ff_sn'u'ff_sn'b'fi_FI.ISO8859-15'u'fi_FI.ISO8859-15'b'fi'u'fi'b'fi_fi'u'fi_fi'b'fil_PH.UTF-8'u'fil_PH.UTF-8'b'fil_ph'u'fil_ph'b'fi_FI.ISO8859-1'u'fi_FI.ISO8859-1'b'finnish'u'finnish'b'fo_FO.ISO8859-1'u'fo_FO.ISO8859-1'b'fo'u'fo'b'fo_fo'u'fo_fo'b'fr_FR.ISO8859-1'u'fr_FR.ISO8859-1'b'fr'u'fr'b'fr_BE.ISO8859-1'u'fr_BE.ISO8859-1'b'fr_be'u'fr_be'b'fr_ca'u'fr_ca'b'fr_CH.ISO8859-1'u'fr_CH.ISO8859-1'b'fr_ch'u'fr_ch'b'fr_fr'u'fr_fr'b'fr_LU.ISO8859-1'u'fr_LU.ISO8859-1'b'fr_lu'u'fr_lu'b'français'u'français'b'fre_fr'u'fre_fr'b'french'u'french'b'french.iso88591'u'french.iso88591'b'french_france'u'french_france'b'fur_IT.UTF-8'u'fur_IT.UTF-8'b'fur_it'u'fur_it'b'fy_DE.UTF-8'u'fy_DE.UTF-8'b'fy_de'u'fy_de'b'fy_NL.UTF-8'u'fy_NL.UTF-8'b'fy_nl'u'fy_nl'b'ga_IE.ISO8859-1'u'ga_IE.ISO8859-1'b'ga'u'ga'b'ga_ie'u'ga_ie'b'gl_ES.ISO8859-1'u'gl_ES.ISO8859-1'b'galego'u'galego'b'galician'u'galician'b'gd_GB.ISO8859-1'u'gd_GB.ISO8859-1'b'gd'u'gd'b'gd_gb'u'gd_gb'b'ger_de'u'ger_de'b'german'u'german'b'german.iso88591'u'german.iso88591'b'german_germany'u'german_germany'b'gez_ER.UTF-8'u'gez_ER.UTF-8'b'gez_er'u'gez_er'b'gez_ET.UTF-8'u'gez_ET.UTF-8'b'gez_et'u'gez_et'b'gl'u'gl'b'gl_es'u'gl_es'b'gu_IN.UTF-8'u'gu_IN.UTF-8'b'gu_in'u'gu_in'b'gv_GB.ISO8859-1'u'gv_GB.ISO8859-1'b'gv'u'gv'b'gv_gb'u'gv_gb'b'ha_NG.UTF-8'u'ha_NG.UTF-8'b'ha_ng'u'ha_ng'b'hak_TW.UTF-8'u'hak_TW.UTF-8'b'hak_tw'u'hak_tw'b'he_IL.ISO8859-8'u'he_IL.ISO8859-8'b'he'u'he'b'he_il'u'he_il'b'hi_IN.ISCII-DEV'u'hi_IN.ISCII-DEV'b'hi'u'hi'b'hi_in'u'hi_in'
b'hi_in.isciidev'u'hi_in.isciidev'b'hif_FJ.UTF-8'u'hif_FJ.UTF-8'b'hif_fj'u'hif_fj'b'hne_IN.UTF-8'u'hne_IN.UTF-8'b'hne'u'hne'b'hne_in'u'hne_in'b'hr_hr'u'hr_hr'b'hrvatski'u'hrvatski'b'hsb_DE.ISO8859-2'u'hsb_DE.ISO8859-2'b'hsb_de'u'hsb_de'b'ht_HT.UTF-8'u'ht_HT.UTF-8'b'ht_ht'u'ht_ht'b'hu_HU.ISO8859-2'u'hu_HU.ISO8859-2'b'hu'u'hu'b'hu_hu'u'hu_hu'b'hungarian'u'hungarian'b'hy_AM.UTF-8'u'hy_AM.UTF-8'b'hy_am'u'hy_am'b'hy_AM.ARMSCII_8'u'hy_AM.ARMSCII_8'b'hy_am.armscii8'u'hy_am.armscii8'b'ia.UTF-8'u'ia.UTF-8'b'ia'u'ia'b'ia_FR.UTF-8'u'ia_FR.UTF-8'b'ia_fr'u'ia_fr'b'is_IS.ISO8859-1'u'is_IS.ISO8859-1'b'icelandic'u'icelandic'b'id_ID.ISO8859-1'u'id_ID.ISO8859-1'b'id_id'u'id_id'b'ig_NG.UTF-8'u'ig_NG.UTF-8'b'ig_ng'u'ig_ng'b'ik_CA.UTF-8'u'ik_CA.UTF-8'b'ik_ca'u'ik_ca'b'in_id'u'in_id'b'is_is'u'is_is'b'iso8859-1'u'iso8859-1'b'iso8859-15'u'iso8859-15'b'it_IT.ISO8859-1'u'it_IT.ISO8859-1'b'it'u'it'b'it_CH.ISO8859-1'u'it_CH.ISO8859-1'b'it_ch'u'it_ch'b'it_it'u'it_it'b'italian'u'italian'b'iu_CA.NUNACOM-8'u'iu_CA.NUNACOM-8'b'iu'u'iu'b'iu_ca'u'iu_ca'b'iu_ca.nunacom8'u'iu_ca.nunacom8'b'iw'u'iw'b'iw_il'u'iw_il'b'iw_IL.UTF-8'u'iw_IL.UTF-8'b'iw_il.utf8'u'iw_il.utf8'b'ja_JP.eucJP'u'ja_JP.eucJP'b'ja'u'ja'b'ja_jp'u'ja_jp'b'ja_jp.euc'u'ja_jp.euc'b'ja_JP.SJIS'u'ja_JP.SJIS'b'ja_jp.mscode'u'ja_jp.mscode'b'ja_jp.pck'u'ja_jp.pck'b'japan'u'japan'b'japanese'u'japanese'b'japanese-euc'u'japanese-euc'b'japanese.euc'u'japanese.euc'b'jp_jp'u'jp_jp'b'ka_GE.GEORGIAN-ACADEMY'u'ka_GE.GEORGIAN-ACADEMY'b'ka'u'ka'b'ka_ge'u'ka_ge'b'ka_ge.georgianacademy'u'ka_ge.georgianacademy'b'ka_GE.GEORGIAN-PS'u'ka_GE.GEORGIAN-PS'b'ka_ge.georgianps'u'ka_ge.georgianps'b'ka_ge.georgianrs'u'ka_ge.georgianrs'b'kab_DZ.UTF-8'u'kab_DZ.UTF-8'b'kab_dz'u'kab_dz'b'kk_KZ.ptcp154'u'kk_KZ.ptcp154'b'kk_kz'u'kk_kz'b'kl_GL.ISO8859-1'u'kl_GL.ISO8859-1'b'kl'u'kl'b'kl_gl'u'kl_gl'b'km_KH.UTF-8'u'km_KH.UTF-8'b'km_kh'u'km_kh'b'kn_IN.UTF-8'u'kn_IN.UTF-8'b'kn'u'kn'b'kn_in'u'kn_in'b'ko_KR.eucKR'u'ko_KR.eucKR'b'ko'u'ko'b'ko_kr'u'ko_kr'b'ko_kr.euc'u'ko_kr.euc'b'kok_IN.UTF-8'u'kok_IN.UTF-8'b'kok_in'u'kok_in'b'korean.euc'u'korean.euc'b'ks_IN.UTF-8'u'ks_IN.UTF-8'b'ks'u'ks'b'ks_in'u'ks_in'b'ks_IN.UTF-8@devanagari'u'ks_IN.UTF-8@devanagari'b'ks_in@devanagari.utf8'u'ks_in@devanagari.utf8'b'ku_TR.ISO8859-9'u'ku_TR.ISO8859-9'b'ku_tr'u'ku_tr'b'kw_GB.ISO8859-1'u'kw_GB.ISO8859-1'b'kw'u'kw'b'kw_gb'u'kw_gb'b'ky_KG.UTF-8'u'ky_KG.UTF-8'b'ky'u'ky'b'ky_kg'u'ky_kg'b'lb_LU.UTF-8'u'lb_LU.UTF-8'b'lb_lu'u'lb_lu'b'lg_UG.ISO8859-10'u'lg_UG.ISO8859-10'b'lg_ug'u'lg_ug'b'li_BE.UTF-8'u'li_BE.UTF-8'b'li_be'u'li_be'b'li_NL.UTF-8'u'li_NL.UTF-8'b'li_nl'u'li_nl'b'lij_IT.UTF-8'u'lij_IT.UTF-8'b'lij_it'u'lij_it'b'lt_LT.ISO8859-13'u'lt_LT.ISO8859-13'b'lithuanian'u'lithuanian'b'ln_CD.UTF-8'u'ln_CD.UTF-8'b'ln_cd'u'ln_cd'b'lo_LA.MULELAO-1'u'lo_LA.MULELAO-1'b'lo'u'lo'b'lo_la'u'lo_la'b'lo_LA.IBM-CP1133'u'lo_LA.IBM-CP1133'b'lo_la.cp1133'u'lo_la.cp1133'b'lo_la.ibmcp1133'u'lo_la.ibmcp1133'b'lo_la.mulelao1'u'lo_la.mulelao1'b'lt_lt'u'lt_lt'b'lv_LV.ISO8859-13'u'lv_LV.ISO8859-13'b'lv'u'lv'b'lv_lv'u'lv_lv'b'lzh_TW.UTF-8'u'lzh_TW.UTF-8'b'lzh_tw'u'lzh_tw'b'mag_IN.UTF-8'u'mag_IN.UTF-8'b'mag_in'u'mag_in'b'mai_IN.UTF-8'u'mai_IN.UTF-8'b'mai'u'mai'b'mai_in'u'mai_in'b'mai_NP.UTF-8'u'mai_NP.UTF-8'b'mai_np'u'mai_np'b'mfe_MU.UTF-8'u'mfe_MU.UTF-8'b'mfe_mu'u'mfe_mu'b'mg_MG.ISO8859-15'u'mg_MG.ISO8859-15'b'mg_mg'u'mg_mg'b'mhr_RU.UTF-8'u'mhr_RU.UTF-8'b'mhr_ru'u'mhr_ru'b'mi_NZ.ISO8859-1'u'mi_NZ.ISO8859-1'b'mi'u'mi'b'mi_nz'u'mi_nz'b'miq_NI.UTF-8'u'miq_NI.UTF-8'b'miq_ni'u'miq_ni'b'mjw_IN.UTF-8'u'mjw_IN.UTF-8'b'mjw_in'u'mjw_in'b'mk_MK.ISO8859-5'u'mk_MK.ISO8859
-5'b'mk'u'mk'b'mk_mk'u'mk_mk'b'ml_IN.UTF-8'u'ml_IN.UTF-8'b'ml'u'ml'b'ml_in'u'ml_in'b'mn_MN.UTF-8'u'mn_MN.UTF-8'b'mn_mn'u'mn_mn'b'mni_IN.UTF-8'u'mni_IN.UTF-8'b'mni_in'u'mni_in'b'mr_IN.UTF-8'u'mr_IN.UTF-8'b'mr'u'mr'b'mr_in'u'mr_in'b'ms_MY.ISO8859-1'u'ms_MY.ISO8859-1'b'ms'u'ms'b'ms_my'u'ms_my'b'mt_MT.ISO8859-3'u'mt_MT.ISO8859-3'b'mt'u'mt'b'mt_mt'u'mt_mt'b'my_MM.UTF-8'u'my_MM.UTF-8'b'my_mm'u'my_mm'b'nan_TW.UTF-8'u'nan_TW.UTF-8'b'nan_tw'u'nan_tw'b'nb'u'nb'b'nb_no'u'nb_no'b'nds_DE.UTF-8'u'nds_DE.UTF-8'b'nds_de'u'nds_de'b'nds_NL.UTF-8'u'nds_NL.UTF-8'b'nds_nl'u'nds_nl'b'ne_NP.UTF-8'u'ne_NP.UTF-8'b'ne_np'u'ne_np'b'nhn_MX.UTF-8'u'nhn_MX.UTF-8'b'nhn_mx'u'nhn_mx'b'niu_NU.UTF-8'u'niu_NU.UTF-8'b'niu_nu'u'niu_nu'b'niu_NZ.UTF-8'u'niu_NZ.UTF-8'b'niu_nz'u'niu_nz'b'nl'u'nl'b'nl_AW.UTF-8'u'nl_AW.UTF-8'b'nl_aw'u'nl_aw'b'nl_be'u'nl_be'b'nl_nl'u'nl_nl'b'nn_NO.ISO8859-1'u'nn_NO.ISO8859-1'b'nn'u'nn'b'nn_no'u'nn_no'b'no_NO.ISO8859-1'u'no_NO.ISO8859-1'b'no'u'no'b'ny_NO.ISO8859-1'u'ny_NO.ISO8859-1'b'no@nynorsk'u'no@nynorsk'b'no_no'u'no_no'b'no_no.iso88591@bokmal'u'no_no.iso88591@bokmal'b'no_no.iso88591@nynorsk'u'no_no.iso88591@nynorsk'b'norwegian'u'norwegian'b'nr_ZA.ISO8859-1'u'nr_ZA.ISO8859-1'b'nr'u'nr'b'nr_za'u'nr_za'b'nso_ZA.ISO8859-15'u'nso_ZA.ISO8859-15'b'nso'u'nso'b'nso_za'u'nso_za'b'ny'u'ny'b'ny_no'u'ny_no'b'nynorsk'u'nynorsk'b'oc_FR.ISO8859-1'u'oc_FR.ISO8859-1'b'oc'u'oc'b'oc_fr'u'oc_fr'b'om_ET.UTF-8'u'om_ET.UTF-8'b'om_et'u'om_et'b'om_KE.ISO8859-1'u'om_KE.ISO8859-1'b'om_ke'u'om_ke'b'or_IN.UTF-8'u'or_IN.UTF-8'b'or_in'u'or_in'b'os_RU.UTF-8'u'os_RU.UTF-8'b'os_ru'u'os_ru'b'pa_IN.UTF-8'u'pa_IN.UTF-8'b'pa'u'pa'b'pa_in'u'pa_in'b'pa_PK.UTF-8'u'pa_PK.UTF-8'b'pa_pk'u'pa_pk'b'pap_AN.UTF-8'u'pap_AN.UTF-8'b'pap_an'u'pap_an'b'pap_AW.UTF-8'u'pap_AW.UTF-8'b'pap_aw'u'pap_aw'b'pap_CW.UTF-8'u'pap_CW.UTF-8'b'pap_cw'u'pap_cw'b'pd_US.ISO8859-1'u'pd_US.ISO8859-1'b'pd'u'pd'b'pd_DE.ISO8859-1'u'pd_DE.ISO8859-1'b'pd_de'u'pd_de'b'pd_us'u'pd_us'b'ph_PH.ISO8859-1'u'ph_PH.ISO8859-1'b'ph'u'ph'b'ph_ph'u'ph_ph'b'pl_PL.ISO8859-2'u'pl_PL.ISO8859-2'b'pl'u'pl'b'pl_pl'u'pl_pl'b'polish'u'polish'b'pt_PT.ISO8859-1'u'pt_PT.ISO8859-1'b'portuguese'u'portuguese'b'pt_BR.ISO8859-1'u'pt_BR.ISO8859-1'b'portuguese_brazil'u'portuguese_brazil'b'posix-utf2'u'posix-utf2'b'pp_AN.ISO8859-1'u'pp_AN.ISO8859-1'b'pp'u'pp'b'pp_an'u'pp_an'b'ps_AF.UTF-8'u'ps_AF.UTF-8'b'ps_af'u'ps_af'b'pt'u'pt'b'pt_br'u'pt_br'b'pt_pt'u'pt_pt'b'quz_PE.UTF-8'u'quz_PE.UTF-8'b'quz_pe'u'quz_pe'b'raj_IN.UTF-8'u'raj_IN.UTF-8'b'raj_in'u'raj_in'b'ro_RO.ISO8859-2'u'ro_RO.ISO8859-2'b'ro'u'ro'b'ro_ro'u'ro_ro'b'romanian'u'romanian'b'ru_RU.UTF-8'u'ru_RU.UTF-8'b'ru'u'ru'b'ru_ru'u'ru_ru'b'ru_UA.KOI8-U'u'ru_UA.KOI8-U'b'ru_ua'u'ru_ua'b'rumanian'u'rumanian'b'ru_RU.KOI8-R'u'ru_RU.KOI8-R'b'russian'u'russian'b'rw_RW.ISO8859-1'u'rw_RW.ISO8859-1'b'rw'u'rw'b'rw_rw'u'rw_rw'b'sa_IN.UTF-8'u'sa_IN.UTF-8'b'sa_in'u'sa_in'b'sat_IN.UTF-8'u'sat_IN.UTF-8'b'sat_in'u'sat_in'b'sc_IT.UTF-8'u'sc_IT.UTF-8'b'sc_it'u'sc_it'b'sd_IN.UTF-8'u'sd_IN.UTF-8'b'sd'u'sd'b'sd_in'u'sd_in'b'sd_IN.UTF-8@devanagari'u'sd_IN.UTF-8@devanagari'b'sd_in@devanagari.utf8'u'sd_in@devanagari.utf8'b'sd_PK.UTF-8'u'sd_PK.UTF-8'b'sd_pk'u'sd_pk'b'se_NO.UTF-8'u'se_NO.UTF-8'b'se_no'u'se_no'b'sr_RS.UTF-8@latin'u'sr_RS.UTF-8@latin'b'serbocroatian'u'serbocroatian'b'sgs_LT.UTF-8'u'sgs_LT.UTF-8'b'sgs_lt'u'sgs_lt'b'sh'u'sh'b'sr_CS.ISO8859-2'u'sr_CS.ISO8859-2'b'sh_ba.iso88592@bosnia'u'sh_ba.iso88592@bosnia'b'sh_HR.ISO8859-2'u'sh_HR.ISO8859-2'b'sh_hr'u'sh_hr'b'sh_hr.iso88592'u'sh_hr.iso88592'b'sh_sp'u'sh_sp'b'sh_yu'u'sh_yu'b'shn_MM.UTF-8'u'shn_MM.UTF-8'b'shn_mm'u'shn_mm'b'
shs_CA.UTF-8'u'shs_CA.UTF-8'b'shs_ca'u'shs_ca'b'si_LK.UTF-8'u'si_LK.UTF-8'b'si'u'si'b'si_lk'u'si_lk'b'sid_ET.UTF-8'u'sid_ET.UTF-8'b'sid_et'u'sid_et'b'sinhala'u'sinhala'b'sk_SK.ISO8859-2'u'sk_SK.ISO8859-2'b'sk'u'sk'b'sk_sk'u'sk_sk'b'sl_SI.ISO8859-2'u'sl_SI.ISO8859-2'b'sl'u'sl'b'sl_CS.ISO8859-2'u'sl_CS.ISO8859-2'b'sl_cs'u'sl_cs'b'sl_si'u'sl_si'b'slovak'u'slovak'b'slovene'u'slovene'b'slovenian'u'slovenian'b'sm_WS.UTF-8'u'sm_WS.UTF-8'b'sm_ws'u'sm_ws'b'so_DJ.ISO8859-1'u'so_DJ.ISO8859-1'b'so_dj'u'so_dj'b'so_ET.UTF-8'u'so_ET.UTF-8'b'so_et'u'so_et'b'so_KE.ISO8859-1'u'so_KE.ISO8859-1'b'so_ke'u'so_ke'b'so_SO.ISO8859-1'u'so_SO.ISO8859-1'b'so_so'u'so_so'b'sr_CS.ISO8859-5'u'sr_CS.ISO8859-5'b'sp'u'sp'b'sp_yu'u'sp_yu'b'spanish'u'spanish'b'spanish_spain'u'spanish_spain'b'sq_AL.ISO8859-2'u'sq_AL.ISO8859-2'b'sq'u'sq'b'sq_al'u'sq_al'b'sq_MK.UTF-8'u'sq_MK.UTF-8'b'sq_mk'u'sq_mk'b'sr_RS.UTF-8'u'sr_RS.UTF-8'b'sr'u'sr'b'sr@cyrillic'u'sr@cyrillic'b'sr_CS.UTF-8@latin'u'sr_CS.UTF-8@latin'b'sr@latn'u'sr@latn'b'sr_CS.UTF-8'u'sr_CS.UTF-8'b'sr_cs'u'sr_cs'b'sr_cs.iso88592@latn'u'sr_cs.iso88592@latn'b'sr_cs@latn'u'sr_cs@latn'b'sr_ME.UTF-8'u'sr_ME.UTF-8'b'sr_me'u'sr_me'b'sr_rs'u'sr_rs'b'sr_rs@latn'u'sr_rs@latn'b'sr_sp'u'sr_sp'b'sr_yu'u'sr_yu'b'sr_CS.CP1251'u'sr_CS.CP1251'b'sr_yu.cp1251@cyrillic'u'sr_yu.cp1251@cyrillic'b'sr_yu.iso88592'u'sr_yu.iso88592'b'sr_yu.iso88595'u'sr_yu.iso88595'b'sr_yu.iso88595@cyrillic'u'sr_yu.iso88595@cyrillic'b'sr_yu.microsoftcp1251@cyrillic'u'sr_yu.microsoftcp1251@cyrillic'b'sr_yu.utf8'u'sr_yu.utf8'b'sr_yu.utf8@cyrillic'u'sr_yu.utf8@cyrillic'b'sr_yu@cyrillic'u'sr_yu@cyrillic'b'ss_ZA.ISO8859-1'u'ss_ZA.ISO8859-1'b'ss'u'ss'b'ss_za'u'ss_za'b'st_ZA.ISO8859-1'u'st_ZA.ISO8859-1'b'st'u'st'b'st_za'u'st_za'b'sv_SE.ISO8859-1'u'sv_SE.ISO8859-1'b'sv'u'sv'b'sv_FI.ISO8859-1'u'sv_FI.ISO8859-1'b'sv_fi'u'sv_fi'b'sv_se'u'sv_se'b'sw_KE.UTF-8'u'sw_KE.UTF-8'b'sw_ke'u'sw_ke'b'sw_TZ.UTF-8'u'sw_TZ.UTF-8'b'sw_tz'u'sw_tz'b'swedish'u'swedish'b'szl_PL.UTF-8'u'szl_PL.UTF-8'b'szl_pl'u'szl_pl'b'ta_IN.TSCII-0'u'ta_IN.TSCII-0'b'ta'u'ta'b'ta_in'u'ta_in'b'ta_in.tscii'u'ta_in.tscii'b'ta_in.tscii0'u'ta_in.tscii0'b'ta_LK.UTF-8'u'ta_LK.UTF-8'b'ta_lk'u'ta_lk'b'tcy_IN.UTF-8'u'tcy_IN.UTF-8'b'tcy_in.utf8'u'tcy_in.utf8'b'te_IN.UTF-8'u'te_IN.UTF-8'b'te'u'te'b'te_in'u'te_in'b'tg_TJ.KOI8-C'u'tg_TJ.KOI8-C'b'tg'u'tg'b'tg_tj'u'tg_tj'b'th_TH.ISO8859-11'u'th_TH.ISO8859-11'b'th'u'th'b'th_th'u'th_th'b'th_TH.TIS620'u'th_TH.TIS620'b'th_th.tactis'u'th_th.tactis'b'th_th.tis620'u'th_th.tis620'b'the_NP.UTF-8'u'the_NP.UTF-8'b'the_np'u'the_np'b'ti_ER.UTF-8'u'ti_ER.UTF-8'b'ti_er'u'ti_er'b'ti_ET.UTF-8'u'ti_ET.UTF-8'b'ti_et'u'ti_et'b'tig_ER.UTF-8'u'tig_ER.UTF-8'b'tig_er'u'tig_er'b'tk_TM.UTF-8'u'tk_TM.UTF-8'b'tk_tm'u'tk_tm'b'tl_PH.ISO8859-1'u'tl_PH.ISO8859-1'b'tl'u'tl'b'tl_ph'u'tl_ph'b'tn_ZA.ISO8859-15'u'tn_ZA.ISO8859-15'b'tn'u'tn'b'tn_za'u'tn_za'b'to_TO.UTF-8'u'to_TO.UTF-8'b'to_to'u'to_to'b'tpi_PG.UTF-8'u'tpi_PG.UTF-8'b'tpi_pg'u'tpi_pg'b'tr_TR.ISO8859-9'u'tr_TR.ISO8859-9'b'tr'u'tr'b'tr_CY.ISO8859-9'u'tr_CY.ISO8859-9'b'tr_cy'u'tr_cy'b'tr_tr'u'tr_tr'b'ts_ZA.ISO8859-1'u'ts_ZA.ISO8859-1'b'ts'u'ts'b'ts_za'u'ts_za'b'tt_RU.TATAR-CYR'u'tt_RU.TATAR-CYR'b'tt'u'tt'b'tt_ru'u'tt_ru'b'tt_ru.tatarcyr'u'tt_ru.tatarcyr'b'tt_RU.UTF-8@iqtelif'u'tt_RU.UTF-8@iqtelif'b'tt_ru@iqtelif'u'tt_ru@iqtelif'b'turkish'u'turkish'b'ug_CN.UTF-8'u'ug_CN.UTF-8'b'ug_cn'u'ug_cn'b'uk_UA.KOI8-U'u'uk_UA.KOI8-U'b'uk'u'uk'b'uk_ua'u'uk_ua'b'en_US.utf'u'en_US.utf'b'univ'u'univ'b'universal.utf8@ucs4'u'universal.utf8@ucs4'b'unm_US.UTF-8'u'unm_US.UTF-8'b'unm_us'u'unm_us'b'ur_PK.CP1256'u'ur_PK.CP1256'b'ur'u'ur
'b'ur_IN.UTF-8'u'ur_IN.UTF-8'b'ur_in'u'ur_in'b'ur_pk'u'ur_pk'b'uz_UZ.UTF-8'u'uz_UZ.UTF-8'b'uz'u'uz'b'uz_uz'u'uz_uz'b'uz_uz@cyrillic'u'uz_uz@cyrillic'b've_ZA.UTF-8'u've_ZA.UTF-8'b've'u've'b've_za'u've_za'b'vi_VN.TCVN'u'vi_VN.TCVN'b'vi'u'vi'b'vi_vn'u'vi_vn'b'vi_vn.tcvn'u'vi_vn.tcvn'b'vi_vn.tcvn5712'u'vi_vn.tcvn5712'b'vi_VN.VISCII'u'vi_VN.VISCII'b'vi_vn.viscii'u'vi_vn.viscii'b'vi_vn.viscii111'u'vi_vn.viscii111'b'wa_BE.ISO8859-1'u'wa_BE.ISO8859-1'b'wa'u'wa'b'wa_be'u'wa_be'b'wae_CH.UTF-8'u'wae_CH.UTF-8'b'wae_ch'u'wae_ch'b'wal_ET.UTF-8'u'wal_ET.UTF-8'b'wal_et'u'wal_et'b'wo_SN.UTF-8'u'wo_SN.UTF-8'b'wo_sn'u'wo_sn'b'xh_ZA.ISO8859-1'u'xh_ZA.ISO8859-1'b'xh'u'xh'b'xh_za'u'xh_za'b'yi_US.CP1255'u'yi_US.CP1255'b'yi'u'yi'b'yi_us'u'yi_us'b'yo_NG.UTF-8'u'yo_NG.UTF-8'b'yo_ng'u'yo_ng'b'yue_HK.UTF-8'u'yue_HK.UTF-8'b'yue_hk'u'yue_hk'b'yuw_PG.UTF-8'u'yuw_PG.UTF-8'b'yuw_pg'u'yuw_pg'b'zh'u'zh'b'zh_CN.gb2312'u'zh_CN.gb2312'b'zh_cn'u'zh_cn'b'zh_TW.big5'u'zh_TW.big5'b'zh_cn.big5'u'zh_cn.big5'b'zh_cn.euc'u'zh_cn.euc'b'zh_HK.big5hkscs'u'zh_HK.big5hkscs'b'zh_hk'u'zh_hk'b'zh_hk.big5hk'u'zh_hk.big5hk'b'zh_SG.GB2312'u'zh_SG.GB2312'b'zh_sg'u'zh_sg'b'zh_SG.GBK'u'zh_SG.GBK'b'zh_sg.gbk'u'zh_sg.gbk'b'zh_tw'u'zh_tw'b'zh_tw.euc'u'zh_tw.euc'b'zh_tw.euctw'u'zh_tw.euctw'b'zu_ZA.ISO8859-1'u'zu_ZA.ISO8859-1'b'zu'u'zu'b'zu_za'u'zu_za'b'af_ZA'u'af_ZA'b'sq_AL'u'sq_AL'b'gsw_FR'u'gsw_FR'b'am_ET'u'am_ET'b'ar_SA'u'ar_SA'b'ar_IQ'u'ar_IQ'b'ar_EG'u'ar_EG'b'ar_LY'u'ar_LY'b'ar_DZ'u'ar_DZ'b'ar_MA'u'ar_MA'b'ar_TN'u'ar_TN'b'ar_OM'u'ar_OM'b'ar_YE'u'ar_YE'b'ar_SY'u'ar_SY'b'ar_JO'u'ar_JO'b'ar_LB'u'ar_LB'b'ar_KW'u'ar_KW'b'ar_AE'u'ar_AE'b'ar_BH'u'ar_BH'b'ar_QA'u'ar_QA'b'hy_AM'u'hy_AM'b'as_IN'u'as_IN'b'az_AZ'u'az_AZ'b'ba_RU'u'ba_RU'b'eu_ES'u'eu_ES'b'be_BY'u'be_BY'b'bn_IN'u'bn_IN'b'bs_BA'u'bs_BA'b'br_FR'u'br_FR'b'bg_BG'u'bg_BG'b'ca_ES'u'ca_ES'b'zh_CHS'u'zh_CHS'b'zh_TW'u'zh_TW'b'zh_CN'u'zh_CN'b'zh_HK'u'zh_HK'b'zh_SG'u'zh_SG'b'zh_MO'u'zh_MO'b'zh_CHT'u'zh_CHT'b'co_FR'u'co_FR'b'hr_HR'u'hr_HR'b'hr_BA'u'hr_BA'b'cs_CZ'u'cs_CZ'b'da_DK'u'da_DK'b'gbz_AF'u'gbz_AF'b'div_MV'u'div_MV'b'nl_NL'u'nl_NL'b'nl_BE'u'nl_BE'b'en_US'u'en_US'b'en_GB'u'en_GB'b'en_AU'u'en_AU'b'en_CA'u'en_CA'b'en_NZ'u'en_NZ'b'en_IE'u'en_IE'b'en_ZA'u'en_ZA'b'en_JA'u'en_JA'b'en_CB'u'en_CB'b'en_BZ'u'en_BZ'b'en_TT'u'en_TT'b'en_ZW'u'en_ZW'b'en_PH'u'en_PH'b'en_IN'u'en_IN'b'en_MY'u'en_MY'b'et_EE'u'et_EE'b'fo_FO'u'fo_FO'b'fil_PH'u'fil_PH'b'fi_FI'u'fi_FI'b'fr_FR'u'fr_FR'b'fr_BE'u'fr_BE'b'fr_CA'u'fr_CA'b'fr_CH'u'fr_CH'b'fr_LU'u'fr_LU'b'fr_MC'u'fr_MC'b'fy_NL'u'fy_NL'b'gl_ES'u'gl_ES'b'ka_GE'u'ka_GE'b'de_DE'u'de_DE'b'de_CH'u'de_CH'b'de_AT'u'de_AT'b'de_LU'u'de_LU'b'de_LI'u'de_LI'b'el_GR'u'el_GR'b'kl_GL'u'kl_GL'b'gu_IN'u'gu_IN'b'ha_NG'u'ha_NG'b'he_IL'u'he_IL'b'hi_IN'u'hi_IN'b'hu_HU'u'hu_HU'b'is_IS'u'is_IS'b'id_ID'u'id_ID'b'iu_CA'u'iu_CA'b'ga_IE'u'ga_IE'b'it_IT'u'it_IT'b'it_CH'u'it_CH'b'ja_JP'u'ja_JP'b'kn_IN'u'kn_IN'b'kk_KZ'u'kk_KZ'b'kh_KH'u'kh_KH'b'qut_GT'u'qut_GT'b'rw_RW'u'rw_RW'b'kok_IN'u'kok_IN'b'ko_KR'u'ko_KR'b'ky_KG'u'ky_KG'b'lo_LA'u'lo_LA'b'lv_LV'u'lv_LV'b'lt_LT'u'lt_LT'b'dsb_DE'u'dsb_DE'b'lb_LU'u'lb_LU'b'mk_MK'u'mk_MK'b'ms_MY'u'ms_MY'b'ms_BN'u'ms_BN'b'ml_IN'u'ml_IN'b'mt_MT'u'mt_MT'b'mi_NZ'u'mi_NZ'b'arn_CL'u'arn_CL'b'mr_IN'u'mr_IN'b'moh_CA'u'moh_CA'b'mn_MN'u'mn_MN'b'mn_CN'u'mn_CN'b'ne_NP'u'ne_NP'b'nb_NO'u'nb_NO'b'nn_NO'u'nn_NO'b'oc_FR'u'oc_FR'b'or_IN'u'or_IN'b'ps_AF'u'ps_AF'b'fa_IR'u'fa_IR'b'pl_PL'u'pl_PL'b'pt_BR'u'pt_BR'b'pt_PT'u'pt_PT'b'pa_IN'u'pa_IN'b'quz_BO'u'quz_BO'b'quz_EC'u'quz_EC'b'quz_PE'u'quz_PE'b'ro_RO'u'ro_RO'b'rm_CH'u'rm_CH'b'ru_RU'u'ru_RU'b'smn_FI'u'smn_FI'b'smj_NO'u'smj_NO'b'smj_SE'u'smj_SE'b'se
_NO'u'se_NO'b'se_SE'u'se_SE'b'se_FI'u'se_FI'b'sms_FI'u'sms_FI'b'sma_NO'u'sma_NO'b'sma_SE'u'sma_SE'b'sa_IN'u'sa_IN'b'sr_SP'u'sr_SP'b'sr_BA'u'sr_BA'b'si_LK'u'si_LK'b'ns_ZA'u'ns_ZA'b'tn_ZA'u'tn_ZA'b'sk_SK'u'sk_SK'b'sl_SI'u'sl_SI'b'es_ES'u'es_ES'b'es_MX'u'es_MX'b'es_GT'u'es_GT'b'es_CR'u'es_CR'b'es_PA'u'es_PA'b'es_DO'u'es_DO'b'es_VE'u'es_VE'b'es_CO'u'es_CO'b'es_PE'u'es_PE'b'es_AR'u'es_AR'b'es_EC'u'es_EC'b'es_CL'u'es_CL'b'es_UR'u'es_UR'b'es_PY'u'es_PY'b'es_BO'u'es_BO'b'es_SV'u'es_SV'b'es_HN'u'es_HN'b'es_NI'u'es_NI'b'es_PR'u'es_PR'b'es_US'u'es_US'b'sw_KE'u'sw_KE'b'sv_SE'u'sv_SE'b'sv_FI'u'sv_FI'b'syr_SY'u'syr_SY'b'tg_TJ'u'tg_TJ'b'tmz_DZ'u'tmz_DZ'b'ta_IN'u'ta_IN'b'tt_RU'u'tt_RU'b'te_IN'u'te_IN'b'th_TH'u'th_TH'b'bo_BT'u'bo_BT'b'bo_CN'u'bo_CN'b'tr_TR'u'tr_TR'b'tk_TM'u'tk_TM'b'ug_CN'u'ug_CN'b'uk_UA'u'uk_UA'b'wen_DE'u'wen_DE'b'ur_PK'u'ur_PK'b'ur_IN'u'ur_IN'b'uz_UZ'u'uz_UZ'b'vi_VN'u'vi_VN'b'cy_GB'u'cy_GB'b'wo_SN'u'wo_SN'b'xh_ZA'u'xh_ZA'b'sah_RU'u'sah_RU'b'ii_CN'u'ii_CN'b'yo_NG'u'yo_NG'b'zu_ZA'u'zu_ZA'b' Test function. + 'u' Test function. + 'b'LC_'u'LC_'b'Locale defaults as determined by getdefaultlocale():'u'Locale defaults as determined by getdefaultlocale():'b'Language: 'u'Language: 'b'(undefined)'u'(undefined)'b'Encoding: 'u'Encoding: 'b'Locale settings on startup:'u'Locale settings on startup:'b' Language: 'u' Language: 'b' Encoding: 'u' Encoding: 'b'Locale settings after calling resetlocale():'u'Locale settings after calling resetlocale():'b'Locale settings after calling setlocale(LC_ALL, ""):'u'Locale settings after calling setlocale(LC_ALL, ""):'b'NOTE:'u'NOTE:'b'setlocale(LC_ALL, "") does not support the default locale'u'setlocale(LC_ALL, "") does not support the default locale'b'given in the OS environment variables.'u'given in the OS environment variables.'b'Locale aliasing:'u'Locale aliasing:'b'Number formatting:'u'Number formatting:'Synchronization primitives.Context manager. + + This enables the following idiom for acquiring and releasing a + lock around a block: + + with (yield from lock): + + + while failing loudly when accidentally using: + + with lock: + + + Deprecated, use 'async with' statement: + async with lock: + + _ContextManagerMixin"yield from" should be used as context manager expression'with (yield from lock)' is deprecated use 'async with lock' instead"'with (yield from lock)' is deprecated ""use 'async with lock' instead"__acquire_ctx'with await lock' is deprecated use 'async with lock' instead"'with await lock' is deprecated "Primitive lock objects. + + A primitive lock is a synchronization primitive that is not owned + by a particular coroutine when locked. A primitive lock is in one + of two states, 'locked' or 'unlocked'. + + It is created in the unlocked state. It has two basic methods, + acquire() and release(). When the state is unlocked, acquire() + changes the state to locked and returns immediately. When the + state is locked, acquire() blocks until a call to release() in + another coroutine changes it to unlocked, then the acquire() call + resets it to locked and returns. The release() method should only + be called in the locked state; it changes the state to unlocked + and returns immediately. If an attempt is made to release an + unlocked lock, a RuntimeError will be raised. + + When more than one coroutine is blocked in acquire() waiting for + the state to turn to unlocked, only one coroutine proceeds when a + release() call resets the state to unlocked; first coroutine which + is blocked in acquire() is being processed. 
+ + acquire() is a coroutine and should be called with 'await'. + + Locks also support the asynchronous context management protocol. + 'async with lock' statement should be used. + + Usage: + + lock = Lock() + ... + await lock.acquire() + try: + ... + finally: + lock.release() + + Context manager usage: + + lock = Lock() + ... + async with lock: + ... + + Lock objects can be tested for locking state: + + if not lock.locked(): + await lock.acquire() + else: + # lock is acquired + ... + + _lockedThe loop argument is deprecated since Python 3.8, and scheduled for removal in Python 3.10."The loop argument is deprecated since Python 3.8, ""and scheduled for removal in Python 3.10."unlocked, waiters:]>Return True if lock is acquired.Acquire a lock. + + This method blocks until the lock is unlocked, then sets it to + locked and returns True. + _wake_up_firstRelease a lock. + + When the lock is locked, reset it to unlocked, and return. + If any other coroutines are blocked waiting for the lock to become + unlocked, allow exactly one of them to proceed. + + When invoked on an unlocked lock, a RuntimeError is raised. + + There is no return value. + Lock is not acquired.Wake up the first waiter if it isn't done.Asynchronous equivalent to threading.Event. + + Class implementing event objects. An event manages a flag that can be set + to true with the set() method and reset to false with the clear() method. + The wait() method blocks until the flag is true. The flag is initially + false. + is_setReturn True if and only if the internal flag is true.Set the internal flag to true. All coroutines waiting for it to + become true are awakened. Coroutine that call wait() once the flag is + true will not block at all. + Reset the internal flag to false. Subsequently, coroutines calling + wait() will block until set() is called to set the internal flag + to true again.Block until the internal flag is true. + + If the internal flag is true on entry, return True + immediately. Otherwise, block until another coroutine calls + set() to set the flag to true, then return True. + Asynchronous equivalent to threading.Condition. + + This class implements condition variable objects. A condition variable + allows one or more coroutines to wait until they are notified by another + coroutine. + + A new Lock object is created and used as the underlying lock. + loop argument must agree with lockWait until notified. + + If the calling coroutine has not acquired the lock when this + method is called, a RuntimeError is raised. + + This method releases the underlying lock, and then blocks + until it is awakened by a notify() or notify_all() call for + the same condition variable in another coroutine. Once + awakened, it re-acquires the lock and returns True. + cannot wait on un-acquired lockwait_forWait until a predicate becomes true. + + The predicate should be a callable which result will be + interpreted as a boolean value. The final predicate value is + the return value. + notifyBy default, wake up one coroutine waiting on this condition, if any. + If the calling coroutine has not acquired the lock when this method + is called, a RuntimeError is raised. + + This method wakes up at most n of the coroutines waiting for the + condition variable; it is a no-op if no coroutines are waiting. + + Note: an awakened coroutine does not actually return from its + wait() call until it can reacquire the lock. Since notify() does + not release the lock, its caller should. 
+ cannot notify on un-acquired lockWake up all threads waiting on this condition. This method acts + like notify(), but wakes up all waiting threads instead of one. If the + calling thread has not acquired the lock when this method is called, + a RuntimeError is raised. + A Semaphore implementation. + + A semaphore manages an internal counter which is decremented by each + acquire() call and incremented by each release() call. The counter + can never go below zero; when acquire() finds that it is zero, it blocks, + waiting until some other thread calls release(). + + Semaphores also support the context management protocol. + + The optional argument gives the initial value for the internal + counter; it defaults to 1. If the value given is less than 0, + ValueError is raised. + Semaphore initial value must be >= 0unlocked, value:_wake_up_nextReturns True if semaphore can not be acquired immediately.Acquire a semaphore. + + If the internal counter is larger than zero on entry, + decrement it by one and return True immediately. If it is + zero on entry, block, waiting until some other coroutine has + called release() to make it larger than 0, and then return + True. + Release a semaphore, incrementing the internal counter by one. + When it was zero on entry and another coroutine is waiting for it to + become larger than zero again, wake up that coroutine. + A bounded semaphore implementation. + + This raises ValueError in release() if it would increase the value + above the initial value. + _bound_valueBoundedSemaphore released too many times# We have no use for the "as ..." clause in the with# statement for locks.# Crudely prevent reuse.# This must exist because __enter__ exists, even though that# always raises; that's how the with-statement works.# This is not a coroutine. It is meant to enable the idiom:# with (yield from lock):# # as an alternative to:# yield from lock.acquire()# try:# finally:# lock.release()# Deprecated, use 'async with' statement:# async with lock:# The flag is needed for legacy asyncio.iscoroutine()# To make "with await lock" work.# Finally block should be called before the CancelledError# handling as we don't want CancelledError to call# _wake_up_first() and attempt to wake up itself.# .done() necessarily means that a waiter will wake up later on and# either take the lock, or, if it was cancelled and lock wasn't# taken already, will hit this again and wake up a new waiter.# Export the lock's locked(), acquire() and release() methods.# Must reacquire lock even if wait is cancelled# See the similar code in Queue.get.b'Synchronization primitives.'u'Synchronization primitives.'b'Event'u'Event'b'Condition'u'Condition'b'Semaphore'u'Semaphore'b'BoundedSemaphore'u'BoundedSemaphore'b'Context manager. + + This enables the following idiom for acquiring and releasing a + lock around a block: + + with (yield from lock): + + + while failing loudly when accidentally using: + + with lock: + + + Deprecated, use 'async with' statement: + async with lock: + + 'u'Context manager. 
+ + This enables the following idiom for acquiring and releasing a + lock around a block: + + with (yield from lock): + + + while failing loudly when accidentally using: + + with lock: + + + Deprecated, use 'async with' statement: + async with lock: + + 'b'"yield from" should be used as context manager expression'u'"yield from" should be used as context manager expression'b''with (yield from lock)' is deprecated use 'async with lock' instead'u''with (yield from lock)' is deprecated use 'async with lock' instead'b''with await lock' is deprecated use 'async with lock' instead'u''with await lock' is deprecated use 'async with lock' instead'b'Primitive lock objects. + + A primitive lock is a synchronization primitive that is not owned + by a particular coroutine when locked. A primitive lock is in one + of two states, 'locked' or 'unlocked'. + + It is created in the unlocked state. It has two basic methods, + acquire() and release(). When the state is unlocked, acquire() + changes the state to locked and returns immediately. When the + state is locked, acquire() blocks until a call to release() in + another coroutine changes it to unlocked, then the acquire() call + resets it to locked and returns. The release() method should only + be called in the locked state; it changes the state to unlocked + and returns immediately. If an attempt is made to release an + unlocked lock, a RuntimeError will be raised. + + When more than one coroutine is blocked in acquire() waiting for + the state to turn to unlocked, only one coroutine proceeds when a + release() call resets the state to unlocked; first coroutine which + is blocked in acquire() is being processed. + + acquire() is a coroutine and should be called with 'await'. + + Locks also support the asynchronous context management protocol. + 'async with lock' statement should be used. + + Usage: + + lock = Lock() + ... + await lock.acquire() + try: + ... + finally: + lock.release() + + Context manager usage: + + lock = Lock() + ... + async with lock: + ... + + Lock objects can be tested for locking state: + + if not lock.locked(): + await lock.acquire() + else: + # lock is acquired + ... + + 'u'Primitive lock objects. + + A primitive lock is a synchronization primitive that is not owned + by a particular coroutine when locked. A primitive lock is in one + of two states, 'locked' or 'unlocked'. + + It is created in the unlocked state. It has two basic methods, + acquire() and release(). When the state is unlocked, acquire() + changes the state to locked and returns immediately. When the + state is locked, acquire() blocks until a call to release() in + another coroutine changes it to unlocked, then the acquire() call + resets it to locked and returns. The release() method should only + be called in the locked state; it changes the state to unlocked + and returns immediately. If an attempt is made to release an + unlocked lock, a RuntimeError will be raised. + + When more than one coroutine is blocked in acquire() waiting for + the state to turn to unlocked, only one coroutine proceeds when a + release() call resets the state to unlocked; first coroutine which + is blocked in acquire() is being processed. + + acquire() is a coroutine and should be called with 'await'. + + Locks also support the asynchronous context management protocol. + 'async with lock' statement should be used. + + Usage: + + lock = Lock() + ... + await lock.acquire() + try: + ... + finally: + lock.release() + + Context manager usage: + + lock = Lock() + ... + async with lock: + ... 
+ + Lock objects can be tested for locking state: + + if not lock.locked(): + await lock.acquire() + else: + # lock is acquired + ... + + 'b'The loop argument is deprecated since Python 3.8, and scheduled for removal in Python 3.10.'u'The loop argument is deprecated since Python 3.8, and scheduled for removal in Python 3.10.'b'locked'u'locked'b'unlocked'u'unlocked'b', waiters:'u', waiters:'b']>'u']>'b'Return True if lock is acquired.'u'Return True if lock is acquired.'b'Acquire a lock. + + This method blocks until the lock is unlocked, then sets it to + locked and returns True. + 'u'Acquire a lock. + + This method blocks until the lock is unlocked, then sets it to + locked and returns True. + 'b'Release a lock. + + When the lock is locked, reset it to unlocked, and return. + If any other coroutines are blocked waiting for the lock to become + unlocked, allow exactly one of them to proceed. + + When invoked on an unlocked lock, a RuntimeError is raised. + + There is no return value. + 'u'Release a lock. + + When the lock is locked, reset it to unlocked, and return. + If any other coroutines are blocked waiting for the lock to become + unlocked, allow exactly one of them to proceed. + + When invoked on an unlocked lock, a RuntimeError is raised. + + There is no return value. + 'b'Lock is not acquired.'u'Lock is not acquired.'b'Wake up the first waiter if it isn't done.'u'Wake up the first waiter if it isn't done.'b'Asynchronous equivalent to threading.Event. + + Class implementing event objects. An event manages a flag that can be set + to true with the set() method and reset to false with the clear() method. + The wait() method blocks until the flag is true. The flag is initially + false. + 'u'Asynchronous equivalent to threading.Event. + + Class implementing event objects. An event manages a flag that can be set + to true with the set() method and reset to false with the clear() method. + The wait() method blocks until the flag is true. The flag is initially + false. + 'b'Return True if and only if the internal flag is true.'u'Return True if and only if the internal flag is true.'b'Set the internal flag to true. All coroutines waiting for it to + become true are awakened. Coroutine that call wait() once the flag is + true will not block at all. + 'u'Set the internal flag to true. All coroutines waiting for it to + become true are awakened. Coroutine that call wait() once the flag is + true will not block at all. + 'b'Reset the internal flag to false. Subsequently, coroutines calling + wait() will block until set() is called to set the internal flag + to true again.'u'Reset the internal flag to false. Subsequently, coroutines calling + wait() will block until set() is called to set the internal flag + to true again.'b'Block until the internal flag is true. + + If the internal flag is true on entry, return True + immediately. Otherwise, block until another coroutine calls + set() to set the flag to true, then return True. + 'u'Block until the internal flag is true. + + If the internal flag is true on entry, return True + immediately. Otherwise, block until another coroutine calls + set() to set the flag to true, then return True. + 'b'Asynchronous equivalent to threading.Condition. + + This class implements condition variable objects. A condition variable + allows one or more coroutines to wait until they are notified by another + coroutine. + + A new Lock object is created and used as the underlying lock. + 'u'Asynchronous equivalent to threading.Condition. 
+ + This class implements condition variable objects. A condition variable + allows one or more coroutines to wait until they are notified by another + coroutine. + + A new Lock object is created and used as the underlying lock. + 'b'loop argument must agree with lock'u'loop argument must agree with lock'b'Wait until notified. + + If the calling coroutine has not acquired the lock when this + method is called, a RuntimeError is raised. + + This method releases the underlying lock, and then blocks + until it is awakened by a notify() or notify_all() call for + the same condition variable in another coroutine. Once + awakened, it re-acquires the lock and returns True. + 'u'Wait until notified. + + If the calling coroutine has not acquired the lock when this + method is called, a RuntimeError is raised. + + This method releases the underlying lock, and then blocks + until it is awakened by a notify() or notify_all() call for + the same condition variable in another coroutine. Once + awakened, it re-acquires the lock and returns True. + 'b'cannot wait on un-acquired lock'u'cannot wait on un-acquired lock'b'Wait until a predicate becomes true. + + The predicate should be a callable which result will be + interpreted as a boolean value. The final predicate value is + the return value. + 'u'Wait until a predicate becomes true. + + The predicate should be a callable which result will be + interpreted as a boolean value. The final predicate value is + the return value. + 'b'By default, wake up one coroutine waiting on this condition, if any. + If the calling coroutine has not acquired the lock when this method + is called, a RuntimeError is raised. + + This method wakes up at most n of the coroutines waiting for the + condition variable; it is a no-op if no coroutines are waiting. + + Note: an awakened coroutine does not actually return from its + wait() call until it can reacquire the lock. Since notify() does + not release the lock, its caller should. + 'u'By default, wake up one coroutine waiting on this condition, if any. + If the calling coroutine has not acquired the lock when this method + is called, a RuntimeError is raised. + + This method wakes up at most n of the coroutines waiting for the + condition variable; it is a no-op if no coroutines are waiting. + + Note: an awakened coroutine does not actually return from its + wait() call until it can reacquire the lock. Since notify() does + not release the lock, its caller should. + 'b'cannot notify on un-acquired lock'u'cannot notify on un-acquired lock'b'Wake up all threads waiting on this condition. This method acts + like notify(), but wakes up all waiting threads instead of one. If the + calling thread has not acquired the lock when this method is called, + a RuntimeError is raised. + 'u'Wake up all threads waiting on this condition. This method acts + like notify(), but wakes up all waiting threads instead of one. If the + calling thread has not acquired the lock when this method is called, + a RuntimeError is raised. + 'b'A Semaphore implementation. + + A semaphore manages an internal counter which is decremented by each + acquire() call and incremented by each release() call. The counter + can never go below zero; when acquire() finds that it is zero, it blocks, + waiting until some other thread calls release(). + + Semaphores also support the context management protocol. + + The optional argument gives the initial value for the internal + counter; it defaults to 1. If the value given is less than 0, + ValueError is raised. 
+ 'u'A Semaphore implementation. + + A semaphore manages an internal counter which is decremented by each + acquire() call and incremented by each release() call. The counter + can never go below zero; when acquire() finds that it is zero, it blocks, + waiting until some other thread calls release(). + + Semaphores also support the context management protocol. + + The optional argument gives the initial value for the internal + counter; it defaults to 1. If the value given is less than 0, + ValueError is raised. + 'b'Semaphore initial value must be >= 0'u'Semaphore initial value must be >= 0'b'unlocked, value:'u'unlocked, value:'b'Returns True if semaphore can not be acquired immediately.'u'Returns True if semaphore can not be acquired immediately.'b'Acquire a semaphore. + + If the internal counter is larger than zero on entry, + decrement it by one and return True immediately. If it is + zero on entry, block, waiting until some other coroutine has + called release() to make it larger than 0, and then return + True. + 'u'Acquire a semaphore. + + If the internal counter is larger than zero on entry, + decrement it by one and return True immediately. If it is + zero on entry, block, waiting until some other coroutine has + called release() to make it larger than 0, and then return + True. + 'b'Release a semaphore, incrementing the internal counter by one. + When it was zero on entry and another coroutine is waiting for it to + become larger than zero again, wake up that coroutine. + 'u'Release a semaphore, incrementing the internal counter by one. + When it was zero on entry and another coroutine is waiting for it to + become larger than zero again, wake up that coroutine. + 'b'A bounded semaphore implementation. + + This raises ValueError in release() if it would increase the value + above the initial value. + 'u'A bounded semaphore implementation. + + This raises ValueError in release() if it would increase the value + above the initial value. + 'b'BoundedSemaphore released too many times'u'BoundedSemaphore released too many times'u'asyncio.locks'u'locks'A simple log mechanism styled after PEP 282.Logthreshold%s wrong log level_global_logset_verbosity# The class here is styled after PEP 282 so that it could later be# replaced with a standard Python logging implementation.# emulate backslashreplace error handler# return the old threshold for use from testsb'A simple log mechanism styled after PEP 282.'u'A simple log mechanism styled after PEP 282.'b'%s wrong log level'u'%s wrong log level'u'distutils.log'Logging configuration.# Name the logger after the package.b'Logging configuration.'u'Logging configuration.'u'asyncio.log'Interface to the liblzma compression library. + +This module provides a class for reading and writing compressed files, +classes for incremental (de)compression, and convenience functions for +one-shot (de)compression. + +These classes and functions support both the XZ and legacy LZMA +container formats, as well as raw compressed data streams. +LZMAFileA file object providing transparent LZMA (de)compression. + + An LZMAFile can act as a wrapper for an existing file object, or + refer directly to a named file on disk. + + Note that LZMAFile provides a *binary* file interface - data read + is returned as bytes, and data to be written must be given as bytes. + presetOpen an LZMA-compressed file in binary mode. 
+ + filename can be either an actual file name (given as a str, + bytes, or PathLike object), in which case the named file is + opened, or it can be an existing file object to read from or + write to. + + mode can be "r" for reading (default), "w" for (over)writing, + "x" for creating exclusively, or "a" for appending. These can + equivalently be given as "rb", "wb", "xb" and "ab" respectively. + + format specifies the container format to use for the file. + If mode is "r", this defaults to FORMAT_AUTO. Otherwise, the + default is FORMAT_XZ. + + check specifies the integrity check to use. This argument can + only be used when opening a file for writing. For FORMAT_XZ, + the default is CHECK_CRC64. FORMAT_ALONE and FORMAT_RAW do not + support integrity checks - for these formats, check must be + omitted, or be CHECK_NONE. + + When opening a file for reading, the *preset* argument is not + meaningful, and should be omitted. The *filters* argument should + also be omitted, except when format is FORMAT_RAW (in which case + it is required). + + When opening a file for writing, the settings used by the + compressor can be specified either as a preset compression + level (with the *preset* argument), or in detail as a custom + filter chain (with the *filters* argument). For FORMAT_XZ and + FORMAT_ALONE, the default is to use the PRESET_DEFAULT preset + level. For FORMAT_RAW, the caller must always specify a filter + chain; the raw compressor does not support preset compression + levels. + + preset (if provided) should be an integer in the range 0-9, + optionally OR-ed with the constant PRESET_EXTREME. + + filters (if provided) should be a sequence of dicts. Each dict + should have an entry for "id" indicating ID of the filter, plus + additional entries for options to the filter. + Cannot specify an integrity check when opening a file for reading"Cannot specify an integrity check ""when opening a file for reading"Cannot specify a preset compression level when opening a file for reading"Cannot specify a preset compression ""level when opening a file for reading"Read up to size uncompressed bytes from the file. + + If size is negative or omitted, read until EOF is reached. + Returns b"" if the file is already at EOF. + Read up to size uncompressed bytes, while trying to avoid + making multiple reads from the underlying stream. Reads up to a + buffer's worth of data if size is negative. + + Returns b"" if the file is at EOF. + Write a bytes object to the file. + + Returns the number of uncompressed bytes written, which is + always len(data). Note that due to buffering, the file on disk + may not reflect the data written until close() is called. + Change the file position. + + The new position is specified by offset, relative to the + position indicated by whence. Possible values for whence are: + + 0: start of stream (default): offset must not be negative + 1: current stream position + 2: end of stream; offset must not be positive + + Returns the new file position. + + Note that seeking is emulated, so depending on the parameters, + this operation may be extremely slow. + Open an LZMA-compressed file in binary or text mode. + + filename can be either an actual file name (given as a str, bytes, + or PathLike object), in which case the named file is opened, or it + can be an existing file object to read from or write to. + + The mode argument can be "r", "rb" (default), "w", "wb", "x", "xb", + "a", or "ab" for binary mode, or "rt", "wt", "xt", or "at" for text + mode. 
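The LZMAFile read/write/seek docstrings above amount to an ordinary binary file-object workflow; a small sketch, assuming a hypothetical example.xz path:

```python
import lzma

# Write, then read back, an .xz file; note that seek() is emulated and may be slow.
with lzma.open("example.xz", "wt", encoding="utf-8") as f:   # text mode wraps LZMAFile in a TextIOWrapper
    f.write("hello\nworld\n")

with lzma.open("example.xz", "rb") as f:   # binary mode is equivalent to the LZMAFile constructor
    first = f.read(6)                      # up to 6 uncompressed bytes
    f.seek(0)                              # emulated seek back to the start of the stream
    everything = f.read()                  # read until EOF; returns b"" once at EOF
print(first, len(everything))
```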
+ + The format, check, preset and filters arguments specify the + compression settings, as for LZMACompressor, LZMADecompressor and + LZMAFile. + + For binary mode, this function is equivalent to the LZMAFile + constructor: LZMAFile(filename, mode, ...). In this case, the + encoding, errors and newline arguments must not be provided. + + For text mode, an LZMAFile object is created, and wrapped in an + io.TextIOWrapper instance with the specified encoding, error + handling behavior, and line ending(s). + + lz_modeCompress a block of data. + + Refer to LZMACompressor's docstring for a description of the + optional arguments *format*, *check*, *preset* and *filters*. + + For incremental compression, use an LZMACompressor instead. + Decompress a block of data. + + Refer to LZMADecompressor's docstring for a description of the + optional arguments *format*, *check* and *filters*. + + For incremental decompression, use an LZMADecompressor instead. + # Relies on the undocumented fact that BufferedReader.peek() always# returns at least one byte (except at EOF)# Leftover data is not a valid LZMA/XZ stream; ignore it.b'Interface to the liblzma compression library. + +This module provides a class for reading and writing compressed files, +classes for incremental (de)compression, and convenience functions for +one-shot (de)compression. + +These classes and functions support both the XZ and legacy LZMA +container formats, as well as raw compressed data streams. +'u'Interface to the liblzma compression library. + +This module provides a class for reading and writing compressed files, +classes for incremental (de)compression, and convenience functions for +one-shot (de)compression. + +These classes and functions support both the XZ and legacy LZMA +container formats, as well as raw compressed data streams. +'b'CHECK_NONE'u'CHECK_NONE'b'CHECK_CRC32'u'CHECK_CRC32'b'CHECK_CRC64'u'CHECK_CRC64'b'CHECK_SHA256'u'CHECK_SHA256'b'CHECK_ID_MAX'u'CHECK_ID_MAX'b'CHECK_UNKNOWN'u'CHECK_UNKNOWN'b'FILTER_LZMA1'u'FILTER_LZMA1'b'FILTER_LZMA2'u'FILTER_LZMA2'b'FILTER_DELTA'u'FILTER_DELTA'b'FILTER_X86'u'FILTER_X86'b'FILTER_IA64'u'FILTER_IA64'b'FILTER_ARM'u'FILTER_ARM'b'FILTER_ARMTHUMB'u'FILTER_ARMTHUMB'b'FILTER_POWERPC'u'FILTER_POWERPC'b'FILTER_SPARC'u'FILTER_SPARC'b'FORMAT_AUTO'u'FORMAT_AUTO'b'FORMAT_XZ'u'FORMAT_XZ'b'FORMAT_ALONE'u'FORMAT_ALONE'b'FORMAT_RAW'u'FORMAT_RAW'b'MF_HC3'u'MF_HC3'b'MF_HC4'u'MF_HC4'b'MF_BT2'u'MF_BT2'b'MF_BT3'u'MF_BT3'b'MF_BT4'u'MF_BT4'b'MODE_FAST'u'MODE_FAST'b'MODE_NORMAL'u'MODE_NORMAL'b'PRESET_DEFAULT'u'PRESET_DEFAULT'b'PRESET_EXTREME'u'PRESET_EXTREME'b'LZMACompressor'u'LZMACompressor'b'LZMADecompressor'u'LZMADecompressor'b'LZMAFile'u'LZMAFile'b'LZMAError'u'LZMAError'b'is_check_supported'u'is_check_supported'b'A file object providing transparent LZMA (de)compression. + + An LZMAFile can act as a wrapper for an existing file object, or + refer directly to a named file on disk. + + Note that LZMAFile provides a *binary* file interface - data read + is returned as bytes, and data to be written must be given as bytes. + 'u'A file object providing transparent LZMA (de)compression. + + An LZMAFile can act as a wrapper for an existing file object, or + refer directly to a named file on disk. + + Note that LZMAFile provides a *binary* file interface - data read + is returned as bytes, and data to be written must be given as bytes. + 'b'Open an LZMA-compressed file in binary mode. 
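For the one-shot helpers described above (compress/decompress, as opposed to the incremental LZMACompressor/LZMADecompressor), a brief sketch:

```python
import lzma

data = b"payload" * 1000
# preset should be an integer in the range 0-9, optionally OR-ed with PRESET_EXTREME
blob = lzma.compress(data, preset=9 | lzma.PRESET_EXTREME)
assert lzma.decompress(blob) == data
print(len(data), "->", len(blob))
```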
+ + filename can be either an actual file name (given as a str, + bytes, or PathLike object), in which case the named file is + opened, or it can be an existing file object to read from or + write to. + + mode can be "r" for reading (default), "w" for (over)writing, + "x" for creating exclusively, or "a" for appending. These can + equivalently be given as "rb", "wb", "xb" and "ab" respectively. + + format specifies the container format to use for the file. + If mode is "r", this defaults to FORMAT_AUTO. Otherwise, the + default is FORMAT_XZ. + + check specifies the integrity check to use. This argument can + only be used when opening a file for writing. For FORMAT_XZ, + the default is CHECK_CRC64. FORMAT_ALONE and FORMAT_RAW do not + support integrity checks - for these formats, check must be + omitted, or be CHECK_NONE. + + When opening a file for reading, the *preset* argument is not + meaningful, and should be omitted. The *filters* argument should + also be omitted, except when format is FORMAT_RAW (in which case + it is required). + + When opening a file for writing, the settings used by the + compressor can be specified either as a preset compression + level (with the *preset* argument), or in detail as a custom + filter chain (with the *filters* argument). For FORMAT_XZ and + FORMAT_ALONE, the default is to use the PRESET_DEFAULT preset + level. For FORMAT_RAW, the caller must always specify a filter + chain; the raw compressor does not support preset compression + levels. + + preset (if provided) should be an integer in the range 0-9, + optionally OR-ed with the constant PRESET_EXTREME. + + filters (if provided) should be a sequence of dicts. Each dict + should have an entry for "id" indicating ID of the filter, plus + additional entries for options to the filter. + 'u'Open an LZMA-compressed file in binary mode. + + filename can be either an actual file name (given as a str, + bytes, or PathLike object), in which case the named file is + opened, or it can be an existing file object to read from or + write to. + + mode can be "r" for reading (default), "w" for (over)writing, + "x" for creating exclusively, or "a" for appending. These can + equivalently be given as "rb", "wb", "xb" and "ab" respectively. + + format specifies the container format to use for the file. + If mode is "r", this defaults to FORMAT_AUTO. Otherwise, the + default is FORMAT_XZ. + + check specifies the integrity check to use. This argument can + only be used when opening a file for writing. For FORMAT_XZ, + the default is CHECK_CRC64. FORMAT_ALONE and FORMAT_RAW do not + support integrity checks - for these formats, check must be + omitted, or be CHECK_NONE. + + When opening a file for reading, the *preset* argument is not + meaningful, and should be omitted. The *filters* argument should + also be omitted, except when format is FORMAT_RAW (in which case + it is required). + + When opening a file for writing, the settings used by the + compressor can be specified either as a preset compression + level (with the *preset* argument), or in detail as a custom + filter chain (with the *filters* argument). For FORMAT_XZ and + FORMAT_ALONE, the default is to use the PRESET_DEFAULT preset + level. For FORMAT_RAW, the caller must always specify a filter + chain; the raw compressor does not support preset compression + levels. + + preset (if provided) should be an integer in the range 0-9, + optionally OR-ed with the constant PRESET_EXTREME. + + filters (if provided) should be a sequence of dicts. 
Each dict + should have an entry for "id" indicating ID of the filter, plus + additional entries for options to the filter. + 'b'Cannot specify an integrity check when opening a file for reading'u'Cannot specify an integrity check when opening a file for reading'b'Cannot specify a preset compression level when opening a file for reading'u'Cannot specify a preset compression level when opening a file for reading'b'Read up to size uncompressed bytes from the file. + + If size is negative or omitted, read until EOF is reached. + Returns b"" if the file is already at EOF. + 'u'Read up to size uncompressed bytes from the file. + + If size is negative or omitted, read until EOF is reached. + Returns b"" if the file is already at EOF. + 'b'Read up to size uncompressed bytes, while trying to avoid + making multiple reads from the underlying stream. Reads up to a + buffer's worth of data if size is negative. + + Returns b"" if the file is at EOF. + 'u'Read up to size uncompressed bytes, while trying to avoid + making multiple reads from the underlying stream. Reads up to a + buffer's worth of data if size is negative. + + Returns b"" if the file is at EOF. + 'b'Write a bytes object to the file. + + Returns the number of uncompressed bytes written, which is + always len(data). Note that due to buffering, the file on disk + may not reflect the data written until close() is called. + 'u'Write a bytes object to the file. + + Returns the number of uncompressed bytes written, which is + always len(data). Note that due to buffering, the file on disk + may not reflect the data written until close() is called. + 'b'Change the file position. + + The new position is specified by offset, relative to the + position indicated by whence. Possible values for whence are: + + 0: start of stream (default): offset must not be negative + 1: current stream position + 2: end of stream; offset must not be positive + + Returns the new file position. + + Note that seeking is emulated, so depending on the parameters, + this operation may be extremely slow. + 'u'Change the file position. + + The new position is specified by offset, relative to the + position indicated by whence. Possible values for whence are: + + 0: start of stream (default): offset must not be negative + 1: current stream position + 2: end of stream; offset must not be positive + + Returns the new file position. + + Note that seeking is emulated, so depending on the parameters, + this operation may be extremely slow. + 'b'Open an LZMA-compressed file in binary or text mode. + + filename can be either an actual file name (given as a str, bytes, + or PathLike object), in which case the named file is opened, or it + can be an existing file object to read from or write to. + + The mode argument can be "r", "rb" (default), "w", "wb", "x", "xb", + "a", or "ab" for binary mode, or "rt", "wt", "xt", or "at" for text + mode. + + The format, check, preset and filters arguments specify the + compression settings, as for LZMACompressor, LZMADecompressor and + LZMAFile. + + For binary mode, this function is equivalent to the LZMAFile + constructor: LZMAFile(filename, mode, ...). In this case, the + encoding, errors and newline arguments must not be provided. + + For text mode, an LZMAFile object is created, and wrapped in an + io.TextIOWrapper instance with the specified encoding, error + handling behavior, and line ending(s). + + 'u'Open an LZMA-compressed file in binary or text mode. 
+ + filename can be either an actual file name (given as a str, bytes, + or PathLike object), in which case the named file is opened, or it + can be an existing file object to read from or write to. + + The mode argument can be "r", "rb" (default), "w", "wb", "x", "xb", + "a", or "ab" for binary mode, or "rt", "wt", "xt", or "at" for text + mode. + + The format, check, preset and filters arguments specify the + compression settings, as for LZMACompressor, LZMADecompressor and + LZMAFile. + + For binary mode, this function is equivalent to the LZMAFile + constructor: LZMAFile(filename, mode, ...). In this case, the + encoding, errors and newline arguments must not be provided. + + For text mode, an LZMAFile object is created, and wrapped in an + io.TextIOWrapper instance with the specified encoding, error + handling behavior, and line ending(s). + + 'b'Compress a block of data. + + Refer to LZMACompressor's docstring for a description of the + optional arguments *format*, *check*, *preset* and *filters*. + + For incremental compression, use an LZMACompressor instead. + 'u'Compress a block of data. + + Refer to LZMACompressor's docstring for a description of the + optional arguments *format*, *check*, *preset* and *filters*. + + For incremental compression, use an LZMACompressor instead. + 'b'Decompress a block of data. + + Refer to LZMADecompressor's docstring for a description of the + optional arguments *format*, *check* and *filters*. + + For incremental decompression, use an LZMADecompressor instead. + 'u'Decompress a block of data. + + Refer to LZMADecompressor's docstring for a description of the + optional arguments *format*, *check* and *filters*. + + For incremental decompression, use an LZMADecompressor instead. + 'u'lzma'The machinery of importlib: finders, loaders, hooks, etc.Returns a list of all recognized module suffixes for this processb'The machinery of importlib: finders, loaders, hooks, etc.'u'The machinery of importlib: finders, loaders, hooks, etc.'b'Returns a list of all recognized module suffixes for this process'u'Returns a list of all recognized module suffixes for this process'u'importlib.machinery'u'machinery'main.pyb'python'u'python'b'main.py'u'main.py'u'example.src.main'u'example.src'u'example'u'src.main'u'src'Unittest main programExamples: + %(prog)s test_module - run tests from test_module + %(prog)s module.TestClass - run tests from module.TestClass + %(prog)s module.Class.test_method - run specified test method + %(prog)s path/to/test_file.py - run tests from test_file.py +MAIN_EXAMPLESExamples: + %(prog)s - run default set of tests + %(prog)s MyTestSuite - run suite 'MyTestSuite' + %(prog)s MyTestCase.testSomething - run MyTestCase.testSomething + %(prog)s MyTestCase - run all 'test*' test methods + in MyTestCase +MODULE_EXAMPLES_convert_namerel_pathpardir_convert_names_convert_select_pattern*%s*A command-line program that runs a set of tests; this is primarily + for making test modules conveniently executable. 
+ catchbreakprogName_discovery_parserdefaultTesttestRunnertestLoadertb_localsparseArgsrunTestsusageExit_initArgParsers_print_help_main_parser_do_discoverytestNamescreateTestsfrom_discovery_getParentArgParserparent_parser_getMainArgParser_getDiscoveryArgParserVerbose output--quietQuiet output--localsShow local variables in tracebacks--failfastStop on first fail or error--catchCatch Ctrl-C and display results so far-b--bufferBuffer stdout and stderr during tests-kOnly run tests which match the given substringa list of any number of test modules, classes and test methods.'a list of any number of test modules, ''classes and test methods.'%s discoverFor test discovery all test modules must be importable from the top level directory of the project.'For test discovery all test modules must be ''importable from the top level directory of the ''project.'--start-directoryDirectory to start discovery ('.' default)--patternPattern to match tests ('test*.py' default)--top-level-directoryTop level directory of project (defaults to start directory)'Top level directory of project (defaults to ''start directory)'# on Linux / Mac OS X 'foo.PY' is not importable, but on# Windows it is. Simpler to do a case insensitive match# a better check would be to check that the name is a# valid Python module name.# on Windows both '\' and '/' are used as path# separators. Better to replace both than rely on os.path.sep# defaults for testing# even if DeprecationWarnings are ignored by default# print them anyway unless other warnings settings are# specified by the warnings arg or the -W python flag# here self.warnings is set either to the value passed# to the warnings args or to None.# If the user didn't pass a value self.warnings will# be None. This means that the behavior is unchanged# and depends on the values passed to -W.# this allows "python -m unittest -v" to still work for# test discovery.# to support python -m unittest ...# createTests will load tests from self.module# handle command line args for test discovery# for testing# didn't accept the tb_locals argument# didn't accept the verbosity, buffer or failfast arguments# it is assumed to be a TestRunner instanceb'Unittest main program'u'Unittest main program'b'Examples: + %(prog)s test_module - run tests from test_module + %(prog)s module.TestClass - run tests from module.TestClass + %(prog)s module.Class.test_method - run specified test method + %(prog)s path/to/test_file.py - run tests from test_file.py +'u'Examples: + %(prog)s test_module - run tests from test_module + %(prog)s module.TestClass - run tests from module.TestClass + %(prog)s module.Class.test_method - run specified test method + %(prog)s path/to/test_file.py - run tests from test_file.py +'b'Examples: + %(prog)s - run default set of tests + %(prog)s MyTestSuite - run suite 'MyTestSuite' + %(prog)s MyTestCase.testSomething - run MyTestCase.testSomething + %(prog)s MyTestCase - run all 'test*' test methods + in MyTestCase +'u'Examples: + %(prog)s - run default set of tests + %(prog)s MyTestSuite - run suite 'MyTestSuite' + %(prog)s MyTestCase.testSomething - run MyTestCase.testSomething + %(prog)s MyTestCase - run all 'test*' test methods + in MyTestCase +'b'*%s*'u'*%s*'b'A command-line program that runs a set of tests; this is primarily + for making test modules conveniently executable. + 'u'A command-line program that runs a set of tests; this is primarily + for making test modules conveniently executable. 
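The unittest switches catalogued above are the command-line surface of TestProgram; the same options map onto unittest.main() keyword arguments, as in this illustrative sketch (the test case is made up):

```python
import unittest

class MathCase(unittest.TestCase):
    def test_add(self):
        self.assertEqual(1 + 1, 2)

if __name__ == "__main__":
    # Equivalent command-line forms handled by TestProgram:
    #   python -m unittest -v --failfast -k add test_module
    #   python -m unittest discover -s tests -p "test*.py" -t .
    unittest.main(verbosity=2, failfast=True)
```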
+ 'b'discover'u'discover'b'verbosity'u'verbosity'b'Verbose output'u'Verbose output'b'--quiet'u'--quiet'b'Quiet output'u'Quiet output'b'--locals'u'--locals'b'tb_locals'u'tb_locals'b'Show local variables in tracebacks'u'Show local variables in tracebacks'b'--failfast'u'--failfast'b'Stop on first fail or error'u'Stop on first fail or error'b'--catch'u'--catch'b'catchbreak'u'catchbreak'b'Catch Ctrl-C and display results so far'u'Catch Ctrl-C and display results so far'b'-b'u'-b'b'--buffer'u'--buffer'b'buffer'u'buffer'b'Buffer stdout and stderr during tests'u'Buffer stdout and stderr during tests'b'-k'u'-k'b'testNamePatterns'u'testNamePatterns'b'Only run tests which match the given substring'u'Only run tests which match the given substring'b'tests'u'tests'b'a list of any number of test modules, classes and test methods.'u'a list of any number of test modules, classes and test methods.'b'%s discover'u'%s discover'b'For test discovery all test modules must be importable from the top level directory of the project.'u'For test discovery all test modules must be importable from the top level directory of the project.'b'--start-directory'u'--start-directory'b'Directory to start discovery ('.' default)'u'Directory to start discovery ('.' default)'b'--pattern'u'--pattern'b'pattern'u'pattern'b'Pattern to match tests ('test*.py' default)'u'Pattern to match tests ('test*.py' default)'b'--top-level-directory'u'--top-level-directory'b'Top level directory of project (defaults to start directory)'u'Top level directory of project (defaults to start directory)'u'unittest.main'u'This module contains functions that can read and write Python values in +a binary format. The format is specific to Python, but independent of +machine architecture issues. + +Not all Python object types are supported; in general, only objects +whose value is independent from a particular invocation of Python can be +written and read by this module. The following types are supported: +None, integers, floating point numbers, strings, bytes, bytearrays, +tuples, lists, sets, dictionaries, and code objects, where it +should be understood that tuples, lists and dictionaries are only +supported as long as the values contained therein are themselves +supported; and recursive lists and dictionaries should not be written +(they will cause infinite loops). + +Variables: + +version -- indicates the format that the module uses. Version 0 is the + historical format, version 1 shares interned strings and version 2 + uses a binary format for floating point numbers. + Version 3 shares common object references (New in version 3.4). + +Functions: + +dump() -- write value to a file +load() -- read value from a file +dumps() -- marshal value as a bytes object +loads() -- read value from a bytes-like object'u'This module provides access to the mathematical functions +defined by the C standard.'u'/Users/pwntester/.pyenv/versions/3.8.13/lib/python3.8/lib-dynload/math.cpython-38-darwin.so'u'math'acosacoshasinasinhatanatan2atanhceilcombcoscoshdegreesdist2.718281828459045erferfcexpm1fabsfactorialfloorfmodfrexpfsumgcdhypotiscloseisfiniteisqrtldexplgammalog1plog2perm3.141592653589793radianssinsinhtantanh6.283185307179586trunc Python 'mbcs' Codec for Windows + + +Cloned by Mark Hammond (mhammond@skippinet.com.au) from ascii.py, +which was written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. 
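The marshal docstring above lists dump/load/dumps/loads for Python's internal binary serialization; a short round-trip sketch (the record contents and file name are illustrative):

```python
import marshal

record = {"answer": 42, "values": [1.5, (True, None)]}
blob = marshal.dumps(record, 2)          # version 2 uses a binary format for floats
assert marshal.loads(blob) == record

with open("record.marshal", "wb") as f:  # dump()/load() work on binary file objects
    marshal.dump(record, f)
```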
+ +mbcs_encodembcs_decode# Import them explicitly to cause an ImportError# on non-Windows systems# for IncrementalDecoder, IncrementalEncoder, ...### Codec APIs### encodings module APIb' Python 'mbcs' Codec for Windows + + +Cloned by Mark Hammond (mhammond@skippinet.com.au) from ascii.py, +which was written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +'u' Python 'mbcs' Codec for Windows + + +Cloned by Mark Hammond (mhammond@skippinet.com.au) from ascii.py, +which was written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +'u'encodings.mbcs'Basic message object for the email package object model.email._encoded_wordsSEMISPACE[ \(\)<>@,;:\\"/\[\]\?=]tspecials_splitparam_formatparamConvenience function to format and return a key=value pair. + + This will quote the value if needed or if quote is true. If value is a + three tuple (charset, language, value), it will be encoded according + to RFC2231 rules. If it contains non-ascii characters it will likewise + be encoded according to RFC2231 rules, using the utf-8 charset and + a null language. + encode_rfc2231%s="%s"_parseparam_unquotevalueBasic message object. + + A message object is defined as something that has a bunch of RFC 2822 + headers and a payload. It may optionally have an envelope header + (a.k.a. Unix-From or From_ header). If the message is a container (i.e. a + multipart or a message/rfc822), then the payload is a list of Message + objects, otherwise it is a string. + + Message objects implement part of the `mapping' interface, which assumes + there is exactly one occurrence of the header per message. Some headers + do in fact appear multiple times (e.g. Received) and for those headers, + you must use the explicit API to set or get all the headers. Not all of + the mapping methods are implemented. + _unixfromtext/plain_default_typeReturn the entire formatted message as a string. + as_stringunixfrommaxheaderlenReturn the entire formatted message as a string. + + Optional 'unixfrom', when true, means include the Unix From_ envelope + header. For backward compatibility reasons, if maxheaderlen is + not specified it defaults to 0, so you must override it explicitly + if you want a different maxheaderlen. 'policy' is passed to the + Generator instance used to serialize the message; if it is not + specified the policy associated with the message instance is used. + + If the message object contains binary data that is not encoded + according to RFC standards, the non-compliant data will be replaced by + unicode "unknown character" code points. + email.generatorflattenReturn the entire formatted message as a bytes object. + as_bytesReturn the entire formatted message as a bytes object. + + Optional 'unixfrom', when true, means include the Unix From_ envelope + header. 'policy' is passed to the BytesGenerator instance used to + serialize the message; if not specified the policy associated with + the message instance is used. + BytesGeneratorReturn True if the message consists of multiple parts.get_unixfromAdd the given payload to the current payload. + + The current payload will always be a list of objects after this method + is called. If you want to set the payload to a scalar object, use + set_payload() instead. + Attach is not valid on a message with a non-multipart payload"Attach is not valid on a message with a"" non-multipart payload"Return a reference to the payload. + + The payload will either be a list object or a string. 
If you mutate + the list object, you modify the message's payload in place. Optional + i returns that index into the payload. + + Optional decode is a flag indicating whether the payload should be + decoded or not, according to the Content-Transfer-Encoding header + (default is False). + + When True and the message is not a multipart, the payload will be + decoded if this header's value is `quoted-printable' or `base64'. If + some other encoding is used, or the header is missing, or if the + payload has bogus data (i.e. bogus base64 or uuencoded data), the + payload is returned as-is. + + If the message is a multipart and the decode flag is True, then None + is returned. + Expected list, got %sbpayloadget_paramx-uuencodeuuencodeuuex-uuein_fileout_fileSet the payload to the given value. + + Optional charset sets the message's default character set. See + set_charset() for details. + set_charsetSet the charset of the payload to a given character set. + + charset can be a Charset instance, a string naming a character set, or + None. If it is a string it will be converted to a Charset instance. + If charset is None, the charset parameter will be removed from the + Content-Type field. Anything else will generate a TypeError. + + The message will be assumed to be of type text/* encoded with + charset.input_charset. It will be converted to charset.output_charset + and encoded properly, if needed, when generating the plain text + representation of the message. MIME headers (MIME-Version, + Content-Type, Content-Transfer-Encoding) will be added as needed. + del_paramMIME-Versionadd_headerset_paramget_charsetReturn the Charset instance associated with the message's payload. + Return the total number of headers, including duplicates.Get a header value. + + Return None if the header is missing instead of raising an exception. + + Note that if the header appeared multiple times, exactly which + occurrence gets returned is undefined. Use get_all() to get all + the values matching a header field name. + Set the value of a header. + + Note: this does not overwrite an existing header with the same field + name. Use __delitem__() first to delete any existing headers. + max_countThere may be at most {} {} headers in a message"There may be at most {} {} headers ""in a message"Delete all occurrences of a header, if present. + + Does not raise an exception if the header is missing. + newheadersReturn a list of all the message's header field names. + + These will be sorted in the order they appeared in the original + message, or were added to the message, and may contain duplicates. + Any fields deleted and re-inserted are always appended to the header + list. + Return a list of all the message's header values. + + These will be sorted in the order they appeared in the original + message, or were added to the message, and may contain duplicates. + Any fields deleted and re-inserted are always appended to the header + list. + Get all the message's header fields and values. + + These will be sorted in the order they appeared in the original + message, or were added to the message, and may contain duplicates. + Any fields deleted and re-inserted are always appended to the header + list. + failobjGet a header value. + + Like __getitem__() but return failobj instead of None when the field + is missing. + Store name and value in the model without modification. + + This is an "internal" API, intended only for use by a parser. + raw_itemsReturn the (name, value) header pairs without modification. 
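The set_payload/set_charset/get_payload docstrings above interact as in this small sketch (the payload text is illustrative):

```python
from email.message import Message

msg = Message()
msg.set_payload("Grüße", charset="utf-8")             # set_charset() adds MIME-Version, Content-Type and CTE headers
print(msg["Content-Transfer-Encoding"])               # base64, chosen by the utf-8 Charset
print(msg.get_payload(decode=True).decode("utf-8"))   # decode=True undoes the base64 transfer encoding
print(msg.is_multipart())                              # False: the payload is a string, not a list
```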
+ + This is an "internal" API, intended only for use by a generator. + Return a list of all the values for the named field. + + These will be sorted in the order they appeared in the original + message, and may contain duplicates. Any fields deleted and + re-inserted are always appended to the header list. + + If no such fields exist, failobj is returned (defaults to None). + _paramsExtended header setting. + + name is the header field to add. keyword arguments can be used to set + additional parameters for the header field, with underscores converted + to dashes. Normally the parameter will be added as key="value" unless + value is None, in which case only the key will be added. If a + parameter value contains non-ASCII characters it can be specified as a + three-tuple of (charset, language, value), in which case it will be + encoded according to RFC2231 rules. Otherwise it will be encoded using + the utf-8 charset and a language of ''. + + Examples: + + msg.add_header('content-disposition', 'attachment', filename='bud.gif') + msg.add_header('content-disposition', 'attachment', + filename=('utf-8', '', Fußballer.ppt')) + msg.add_header('content-disposition', 'attachment', + filename='Fußballer.ppt')) + replace_headerReplace a header. + + Replace the first matching header found in the message, retaining + header order and case. If no matching header was found, a KeyError is + raised. + Return the message's content type. + + The returned string is coerced to lower case of the form + `maintype/subtype'. If there was no Content-Type header in the + message, the default type as given by get_default_type() will be + returned. Since according to RFC 2045, messages always have a default + type this will always return a value. + + RFC 2045 defines a message's default type to be text/plain unless it + appears inside a multipart/digest container, in which case it would be + message/rfc822. + get_default_typectypeReturn the message's main content type. + + This is the `maintype' part of the string returned by + get_content_type(). + get_content_subtypeReturns the message's sub-content type. + + This is the `subtype' part of the string returned by + get_content_type(). + Return the `default' content type. + + Most messages have a default content type of text/plain, except for + messages that are subparts of multipart/digest containers. Such + subparts have a default content type of message/rfc822. + Set the `default' content type. + + ctype should be either "text/plain" or "message/rfc822", although this + is not enforced. The default content type is not stored in the + Content-Type header. + _get_params_preservedecode_paramsget_paramsReturn the message's Content-Type parameters, as a list. + + The elements of the returned list are 2-tuples of key/value pairs, as + split on the `=' sign. The left hand side of the `=' is the key, + while the right hand side is the value. If there is no `=' sign in + the parameter the value is the empty string. The value is as + described in the get_param() method. + + Optional failobj is the object to return if there is no Content-Type + header. Optional header is the header to search instead of + Content-Type. If unquote is True, the value is unquoted. + Return the parameter value if found in the Content-Type header. + + Optional failobj is the object to return if there is no Content-Type + header, or the Content-Type header has no such parameter. Optional + header is the header to search instead of Content-Type. 
+ + Parameter keys are always compared case insensitively. The return + value can either be a string, or a 3-tuple if the parameter was RFC + 2231 encoded. When it's a 3-tuple, the elements of the value are of + the form (CHARSET, LANGUAGE, VALUE). Note that both CHARSET and + LANGUAGE can be None, in which case you should consider VALUE to be + encoded in the us-ascii charset. You can usually ignore LANGUAGE. + The parameter value (either the returned string, or the VALUE item in + the 3-tuple) is always unquoted, unless unquote is set to False. + + If your application doesn't care whether the parameter was RFC 2231 + encoded, it can turn the return value into a string as follows: + + rawparam = msg.get_param('foo') + param = email.utils.collapse_rfc2231_value(rawparam) + + requoteSet a parameter in the Content-Type header. + + If the parameter already exists in the header, its value will be + replaced with the new value. + + If header is Content-Type and has not yet been defined for this + message, it will be set to "text/plain" and the new parameter and + value will be appended as per RFC 2045. + + An alternate header can be specified in the header argument, and all + parameters will be quoted as necessary unless requote is False. + + If charset is specified, the parameter will be encoded according to RFC + 2231. Optional language specifies the RFC 2231 language, defaulting + to the empty string. Both charset and language should be strings. + old_paramappend_paramRemove the given parameter completely from the Content-Type header. + + The header will be re-written in place without the parameter or its + value. All values will be quoted as necessary unless requote is + False. Optional header specifies an alternative to the Content-Type + header. + new_ctypeSet the main type and subtype for the Content-Type header. + + type must be a string in the form "maintype/subtype", otherwise a + ValueError is raised. + + This method replaces the Content-Type header, keeping all the + parameters in place. If requote is False, this leaves the existing + header's quoting as is. Otherwise, the parameters will be quoted (the + default). + + An alternative header can be specified in the header argument. When + the Content-Type header is set, we'll always also add a MIME-Version + header. + mime-versionReturn the filename associated with the payload if present. + + The filename is extracted from the Content-Disposition header's + `filename' parameter, and it is unquoted. If that header is missing + the `filename' parameter, this method falls back to looking for the + `name' parameter. + content-dispositioncollapse_rfc2231_valueReturn the boundary associated with the payload if present. + + The boundary is extracted from the Content-Type header's `boundary' + parameter, and it is unquoted. + set_boundarySet the boundary parameter in Content-Type to 'boundary'. + + This is subtly different than deleting the Content-Type header and + adding a new one with a new boundary parameter via add_header(). The + main difference is that using the set_boundary() method preserves the + order of the Content-Type header in the original message. + + HeaderParseError is raised if the message has no Content-Type header. + No Content-Type header foundnewparamsfoundppkpvget_content_charsetReturn the charset parameter of the Content-Type header. + + The returned string is always coerced to lower case. If there is no + Content-Type header, or if that header has no charset parameter, + failobj is returned. 
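The header and Content-Type parameter docstrings above (add_header, set_param, get_param, get_filename, get_all) compose as in this short sketch; the header values are illustrative:

```python
from email.message import Message

msg = Message()
msg["Subject"] = "Report"                        # __setitem__ appends; delete first to replace
msg.add_header("Content-Disposition", "attachment", filename="bud.gif")
msg.set_param("charset", "utf-8")                # Content-Type is created as text/plain if missing

print(msg.get_content_type())                    # text/plain
print(msg.get_param("charset"))                  # utf-8 (unquoted by default)
print(msg.get_filename())                        # bud.gif, from Content-Disposition
print(msg.get_all("Subject"))                    # ['Report']
```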
+ pcharsetget_charsetsReturn a list containing the charset(s) used in this message. + + The returned list of items describes the Content-Type headers' + charset parameter for this message and all the subparts in its + payload. + + Each item will either be a string (the value of the charset parameter + in the Content-Type header of that part) or the value of the + 'failobj' parameter (defaults to None), if the part does not have a + main MIME type of "text", or the charset is not defined. + + The list will contain one string for each part of the message, plus + one for the container message (i.e. self), so that a non-multipart + message will still return a list of length 1. + get_content_dispositionReturn the message's content-disposition if it exists, or None. + + The return values can be either 'inline', 'attachment' or None + according to the rfc2183. + c_demail.iteratorsMIMEPartemail.policyReturn the entire formatted message as a string. + + Optional 'unixfrom', when true, means include the Unix From_ envelope + header. maxheaderlen is retained for backward compatibility with the + base Message class, but defaults to None, meaning that the policy value + for max_line_length controls the header maximum length. 'policy' is + passed to the Generator instance used to serialize the message; if it + is not specified the policy associated with the message instance is + used. + is_attachmentcontent_dispositionattachment_find_bodypreferencelistmaintyperelatedsubpartiter_partscontent-idsubpartsget_bodyplainReturn best candidate mime part for display as 'body' of message. + + Do a depth first search, starting with self, looking for the first part + matching each of the items in preferencelist, and return the part + corresponding to the first item that has a match, or None if no items + have a match. If 'related' is not included in preferencelist, consider + the root part of any multipart/related encountered as a candidate + match. Ignore parts with 'Content-Disposition: attachment'. + best_prio_body_typesiter_attachmentsReturn an iterator over the non-main parts of a multipart. + + Skip the first of each occurrence of text/plain, text/html, + multipart/related, or multipart/alternative in the multipart (unless + they have a 'Content-Disposition: attachment' header) and include all + remaining subparts in the returned iterator. When applied to a + multipart/related, return all parts except the root part. Return an + empty iterator when applied to a multipart/alternative or a + non-multipart. + attachmentsReturn an iterator over all immediate subparts of a multipart. + + Return an empty iterator for a non-multipart. + get_contentcontent_manager_make_multipartdisallowed_subtypesexisting_subtypeCannot convert {} to {}keep_headerspart_headerscontent-multipart/make_relatedmixedmake_alternativemake_mixed_add_multipart_subtype_dispmake_Content-Dispositionadd_relatedinlineadd_alternativeadd_attachmentclear_content# Intrapackage imports# Regular expression that matches `special' characters in parameters, the# existence of which force quoting of the parameter value.# Split header parameters. BAW: this may be too simple. It isn't# strictly RFC 2045 (section 5.1) compliant, but it catches most headers# found in the wild. We may eventually need a full fledged parser.# RDM: we might have a Header here; for now just stringify it.# A tuple is used for RFC 2231 encoded parameter values where items# are (charset, language, value). charset is a string, not a Charset# instance. 
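The MIMEPart helpers described above (get_body, iter_attachments, add_attachment) are the EmailMessage-level counterparts; a brief sketch with placeholder attachment data:

```python
from email.message import EmailMessage

msg = EmailMessage()
msg["Subject"] = "Figures"
msg.set_content("See the attached plot.")                 # text/plain body part
msg.add_attachment(b"\x89PNG...", maintype="image",       # placeholder bytes; message becomes multipart/mixed
                   subtype="png", filename="plot.png")

body = msg.get_body(preferencelist=("plain",))            # depth-first search for the best body candidate
print(body.get_content_type())                            # text/plain
for part in msg.iter_attachments():                       # skips the body, yields the attachment part
    print(part.get_filename(), part.get_content_type())   # plot.png image/png
```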
RFC 2231 encoded values are never quoted, per RFC.# Encode as per RFC 2231# BAW: Please check this. I think that if quote is set it should# force quoting even if not necessary.# RDM This might be a Header, so for now stringify it.# This is different than utils.collapse_rfc2231_value() because it doesn't# try to convert the value to a unicode. Message.get_param() and# Message.get_params() are both currently defined to return the tuple in# the face of RFC 2231 parameters.# Defaults for multipart messages# Default content type# Unix From_ line# Payload manipulation.# Here is the logic table for this code, based on the email5.0.0 code:# i decode is_multipart result# ------ ------ ------------ ------------------------------# None True True None# i True True None# None False True _payload (a list)# i False True _payload element i (a Message)# i False False error (not a list)# i True False error (not a list)# None False False _payload# None True False _payload decoded (bytes)# Note that Barry planned to factor out the 'decode' case, but that# isn't so easy now that we handle the 8 bit data, which needs to be# converted in both the decode and non-decode path.# For backward compatibility, Use isinstance and this error message# instead of the more logical is_multipart test.# cte might be a Header, so for now stringify it.# payload may be bytes here.# This won't happen for RFC compliant messages (messages# containing only ASCII code points in the unicode input).# If it does happen, turn the string into bytes in a way# guaranteed not to fail.# XXX: this is a bit of a hack; decode_b should probably be factored# out somewhere, but I haven't figured out where yet.# Some decoding problem# This 'if' is for backward compatibility, it allows unicode# through even though that won't work correctly if the# message is serialized.# MAPPING INTERFACE (partial)# "Internal" methods (public API, but only intended for use by a parser# or generator, not normal application code.# Additional useful stuff# Use these three methods instead of the three above.# This should have no parameters# RFC 2045, section 5.2 says if its invalid, use text/plain# Like get_params() but preserves the quoting of values. BAW:# should this be part of the public interface?# Must have been a bare attribute# BAW: should we be strict?# Set the Content-Type, you get a MIME-Version# Skip the first param; it's the old type.# RFC 2046 says that boundaries may begin but not end in w/s# There was no Content-Type header, and we don't know what type# to set it to, so raise an exception.# The original Content-Type header had no boundary attribute.# Tack one on the end. BAW: should we raise an exception# instead???# Replace the existing Content-Type header with the new value# RFC 2231 encoded, so decode it, and it better end up as ascii.# LookupError will be raised if the charset isn't known to# Python. UnicodeError will be raised if the encoded text# contains a character not in the charset.# charset characters must be in us-ascii range# RFC 2046, $4.1.2 says charsets are not case sensitive# I.e. 
def walk(self): ...# Certain malformed messages can have content type set to `multipart/*`# but still have single part body, in which case payload.copy() can# fail with AttributeError.# payload is not a list, it is most probably a string.# For related, we treat everything but the root as an attachment.# The root may be indicated by 'start'; if there's no start or we# can't find the named start, treat the first subpart as the root.# Otherwise we more or less invert the remaining logic in get_body.# This only really works in edge cases (ex: non-text related or# alternatives) if the sending agent sets content-disposition.# Only skip the first example of each candidate type.# There is existing content, move it to the first subpart.b'Basic message object for the email package object model.'u'Basic message object for the email package object model.'b'Message'u'Message'b'EmailMessage'u'EmailMessage'b'[ \(\)<>@,;:\\"/\[\]\?=]'u'[ \(\)<>@,;:\\"/\[\]\?=]'b'Convenience function to format and return a key=value pair. + + This will quote the value if needed or if quote is true. If value is a + three tuple (charset, language, value), it will be encoded according + to RFC2231 rules. If it contains non-ascii characters it will likewise + be encoded according to RFC2231 rules, using the utf-8 charset and + a null language. + 'u'Convenience function to format and return a key=value pair. + + This will quote the value if needed or if quote is true. If value is a + three tuple (charset, language, value), it will be encoded according + to RFC2231 rules. If it contains non-ascii characters it will likewise + be encoded according to RFC2231 rules, using the utf-8 charset and + a null language. + 'b'%s="%s"'u'%s="%s"'b'Basic message object. + + A message object is defined as something that has a bunch of RFC 2822 + headers and a payload. It may optionally have an envelope header + (a.k.a. Unix-From or From_ header). If the message is a container (i.e. a + multipart or a message/rfc822), then the payload is a list of Message + objects, otherwise it is a string. + + Message objects implement part of the `mapping' interface, which assumes + there is exactly one occurrence of the header per message. Some headers + do in fact appear multiple times (e.g. Received) and for those headers, + you must use the explicit API to set or get all the headers. Not all of + the mapping methods are implemented. + 'u'Basic message object. + + A message object is defined as something that has a bunch of RFC 2822 + headers and a payload. It may optionally have an envelope header + (a.k.a. Unix-From or From_ header). If the message is a container (i.e. a + multipart or a message/rfc822), then the payload is a list of Message + objects, otherwise it is a string. + + Message objects implement part of the `mapping' interface, which assumes + there is exactly one occurrence of the header per message. Some headers + do in fact appear multiple times (e.g. Received) and for those headers, + you must use the explicit API to set or get all the headers. Not all of + the mapping methods are implemented. + 'b'text/plain'u'text/plain'b'Return the entire formatted message as a string. + 'u'Return the entire formatted message as a string. + 'b'Return the entire formatted message as a string. + + Optional 'unixfrom', when true, means include the Unix From_ envelope + header. For backward compatibility reasons, if maxheaderlen is + not specified it defaults to 0, so you must override it explicitly + if you want a different maxheaderlen. 
'policy' is passed to the + Generator instance used to serialize the message; if it is not + specified the policy associated with the message instance is used. + + If the message object contains binary data that is not encoded + according to RFC standards, the non-compliant data will be replaced by + unicode "unknown character" code points. + 'u'Return the entire formatted message as a string. + + Optional 'unixfrom', when true, means include the Unix From_ envelope + header. For backward compatibility reasons, if maxheaderlen is + not specified it defaults to 0, so you must override it explicitly + if you want a different maxheaderlen. 'policy' is passed to the + Generator instance used to serialize the message; if it is not + specified the policy associated with the message instance is used. + + If the message object contains binary data that is not encoded + according to RFC standards, the non-compliant data will be replaced by + unicode "unknown character" code points. + 'b'Return the entire formatted message as a bytes object. + 'u'Return the entire formatted message as a bytes object. + 'b'Return the entire formatted message as a bytes object. + + Optional 'unixfrom', when true, means include the Unix From_ envelope + header. 'policy' is passed to the BytesGenerator instance used to + serialize the message; if not specified the policy associated with + the message instance is used. + 'u'Return the entire formatted message as a bytes object. + + Optional 'unixfrom', when true, means include the Unix From_ envelope + header. 'policy' is passed to the BytesGenerator instance used to + serialize the message; if not specified the policy associated with + the message instance is used. + 'b'Return True if the message consists of multiple parts.'u'Return True if the message consists of multiple parts.'b'Add the given payload to the current payload. + + The current payload will always be a list of objects after this method + is called. If you want to set the payload to a scalar object, use + set_payload() instead. + 'u'Add the given payload to the current payload. + + The current payload will always be a list of objects after this method + is called. If you want to set the payload to a scalar object, use + set_payload() instead. + 'b'Attach is not valid on a message with a non-multipart payload'u'Attach is not valid on a message with a non-multipart payload'b'Return a reference to the payload. + + The payload will either be a list object or a string. If you mutate + the list object, you modify the message's payload in place. Optional + i returns that index into the payload. + + Optional decode is a flag indicating whether the payload should be + decoded or not, according to the Content-Transfer-Encoding header + (default is False). + + When True and the message is not a multipart, the payload will be + decoded if this header's value is `quoted-printable' or `base64'. If + some other encoding is used, or the header is missing, or if the + payload has bogus data (i.e. bogus base64 or uuencoded data), the + payload is returned as-is. + + If the message is a multipart and the decode flag is True, then None + is returned. + 'u'Return a reference to the payload. + + The payload will either be a list object or a string. If you mutate + the list object, you modify the message's payload in place. Optional + i returns that index into the payload. 
+ + Optional decode is a flag indicating whether the payload should be + decoded or not, according to the Content-Transfer-Encoding header + (default is False). + + When True and the message is not a multipart, the payload will be + decoded if this header's value is `quoted-printable' or `base64'. If + some other encoding is used, or the header is missing, or if the + payload has bogus data (i.e. bogus base64 or uuencoded data), the + payload is returned as-is. + + If the message is a multipart and the decode flag is True, then None + is returned. + 'b'Expected list, got %s'u'Expected list, got %s'b'x-uuencode'u'x-uuencode'b'uuencode'u'uuencode'b'uue'u'uue'b'x-uue'u'x-uue'b'Set the payload to the given value. + + Optional charset sets the message's default character set. See + set_charset() for details. + 'u'Set the payload to the given value. + + Optional charset sets the message's default character set. See + set_charset() for details. + 'b'Set the charset of the payload to a given character set. + + charset can be a Charset instance, a string naming a character set, or + None. If it is a string it will be converted to a Charset instance. + If charset is None, the charset parameter will be removed from the + Content-Type field. Anything else will generate a TypeError. + + The message will be assumed to be of type text/* encoded with + charset.input_charset. It will be converted to charset.output_charset + and encoded properly, if needed, when generating the plain text + representation of the message. MIME headers (MIME-Version, + Content-Type, Content-Transfer-Encoding) will be added as needed. + 'u'Set the charset of the payload to a given character set. + + charset can be a Charset instance, a string naming a character set, or + None. If it is a string it will be converted to a Charset instance. + If charset is None, the charset parameter will be removed from the + Content-Type field. Anything else will generate a TypeError. + + The message will be assumed to be of type text/* encoded with + charset.input_charset. It will be converted to charset.output_charset + and encoded properly, if needed, when generating the plain text + representation of the message. MIME headers (MIME-Version, + Content-Type, Content-Transfer-Encoding) will be added as needed. + 'b'MIME-Version'u'MIME-Version'b'Return the Charset instance associated with the message's payload. + 'u'Return the Charset instance associated with the message's payload. + 'b'Return the total number of headers, including duplicates.'u'Return the total number of headers, including duplicates.'b'Get a header value. + + Return None if the header is missing instead of raising an exception. + + Note that if the header appeared multiple times, exactly which + occurrence gets returned is undefined. Use get_all() to get all + the values matching a header field name. + 'u'Get a header value. + + Return None if the header is missing instead of raising an exception. + + Note that if the header appeared multiple times, exactly which + occurrence gets returned is undefined. Use get_all() to get all + the values matching a header field name. + 'b'Set the value of a header. + + Note: this does not overwrite an existing header with the same field + name. Use __delitem__() first to delete any existing headers. + 'u'Set the value of a header. + + Note: this does not overwrite an existing header with the same field + name. Use __delitem__() first to delete any existing headers. 
+ 'b'There may be at most {} {} headers in a message'u'There may be at most {} {} headers in a message'b'Delete all occurrences of a header, if present. + + Does not raise an exception if the header is missing. + 'u'Delete all occurrences of a header, if present. + + Does not raise an exception if the header is missing. + 'b'Return a list of all the message's header field names. + + These will be sorted in the order they appeared in the original + message, or were added to the message, and may contain duplicates. + Any fields deleted and re-inserted are always appended to the header + list. + 'u'Return a list of all the message's header field names. + + These will be sorted in the order they appeared in the original + message, or were added to the message, and may contain duplicates. + Any fields deleted �q<� \ No newline at end of file diff --git a/example/codeql-db/db-python/default/pools/0/pageDump/page-000000002 b/example/codeql-db/db-python/default/pools/0/pageDump/page-000000002 new file mode 100644 index 0000000000000000000000000000000000000000..ddbf2f3d10c55c00756b562dc315a500697e1faf --- /dev/null +++ b/example/codeql-db/db-python/default/pools/0/pageDump/page-000000002 @@ -0,0 +1,28979 @@ +u'Return a list of all the message's header field names. + + These will be sorted in the order they appeared in the original + message, or were added to the message, and may contain duplicates. + Any fields deleted and re-inserted are always appended to the header + list. + 'b'Return a list of all the message's header values. + + These will be sorted in the order they appeared in the original + message, or were added to the message, and may contain duplicates. + Any fields deleted and re-inserted are always appended to the header + list. + 'u'Return a list of all the message's header values. + + These will be sorted in the order they appeared in the original + message, or were added to the message, and may contain duplicates. + Any fields deleted and re-inserted are always appended to the header + list. + 'b'Get all the message's header fields and values. + + These will be sorted in the order they appeared in the original + message, or were added to the message, and may contain duplicates. + Any fields deleted and re-inserted are always appended to the header + list. + 'u'Get all the message's header fields and values. + + These will be sorted in the order they appeared in the original + message, or were added to the message, and may contain duplicates. + Any fields deleted and re-inserted are always appended to the header + list. + 'b'Get a header value. + + Like __getitem__() but return failobj instead of None when the field + is missing. + 'u'Get a header value. + + Like __getitem__() but return failobj instead of None when the field + is missing. + 'b'Store name and value in the model without modification. + + This is an "internal" API, intended only for use by a parser. + 'u'Store name and value in the model without modification. + + This is an "internal" API, intended only for use by a parser. + 'b'Return the (name, value) header pairs without modification. + + This is an "internal" API, intended only for use by a generator. + 'u'Return the (name, value) header pairs without modification. + + This is an "internal" API, intended only for use by a generator. + 'b'Return a list of all the values for the named field. + + These will be sorted in the order they appeared in the original + message, and may contain duplicates. 
Any fields deleted and + re-inserted are always appended to the header list. + + If no such fields exist, failobj is returned (defaults to None). + 'u'Return a list of all the values for the named field. + + These will be sorted in the order they appeared in the original + message, and may contain duplicates. Any fields deleted and + re-inserted are always appended to the header list. + + If no such fields exist, failobj is returned (defaults to None). + 'b'Extended header setting. + + name is the header field to add. keyword arguments can be used to set + additional parameters for the header field, with underscores converted + to dashes. Normally the parameter will be added as key="value" unless + value is None, in which case only the key will be added. If a + parameter value contains non-ASCII characters it can be specified as a + three-tuple of (charset, language, value), in which case it will be + encoded according to RFC2231 rules. Otherwise it will be encoded using + the utf-8 charset and a language of ''. + + Examples: + + msg.add_header('content-disposition', 'attachment', filename='bud.gif') + msg.add_header('content-disposition', 'attachment', + filename=('utf-8', '', Fußballer.ppt')) + msg.add_header('content-disposition', 'attachment', + filename='Fußballer.ppt')) + 'u'Extended header setting. + + name is the header field to add. keyword arguments can be used to set + additional parameters for the header field, with underscores converted + to dashes. Normally the parameter will be added as key="value" unless + value is None, in which case only the key will be added. If a + parameter value contains non-ASCII characters it can be specified as a + three-tuple of (charset, language, value), in which case it will be + encoded according to RFC2231 rules. Otherwise it will be encoded using + the utf-8 charset and a language of ''. + + Examples: + + msg.add_header('content-disposition', 'attachment', filename='bud.gif') + msg.add_header('content-disposition', 'attachment', + filename=('utf-8', '', Fußballer.ppt')) + msg.add_header('content-disposition', 'attachment', + filename='Fußballer.ppt')) + 'b'Replace a header. + + Replace the first matching header found in the message, retaining + header order and case. If no matching header was found, a KeyError is + raised. + 'u'Replace a header. + + Replace the first matching header found in the message, retaining + header order and case. If no matching header was found, a KeyError is + raised. + 'b'Return the message's content type. + + The returned string is coerced to lower case of the form + `maintype/subtype'. If there was no Content-Type header in the + message, the default type as given by get_default_type() will be + returned. Since according to RFC 2045, messages always have a default + type this will always return a value. + + RFC 2045 defines a message's default type to be text/plain unless it + appears inside a multipart/digest container, in which case it would be + message/rfc822. + 'u'Return the message's content type. + + The returned string is coerced to lower case of the form + `maintype/subtype'. If there was no Content-Type header in the + message, the default type as given by get_default_type() will be + returned. Since according to RFC 2045, messages always have a default + type this will always return a value. + + RFC 2045 defines a message's default type to be text/plain unless it + appears inside a multipart/digest container, in which case it would be + message/rfc822. + 'b'Return the message's main content type. 
+ + This is the `maintype' part of the string returned by + get_content_type(). + 'u'Return the message's main content type. + + This is the `maintype' part of the string returned by + get_content_type(). + 'b'Returns the message's sub-content type. + + This is the `subtype' part of the string returned by + get_content_type(). + 'u'Returns the message's sub-content type. + + This is the `subtype' part of the string returned by + get_content_type(). + 'b'Return the `default' content type. + + Most messages have a default content type of text/plain, except for + messages that are subparts of multipart/digest containers. Such + subparts have a default content type of message/rfc822. + 'u'Return the `default' content type. + + Most messages have a default content type of text/plain, except for + messages that are subparts of multipart/digest containers. Such + subparts have a default content type of message/rfc822. + 'b'Set the `default' content type. + + ctype should be either "text/plain" or "message/rfc822", although this + is not enforced. The default content type is not stored in the + Content-Type header. + 'u'Set the `default' content type. + + ctype should be either "text/plain" or "message/rfc822", although this + is not enforced. The default content type is not stored in the + Content-Type header. + 'b'Return the message's Content-Type parameters, as a list. + + The elements of the returned list are 2-tuples of key/value pairs, as + split on the `=' sign. The left hand side of the `=' is the key, + while the right hand side is the value. If there is no `=' sign in + the parameter the value is the empty string. The value is as + described in the get_param() method. + + Optional failobj is the object to return if there is no Content-Type + header. Optional header is the header to search instead of + Content-Type. If unquote is True, the value is unquoted. + 'u'Return the message's Content-Type parameters, as a list. + + The elements of the returned list are 2-tuples of key/value pairs, as + split on the `=' sign. The left hand side of the `=' is the key, + while the right hand side is the value. If there is no `=' sign in + the parameter the value is the empty string. The value is as + described in the get_param() method. + + Optional failobj is the object to return if there is no Content-Type + header. Optional header is the header to search instead of + Content-Type. If unquote is True, the value is unquoted. + 'b'Return the parameter value if found in the Content-Type header. + + Optional failobj is the object to return if there is no Content-Type + header, or the Content-Type header has no such parameter. Optional + header is the header to search instead of Content-Type. + + Parameter keys are always compared case insensitively. The return + value can either be a string, or a 3-tuple if the parameter was RFC + 2231 encoded. When it's a 3-tuple, the elements of the value are of + the form (CHARSET, LANGUAGE, VALUE). Note that both CHARSET and + LANGUAGE can be None, in which case you should consider VALUE to be + encoded in the us-ascii charset. You can usually ignore LANGUAGE. + The parameter value (either the returned string, or the VALUE item in + the 3-tuple) is always unquoted, unless unquote is set to False. 
+ + If your application doesn't care whether the parameter was RFC 2231 + encoded, it can turn the return value into a string as follows: + + rawparam = msg.get_param('foo') + param = email.utils.collapse_rfc2231_value(rawparam) + + 'u'Return the parameter value if found in the Content-Type header. + + Optional failobj is the object to return if there is no Content-Type + header, or the Content-Type header has no such parameter. Optional + header is the header to search instead of Content-Type. + + Parameter keys are always compared case insensitively. The return + value can either be a string, or a 3-tuple if the parameter was RFC + 2231 encoded. When it's a 3-tuple, the elements of the value are of + the form (CHARSET, LANGUAGE, VALUE). Note that both CHARSET and + LANGUAGE can be None, in which case you should consider VALUE to be + encoded in the us-ascii charset. You can usually ignore LANGUAGE. + The parameter value (either the returned string, or the VALUE item in + the 3-tuple) is always unquoted, unless unquote is set to False. + + If your application doesn't care whether the parameter was RFC 2231 + encoded, it can turn the return value into a string as follows: + + rawparam = msg.get_param('foo') + param = email.utils.collapse_rfc2231_value(rawparam) + + 'b'Set a parameter in the Content-Type header. + + If the parameter already exists in the header, its value will be + replaced with the new value. + + If header is Content-Type and has not yet been defined for this + message, it will be set to "text/plain" and the new parameter and + value will be appended as per RFC 2045. + + An alternate header can be specified in the header argument, and all + parameters will be quoted as necessary unless requote is False. + + If charset is specified, the parameter will be encoded according to RFC + 2231. Optional language specifies the RFC 2231 language, defaulting + to the empty string. Both charset and language should be strings. + 'u'Set a parameter in the Content-Type header. + + If the parameter already exists in the header, its value will be + replaced with the new value. + + If header is Content-Type and has not yet been defined for this + message, it will be set to "text/plain" and the new parameter and + value will be appended as per RFC 2045. + + An alternate header can be specified in the header argument, and all + parameters will be quoted as necessary unless requote is False. + + If charset is specified, the parameter will be encoded according to RFC + 2231. Optional language specifies the RFC 2231 language, defaulting + to the empty string. Both charset and language should be strings. + 'b'Remove the given parameter completely from the Content-Type header. + + The header will be re-written in place without the parameter or its + value. All values will be quoted as necessary unless requote is + False. Optional header specifies an alternative to the Content-Type + header. + 'u'Remove the given parameter completely from the Content-Type header. + + The header will be re-written in place without the parameter or its + value. All values will be quoted as necessary unless requote is + False. Optional header specifies an alternative to the Content-Type + header. + 'b'Set the main type and subtype for the Content-Type header. + + type must be a string in the form "maintype/subtype", otherwise a + ValueError is raised. + + This method replaces the Content-Type header, keeping all the + parameters in place. If requote is False, this leaves the existing + header's quoting as is. 
Otherwise, the parameters will be quoted (the + default). + + An alternative header can be specified in the header argument. When + the Content-Type header is set, we'll always also add a MIME-Version + header. + 'u'Set the main type and subtype for the Content-Type header. + + type must be a string in the form "maintype/subtype", otherwise a + ValueError is raised. + + This method replaces the Content-Type header, keeping all the + parameters in place. If requote is False, this leaves the existing + header's quoting as is. Otherwise, the parameters will be quoted (the + default). + + An alternative header can be specified in the header argument. When + the Content-Type header is set, we'll always also add a MIME-Version + header. + 'b'mime-version'u'mime-version'b'Return the filename associated with the payload if present. + + The filename is extracted from the Content-Disposition header's + `filename' parameter, and it is unquoted. If that header is missing + the `filename' parameter, this method falls back to looking for the + `name' parameter. + 'u'Return the filename associated with the payload if present. + + The filename is extracted from the Content-Disposition header's + `filename' parameter, and it is unquoted. If that header is missing + the `filename' parameter, this method falls back to looking for the + `name' parameter. + 'b'content-disposition'u'content-disposition'b'Return the boundary associated with the payload if present. + + The boundary is extracted from the Content-Type header's `boundary' + parameter, and it is unquoted. + 'u'Return the boundary associated with the payload if present. + + The boundary is extracted from the Content-Type header's `boundary' + parameter, and it is unquoted. + 'b'boundary'u'boundary'b'Set the boundary parameter in Content-Type to 'boundary'. + + This is subtly different than deleting the Content-Type header and + adding a new one with a new boundary parameter via add_header(). The + main difference is that using the set_boundary() method preserves the + order of the Content-Type header in the original message. + + HeaderParseError is raised if the message has no Content-Type header. + 'u'Set the boundary parameter in Content-Type to 'boundary'. + + This is subtly different than deleting the Content-Type header and + adding a new one with a new boundary parameter via add_header(). The + main difference is that using the set_boundary() method preserves the + order of the Content-Type header in the original message. + + HeaderParseError is raised if the message has no Content-Type header. + 'b'No Content-Type header found'u'No Content-Type header found'b'Return the charset parameter of the Content-Type header. + + The returned string is always coerced to lower case. If there is no + Content-Type header, or if that header has no charset parameter, + failobj is returned. + 'u'Return the charset parameter of the Content-Type header. + + The returned string is always coerced to lower case. If there is no + Content-Type header, or if that header has no charset parameter, + failobj is returned. + 'b'Return a list containing the charset(s) used in this message. + + The returned list of items describes the Content-Type headers' + charset parameter for this message and all the subparts in its + payload. 
+ + Each item will either be a string (the value of the charset parameter + in the Content-Type header of that part) or the value of the + 'failobj' parameter (defaults to None), if the part does not have a + main MIME type of "text", or the charset is not defined. + + The list will contain one string for each part of the message, plus + one for the container message (i.e. self), so that a non-multipart + message will still return a list of length 1. + 'u'Return a list containing the charset(s) used in this message. + + The returned list of items describes the Content-Type headers' + charset parameter for this message and all the subparts in its + payload. + + Each item will either be a string (the value of the charset parameter + in the Content-Type header of that part) or the value of the + 'failobj' parameter (defaults to None), if the part does not have a + main MIME type of "text", or the charset is not defined. + + The list will contain one string for each part of the message, plus + one for the container message (i.e. self), so that a non-multipart + message will still return a list of length 1. + 'b'Return the message's content-disposition if it exists, or None. + + The return values can be either 'inline', 'attachment' or None + according to the rfc2183. + 'u'Return the message's content-disposition if it exists, or None. + + The return values can be either 'inline', 'attachment' or None + according to the rfc2183. + 'b'Return the entire formatted message as a string. + + Optional 'unixfrom', when true, means include the Unix From_ envelope + header. maxheaderlen is retained for backward compatibility with the + base Message class, but defaults to None, meaning that the policy value + for max_line_length controls the header maximum length. 'policy' is + passed to the Generator instance used to serialize the message; if it + is not specified the policy associated with the message instance is + used. + 'u'Return the entire formatted message as a string. + + Optional 'unixfrom', when true, means include the Unix From_ envelope + header. maxheaderlen is retained for backward compatibility with the + base Message class, but defaults to None, meaning that the policy value + for max_line_length controls the header maximum length. 'policy' is + passed to the Generator instance used to serialize the message; if it + is not specified the policy associated with the message instance is + used. + 'b'attachment'u'attachment'b'related'u'related'b'content-id'u'content-id'b'plain'u'plain'b'Return best candidate mime part for display as 'body' of message. + + Do a depth first search, starting with self, looking for the first part + matching each of the items in preferencelist, and return the part + corresponding to the first item that has a match, or None if no items + have a match. If 'related' is not included in preferencelist, consider + the root part of any multipart/related encountered as a candidate + match. Ignore parts with 'Content-Disposition: attachment'. + 'u'Return best candidate mime part for display as 'body' of message. + + Do a depth first search, starting with self, looking for the first part + matching each of the items in preferencelist, and return the part + corresponding to the first item that has a match, or None if no items + have a match. If 'related' is not included in preferencelist, consider + the root part of any multipart/related encountered as a candidate + match. Ignore parts with 'Content-Disposition: attachment'. 
+ 'b'alternative'u'alternative'b'Return an iterator over the non-main parts of a multipart. + + Skip the first of each occurrence of text/plain, text/html, + multipart/related, or multipart/alternative in the multipart (unless + they have a 'Content-Disposition: attachment' header) and include all + remaining subparts in the returned iterator. When applied to a + multipart/related, return all parts except the root part. Return an + empty iterator when applied to a multipart/alternative or a + non-multipart. + 'u'Return an iterator over the non-main parts of a multipart. + + Skip the first of each occurrence of text/plain, text/html, + multipart/related, or multipart/alternative in the multipart (unless + they have a 'Content-Disposition: attachment' header) and include all + remaining subparts in the returned iterator. When applied to a + multipart/related, return all parts except the root part. Return an + empty iterator when applied to a multipart/alternative or a + non-multipart. + 'b'Return an iterator over all immediate subparts of a multipart. + + Return an empty iterator for a non-multipart. + 'u'Return an iterator over all immediate subparts of a multipart. + + Return an empty iterator for a non-multipart. + 'b'Cannot convert {} to {}'u'Cannot convert {} to {}'b'content-'u'content-'b'multipart/'u'multipart/'b'mixed'u'mixed'b'make_'u'make_'b'Content-Disposition'u'Content-Disposition'b'inline'u'inline'u'email.message'Guess the MIME type of a file. + +This module defines two useful functions: + +guess_type(url, strict=True) -- guess the MIME type and encoding of a URL. + +guess_extension(type, strict=True) -- guess the extension for a given MIME type. + +It also contains the following, for tuning the behavior: + +Data: + +knownfiles -- list of files to parse +inited -- flag set when init() has been called +suffix_map -- dictionary mapping suffixes to suffixes +encodings_map -- dictionary mapping suffixes to encodings +types_map -- dictionary mapping suffixes to types + +Functions: + +init([files]) -- parse a list of files, default knownfiles (on Windows, the + default values are taken from the registry) +read_mime_types(file) -- parse one file, return a dictionary or None +knownfilesinitedMimeTypesguess_typeguess_all_extensionsguess_extensionadd_typeread_mime_typessuffix_mapencodings_maptypes_mapcommon_types/etc/mime.types/etc/httpd/mime.types/etc/httpd/conf/mime.types/etc/apache/mime.types/etc/apache2/mime.types/usr/local/etc/httpd/conf/mime.types/usr/local/lib/netscape/mime.types/usr/local/etc/mime.types_dbMIME-types datastore. + + This datastore can handle information from mime.types-style files + and supports basic determination of MIME type from a filename or + URL, and can guess a reasonable extension given a MIME type. + _encodings_map_default_suffix_map_defaulttypes_map_inv_types_map_default_common_types_defaultAdd a mapping between a type and an extension. + + When the extension is already known, the new + type will replace the old one. When the type + is already known the extension will be added + to the list of known extensions. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + extsGuess the type of a file which is either a URL or a path-like object. 
+ + Return value is a tuple (type, encoding) where type is None if + the type can't be guessed (no or unknown suffix) or a string + of the form type/subtype, usable for a MIME Content-type + header; and encoding is None for no encoding or the name of + the program used to encode (e.g. compress or gzip). The + mappings are table driven. Encoding suffixes are case + sensitive; type suffixes are first tried case sensitive, then + case insensitive. + + The suffixes .tgz, .taz and .tz (case sensitive!) are all + mapped to '.tar.gz'. (This is table-driven too, using the + dictionary suffix_map.) + + Optional `strict' argument when False adds a bunch of commonly found, + but non-standard types. + schemecommasemiGuess the extensions for a file based on its MIME type. + + Return value is a list of strings giving the possible filename + extensions, including the leading dot ('.'). The extension is not + guaranteed to have been associated with any particular data stream, + but would be mapped to the MIME type `type' by guess_type(). + + Optional `strict' argument when false adds a bunch of commonly found, + but non-standard types. + Guess the extension for a file based on its MIME type. + + Return value is a string giving a filename extension, + including the leading dot ('.'). The extension is not + guaranteed to have been associated with any particular data + stream, but would be mapped to the MIME type `type' by + guess_type(). If no extension can be guessed for `type', None + is returned. + + Optional `strict' argument when false adds a bunch of commonly found, + but non-standard types. + + Read a single mime.types-format file, specified by pathname. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + readfp + Read a single mime.types-format file. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + suffread_windows_registry + Load the MIME types database from Windows registry. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + enum_typesmimedbEnumKeyHKEY_CLASSES_ROOThkcrsubkeynamesubkeyQueryValueExContent TypemimetypedatatypeREG_SZGuess the type of a file based on its URL. + + Return value is a tuple (type, encoding) where type is None if the + type can't be guessed (no or unknown suffix) or a string of the + form type/subtype, usable for a MIME Content-type header; and + encoding is None for no encoding or the name of the program used + to encode (e.g. compress or gzip). The mappings are table + driven. Encoding suffixes are case sensitive; type suffixes are + first tried case sensitive, then case insensitive. + + The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped + to ".tar.gz". (This is table-driven too, using the dictionary + suffix_map). + + Optional `strict' argument when false adds a bunch of commonly found, but + non-standard types. + Guess the extensions for a file based on its MIME type. + + Return value is a list of strings giving the possible filename + extensions, including the leading dot ('.'). The extension is not + guaranteed to have been associated with any particular data + stream, but would be mapped to the MIME type `type' by + guess_type(). If no extension can be guessed for `type', None + is returned. + + Optional `strict' argument when false adds a bunch of commonly found, + but non-standard types. 
+ Guess the extension for a file based on its MIME type. + + Return value is a string giving a filename extension, including the + leading dot ('.'). The extension is not guaranteed to have been + associated with any particular data stream, but would be mapped to the + MIME type `type' by guess_type(). If no extension can be guessed for + `type', None is returned. + + Optional `strict' argument when false adds a bunch of commonly found, + but non-standard types. + Add a mapping between a type and an extension. + + When the extension is already known, the new + type will replace the old one. When the type + is already known the extension will be added + to the list of known extensions. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + db_default_mime_types.svg.gz.svgz.tar.gz.tgz.taz.tz.tar.bz2.tbz2.tar.xz.txz.Zbzip2.bz2xz.xzapplication/javascript.js.mjsapplication/json.jsonapplication/manifest+json.webmanifestapplication/msword.doc.dot.wizapplication/octet-stream.bin.dll.o.objapplication/oda.odaapplication/pdf.pdfapplication/pkcs7-mime.p7capplication/postscript.ps.ai.epsapplication/vnd.apple.mpegurl.m3u.m3u8application/vnd.ms-excel.xls.xlbapplication/vnd.ms-powerpoint.ppt.pot.ppa.pps.pwzapplication/wasm.wasmapplication/x-bcpio.bcpioapplication/x-cpio.cpioapplication/x-csh.cshapplication/x-dvi.dviapplication/x-gtar.gtarapplication/x-hdf.hdfapplication/x-hdf5.h5application/x-latex.latexapplication/x-mif.mifapplication/x-netcdf.cdf.ncapplication/x-pkcs12.p12.pfxapplication/x-pn-realaudio.ramapplication/x-python-code.pyoapplication/x-sh.shapplication/x-shar.sharapplication/x-shockwave-flash.swfapplication/x-sv4cpio.sv4cpioapplication/x-sv4crc.sv4crcapplication/x-tar.tarapplication/x-tcl.tclapplication/x-tex.texapplication/x-texinfo.texi.texinfoapplication/x-troff.roff.t.trapplication/x-troff-man.manapplication/x-troff-me.meapplication/x-troff-ms.msapplication/x-ustar.ustarapplication/x-wais-source.srcapplication/xml.xsl.rdf.wsdl.xpdlapplication/zip.zipaudio/basic.au.sndaudio/mpeg.mp3.mp2audio/x-aiff.aif.aifc.aiffaudio/x-pn-realaudio.raaudio/x-wav.wavimage/bmp.bmpimage/gif.gifimage/ief.iefimage/jpeg.jpg.jpe.jpegimage/png.pngimage/svg+xml.svgimage/tiff.tiff.tifimage/vnd.microsoft.icon.icoimage/x-cmu-raster.rasimage/x-ms-bmpimage/x-portable-anymap.pnmimage/x-portable-bitmap.pbmimage/x-portable-graymap.pgmimage/x-portable-pixmap.ppmimage/x-rgb.rgbimage/x-xbitmap.xbmimage/x-xpixmap.xpmimage/x-xwindowdump.xwd.eml.mht.mhtml.nwstext/css.csstext/csv.csvtext/html.html.htm.bat.h.ksh.pltext/richtext.rtxtext/tab-separated-values.tsvtext/x-pythontext/x-setext.etxtext/x-sgml.sgm.sgmltext/x-vcard.vcf.xmlvideo/mp4.mp4video/mpeg.mpeg.m1v.mpa.mpe.mpgvideo/quicktime.mov.qtvideo/webm.webmvideo/x-msvideo.avivideo/x-sgi-movie.movieapplication/rtf.rtfaudio/midi.midi.midimage/jpgimage/pict.pict.pct.pictext/xul.xulUsage: mimetypes.py [options] type + +Options: + --help / -h -- print this message and exit + --lenient / -l -- additionally search of some common, but non-standard + types. + --extension / -e -- guess extension instead of type + +More than one type argument may be given. 
+USAGEhlelenient-h--help--lenient--extensiongtypeguessI don't know anything about typetype:encoding:# Apache# Apache 1# Apache 2# Apache 1.2# Apache 1.3# dict for (non-strict, strict)# syntax of data URLs:# dataurl := "data:" [ mediatype ] [ ";base64" ] "," data# mediatype := [ type "/" subtype ] *( ";" parameter )# data := *urlchar# parameter := attribute "=" value# type/subtype defaults to "text/plain"# bad data URL# never compressed, so encoding is None# Windows only# Only check file extensions# raises OSError if no 'Content Type' value# so that MimeTypes.__init__() doesn't call us again# Make the DB a global variable now that it is fully initialized# Before adding new types, make sure they are either registered with IANA,# at http://www.iana.org/assignments/media-types# or extensions, i.e. using the x- prefix# If you add to these, please keep them sorted by mime type.# Make sure the entry with the preferred file extension for a particular mime type# appears before any others of the same mimetype.# These are non-standard types, commonly found in the wild. They will# only match if strict=0 flag is given to the API methods.# Please sort these toob'Guess the MIME type of a file. + +This module defines two useful functions: + +guess_type(url, strict=True) -- guess the MIME type and encoding of a URL. + +guess_extension(type, strict=True) -- guess the extension for a given MIME type. + +It also contains the following, for tuning the behavior: + +Data: + +knownfiles -- list of files to parse +inited -- flag set when init() has been called +suffix_map -- dictionary mapping suffixes to suffixes +encodings_map -- dictionary mapping suffixes to encodings +types_map -- dictionary mapping suffixes to types + +Functions: + +init([files]) -- parse a list of files, default knownfiles (on Windows, the + default values are taken from the registry) +read_mime_types(file) -- parse one file, return a dictionary or None +'u'Guess the MIME type of a file. + +This module defines two useful functions: + +guess_type(url, strict=True) -- guess the MIME type and encoding of a URL. + +guess_extension(type, strict=True) -- guess the extension for a given MIME type. + +It also contains the following, for tuning the behavior: + +Data: + +knownfiles -- list of files to parse +inited -- flag set when init() has been called +suffix_map -- dictionary mapping suffixes to suffixes +encodings_map -- dictionary mapping suffixes to encodings +types_map -- dictionary mapping suffixes to types + +Functions: + +init([files]) -- parse a list of files, default knownfiles (on Windows, the + default values are taken from the registry) +read_mime_types(file) -- parse one file, return a dictionary or None +'b'knownfiles'u'knownfiles'b'inited'u'inited'b'MimeTypes'u'MimeTypes'b'guess_type'u'guess_type'b'guess_all_extensions'u'guess_all_extensions'b'guess_extension'u'guess_extension'b'add_type'u'add_type'b'init'u'init'b'read_mime_types'u'read_mime_types'b'suffix_map'u'suffix_map'b'encodings_map'u'encodings_map'b'types_map'u'types_map'b'common_types'u'common_types'b'/etc/mime.types'u'/etc/mime.types'b'/etc/httpd/mime.types'u'/etc/httpd/mime.types'b'/etc/httpd/conf/mime.types'u'/etc/httpd/conf/mime.types'b'/etc/apache/mime.types'u'/etc/apache/mime.types'b'/etc/apache2/mime.types'u'/etc/apache2/mime.types'b'/usr/local/etc/httpd/conf/mime.types'u'/usr/local/etc/httpd/conf/mime.types'b'/usr/local/lib/netscape/mime.types'u'/usr/local/lib/netscape/mime.types'b'/usr/local/etc/mime.types'u'/usr/local/etc/mime.types'b'MIME-types datastore. 
+ + This datastore can handle information from mime.types-style files + and supports basic determination of MIME type from a filename or + URL, and can guess a reasonable extension given a MIME type. + 'u'MIME-types datastore. + + This datastore can handle information from mime.types-style files + and supports basic determination of MIME type from a filename or + URL, and can guess a reasonable extension given a MIME type. + 'b'Add a mapping between a type and an extension. + + When the extension is already known, the new + type will replace the old one. When the type + is already known the extension will be added + to the list of known extensions. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + 'u'Add a mapping between a type and an extension. + + When the extension is already known, the new + type will replace the old one. When the type + is already known the extension will be added + to the list of known extensions. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + 'b'Guess the type of a file which is either a URL or a path-like object. + + Return value is a tuple (type, encoding) where type is None if + the type can't be guessed (no or unknown suffix) or a string + of the form type/subtype, usable for a MIME Content-type + header; and encoding is None for no encoding or the name of + the program used to encode (e.g. compress or gzip). The + mappings are table driven. Encoding suffixes are case + sensitive; type suffixes are first tried case sensitive, then + case insensitive. + + The suffixes .tgz, .taz and .tz (case sensitive!) are all + mapped to '.tar.gz'. (This is table-driven too, using the + dictionary suffix_map.) + + Optional `strict' argument when False adds a bunch of commonly found, + but non-standard types. + 'u'Guess the type of a file which is either a URL or a path-like object. + + Return value is a tuple (type, encoding) where type is None if + the type can't be guessed (no or unknown suffix) or a string + of the form type/subtype, usable for a MIME Content-type + header; and encoding is None for no encoding or the name of + the program used to encode (e.g. compress or gzip). The + mappings are table driven. Encoding suffixes are case + sensitive; type suffixes are first tried case sensitive, then + case insensitive. + + The suffixes .tgz, .taz and .tz (case sensitive!) are all + mapped to '.tar.gz'. (This is table-driven too, using the + dictionary suffix_map.) + + Optional `strict' argument when False adds a bunch of commonly found, + but non-standard types. + 'b'Guess the extensions for a file based on its MIME type. + + Return value is a list of strings giving the possible filename + extensions, including the leading dot ('.'). The extension is not + guaranteed to have been associated with any particular data stream, + but would be mapped to the MIME type `type' by guess_type(). + + Optional `strict' argument when false adds a bunch of commonly found, + but non-standard types. + 'u'Guess the extensions for a file based on its MIME type. + + Return value is a list of strings giving the possible filename + extensions, including the leading dot ('.'). The extension is not + guaranteed to have been associated with any particular data stream, + but would be mapped to the MIME type `type' by guess_type(). + + Optional `strict' argument when false adds a bunch of commonly found, + but non-standard types. 
+ 'b'Guess the extension for a file based on its MIME type. + + Return value is a string giving a filename extension, + including the leading dot ('.'). The extension is not + guaranteed to have been associated with any particular data + stream, but would be mapped to the MIME type `type' by + guess_type(). If no extension can be guessed for `type', None + is returned. + + Optional `strict' argument when false adds a bunch of commonly found, + but non-standard types. + 'u'Guess the extension for a file based on its MIME type. + + Return value is a string giving a filename extension, + including the leading dot ('.'). The extension is not + guaranteed to have been associated with any particular data + stream, but would be mapped to the MIME type `type' by + guess_type(). If no extension can be guessed for `type', None + is returned. + + Optional `strict' argument when false adds a bunch of commonly found, + but non-standard types. + 'b' + Read a single mime.types-format file, specified by pathname. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + 'u' + Read a single mime.types-format file, specified by pathname. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + 'b' + Read a single mime.types-format file. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + 'u' + Read a single mime.types-format file. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + 'b' + Load the MIME types database from Windows registry. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + 'u' + Load the MIME types database from Windows registry. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + 'b'Content Type'u'Content Type'b'Guess the type of a file based on its URL. + + Return value is a tuple (type, encoding) where type is None if the + type can't be guessed (no or unknown suffix) or a string of the + form type/subtype, usable for a MIME Content-type header; and + encoding is None for no encoding or the name of the program used + to encode (e.g. compress or gzip). The mappings are table + driven. Encoding suffixes are case sensitive; type suffixes are + first tried case sensitive, then case insensitive. + + The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped + to ".tar.gz". (This is table-driven too, using the dictionary + suffix_map). + + Optional `strict' argument when false adds a bunch of commonly found, but + non-standard types. + 'u'Guess the type of a file based on its URL. + + Return value is a tuple (type, encoding) where type is None if the + type can't be guessed (no or unknown suffix) or a string of the + form type/subtype, usable for a MIME Content-type header; and + encoding is None for no encoding or the name of the program used + to encode (e.g. compress or gzip). The mappings are table + driven. Encoding suffixes are case sensitive; type suffixes are + first tried case sensitive, then case insensitive. + + The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped + to ".tar.gz". (This is table-driven too, using the dictionary + suffix_map). + + Optional `strict' argument when false adds a bunch of commonly found, but + non-standard types. 
+ 'b'Guess the extensions for a file based on its MIME type. + + Return value is a list of strings giving the possible filename + extensions, including the leading dot ('.'). The extension is not + guaranteed to have been associated with any particular data + stream, but would be mapped to the MIME type `type' by + guess_type(). If no extension can be guessed for `type', None + is returned. + + Optional `strict' argument when false adds a bunch of commonly found, + but non-standard types. + 'u'Guess the extensions for a file based on its MIME type. + + Return value is a list of strings giving the possible filename + extensions, including the leading dot ('.'). The extension is not + guaranteed to have been associated with any particular data + stream, but would be mapped to the MIME type `type' by + guess_type(). If no extension can be guessed for `type', None + is returned. + + Optional `strict' argument when false adds a bunch of commonly found, + but non-standard types. + 'b'Guess the extension for a file based on its MIME type. + + Return value is a string giving a filename extension, including the + leading dot ('.'). The extension is not guaranteed to have been + associated with any particular data stream, but would be mapped to the + MIME type `type' by guess_type(). If no extension can be guessed for + `type', None is returned. + + Optional `strict' argument when false adds a bunch of commonly found, + but non-standard types. + 'u'Guess the extension for a file based on its MIME type. + + Return value is a string giving a filename extension, including the + leading dot ('.'). The extension is not guaranteed to have been + associated with any particular data stream, but would be mapped to the + MIME type `type' by guess_type(). If no extension can be guessed for + `type', None is returned. + + Optional `strict' argument when false adds a bunch of commonly found, + but non-standard types. + 'b'Add a mapping between a type and an extension. + + When the extension is already known, the new + type will replace the old one. When the type + is already known the extension will be added + to the list of known extensions. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. + 'u'Add a mapping between a type and an extension. + + When the extension is already known, the new + type will replace the old one. When the type + is already known the extension will be added + to the list of known extensions. + + If strict is true, information will be added to + list of standard types, else to the list of non-standard + types. 
+ 'b'.svg.gz'u'.svg.gz'b'.svgz'u'.svgz'b'.tar.gz'u'.tar.gz'b'.tgz'u'.tgz'b'.taz'u'.taz'b'.tz'u'.tz'b'.tar.bz2'u'.tar.bz2'b'.tbz2'u'.tbz2'b'.tar.xz'u'.tar.xz'b'.txz'u'.txz'b'.Z'u'.Z'b'bzip2'u'bzip2'b'.bz2'u'.bz2'b'xz'u'xz'b'.xz'u'.xz'b'application/javascript'u'application/javascript'b'.js'u'.js'b'.mjs'u'.mjs'b'application/json'u'application/json'b'.json'u'.json'b'application/manifest+json'u'application/manifest+json'b'.webmanifest'u'.webmanifest'b'application/msword'u'application/msword'b'.doc'u'.doc'b'.dot'u'.dot'b'.wiz'u'.wiz'b'application/octet-stream'u'application/octet-stream'b'.bin'u'.bin'b'.dll'u'.dll'b'.o'u'.o'b'.obj'u'.obj'b'application/oda'u'application/oda'b'.oda'u'.oda'b'application/pdf'u'application/pdf'b'.pdf'u'.pdf'b'application/pkcs7-mime'u'application/pkcs7-mime'b'.p7c'u'.p7c'b'application/postscript'u'application/postscript'b'.ps'u'.ps'b'.ai'u'.ai'b'.eps'u'.eps'b'application/vnd.apple.mpegurl'u'application/vnd.apple.mpegurl'b'.m3u'u'.m3u'b'.m3u8'u'.m3u8'b'application/vnd.ms-excel'u'application/vnd.ms-excel'b'.xls'u'.xls'b'.xlb'u'.xlb'b'application/vnd.ms-powerpoint'u'application/vnd.ms-powerpoint'b'.ppt'u'.ppt'b'.pot'u'.pot'b'.ppa'u'.ppa'b'.pps'u'.pps'b'.pwz'u'.pwz'b'application/wasm'u'application/wasm'b'.wasm'u'.wasm'b'application/x-bcpio'u'application/x-bcpio'b'.bcpio'u'.bcpio'b'application/x-cpio'u'application/x-cpio'b'.cpio'u'.cpio'b'application/x-csh'u'application/x-csh'b'.csh'u'.csh'b'application/x-dvi'u'application/x-dvi'b'.dvi'u'.dvi'b'application/x-gtar'u'application/x-gtar'b'.gtar'u'.gtar'b'application/x-hdf'u'application/x-hdf'b'.hdf'u'.hdf'b'application/x-hdf5'u'application/x-hdf5'b'.h5'u'.h5'b'application/x-latex'u'application/x-latex'b'.latex'u'.latex'b'application/x-mif'u'application/x-mif'b'.mif'u'.mif'b'application/x-netcdf'u'application/x-netcdf'b'.cdf'u'.cdf'b'.nc'u'.nc'b'application/x-pkcs12'u'application/x-pkcs12'b'.p12'u'.p12'b'.pfx'u'.pfx'b'application/x-pn-realaudio'u'application/x-pn-realaudio'b'.ram'u'.ram'b'application/x-python-code'u'application/x-python-code'b'.pyo'u'.pyo'b'application/x-sh'u'application/x-sh'b'.sh'u'.sh'b'application/x-shar'u'application/x-shar'b'.shar'u'.shar'b'application/x-shockwave-flash'u'application/x-shockwave-flash'b'.swf'u'.swf'b'application/x-sv4cpio'u'application/x-sv4cpio'b'.sv4cpio'u'.sv4cpio'b'application/x-sv4crc'u'application/x-sv4crc'b'.sv4crc'u'.sv4crc'b'application/x-tar'u'application/x-tar'b'.tar'u'.tar'b'application/x-tcl'u'application/x-tcl'b'.tcl'u'.tcl'b'application/x-tex'u'application/x-tex'b'.tex'u'.tex'b'application/x-texinfo'u'application/x-texinfo'b'.texi'u'.texi'b'.texinfo'u'.texinfo'b'application/x-troff'u'application/x-troff'b'.roff'u'.roff'b'.t'u'.t'b'.tr'u'.tr'b'application/x-troff-man'u'application/x-troff-man'b'.man'u'.man'b'application/x-troff-me'u'application/x-troff-me'b'.me'u'.me'b'application/x-troff-ms'u'application/x-troff-ms'b'.ms'u'.ms'b'application/x-ustar'u'application/x-ustar'b'.ustar'u'.ustar'b'application/x-wais-source'u'application/x-wais-source'b'.src'u'.src'b'application/xml'u'application/xml'b'.xsl'u'.xsl'b'.rdf'u'.rdf'b'.wsdl'u'.wsdl'b'.xpdl'u'.xpdl'b'application/zip'u'application/zip'b'.zip'u'.zip'b'audio/basic'u'audio/basic'b'.au'u'.au'b'.snd'u'.snd'b'audio/mpeg'u'audio/mpeg'b'.mp3'u'.mp3'b'.mp2'u'.mp2'b'audio/x-aiff'u'audio/x-aiff'b'.aif'u'.aif'b'.aifc'u'.aifc'b'.aiff'u'.aiff'b'audio/x-pn-realaudio'u'audio/x-pn-realaudio'b'.ra'u'.ra'b'audio/x-wav'u'audio/x-wav'b'.wav'u'.wav'b'image/bmp'u'image/bmp'b'.bmp'u'.bmp'b'image/gif'u'image/gif'b'.gif'u'.gif'b'image/ief'u'image/ie
f'b'.ief'u'.ief'b'image/jpeg'u'image/jpeg'b'.jpg'u'.jpg'b'.jpe'u'.jpe'b'.jpeg'u'.jpeg'b'image/png'u'image/png'b'.png'u'.png'b'image/svg+xml'u'image/svg+xml'b'.svg'u'.svg'b'image/tiff'u'image/tiff'b'.tiff'u'.tiff'b'.tif'u'.tif'b'image/vnd.microsoft.icon'u'image/vnd.microsoft.icon'b'.ico'u'.ico'b'image/x-cmu-raster'u'image/x-cmu-raster'b'.ras'u'.ras'b'image/x-ms-bmp'u'image/x-ms-bmp'b'image/x-portable-anymap'u'image/x-portable-anymap'b'.pnm'u'.pnm'b'image/x-portable-bitmap'u'image/x-portable-bitmap'b'.pbm'u'.pbm'b'image/x-portable-graymap'u'image/x-portable-graymap'b'.pgm'u'.pgm'b'image/x-portable-pixmap'u'image/x-portable-pixmap'b'.ppm'u'.ppm'b'image/x-rgb'u'image/x-rgb'b'.rgb'u'.rgb'b'image/x-xbitmap'u'image/x-xbitmap'b'.xbm'u'.xbm'b'image/x-xpixmap'u'image/x-xpixmap'b'.xpm'u'.xpm'b'image/x-xwindowdump'u'image/x-xwindowdump'b'.xwd'u'.xwd'b'.eml'u'.eml'b'.mht'u'.mht'b'.mhtml'u'.mhtml'b'.nws'u'.nws'b'text/css'u'text/css'b'.css'u'.css'b'text/csv'u'text/csv'b'.csv'u'.csv'b'text/html'u'text/html'b'.html'u'.html'b'.htm'u'.htm'b'.bat'u'.bat'b'.h'u'.h'b'.ksh'u'.ksh'b'.pl'u'.pl'b'text/richtext'u'text/richtext'b'.rtx'u'.rtx'b'text/tab-separated-values'u'text/tab-separated-values'b'.tsv'u'.tsv'b'text/x-python'u'text/x-python'b'text/x-setext'u'text/x-setext'b'.etx'u'.etx'b'text/x-sgml'u'text/x-sgml'b'.sgm'u'.sgm'b'.sgml'u'.sgml'b'text/x-vcard'u'text/x-vcard'b'.vcf'u'.vcf'b'.xml'u'.xml'b'video/mp4'u'video/mp4'b'.mp4'u'.mp4'b'video/mpeg'u'video/mpeg'b'.mpeg'u'.mpeg'b'.m1v'u'.m1v'b'.mpa'u'.mpa'b'.mpe'u'.mpe'b'.mpg'u'.mpg'b'video/quicktime'u'video/quicktime'b'.mov'u'.mov'b'.qt'u'.qt'b'video/webm'u'video/webm'b'.webm'u'.webm'b'video/x-msvideo'u'video/x-msvideo'b'.avi'u'.avi'b'video/x-sgi-movie'u'video/x-sgi-movie'b'.movie'u'.movie'b'application/rtf'u'application/rtf'b'.rtf'u'.rtf'b'audio/midi'u'audio/midi'b'.midi'u'.midi'b'.mid'u'.mid'b'image/jpg'u'image/jpg'b'image/pict'u'image/pict'b'.pict'u'.pict'b'.pct'u'.pct'b'.pic'u'.pic'b'text/xul'u'text/xul'b'.xul'u'.xul'b'Usage: mimetypes.py [options] type + +Options: + --help / -h -- print this message and exit + --lenient / -l -- additionally search of some common, but non-standard + types. + --extension / -e -- guess extension instead of type + +More than one type argument may be given. +'u'Usage: mimetypes.py [options] type + +Options: + --help / -h -- print this message and exit + --lenient / -l -- additionally search of some common, but non-standard + types. + --extension / -e -- guess extension instead of type + +More than one type argument may be given. 
+'b'hle'u'hle'b'lenient'u'lenient'b'extension'u'extension'b'-h'u'-h'b'--help'u'--help'b'--lenient'u'--lenient'b'--extension'u'--extension'b'I don't know anything about type'u'I don't know anything about type'b'type:'u'type:'b'encoding:'u'encoding:'u'mimetypes'An object-oriented interface to .netrc files.shlexNetrcParseErrorException raised on syntax errors in the .netrc file.%s (%s, line %s)default_netrc.netrclexerwordchars!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~commenterssaved_linenoget_tokeninstreamentrynamemacdef +bad toplevel token %raccountpush_tokenmalformed %s entry %s terminated by %sst_uidgetuidgetpwuidfowneruid %s~/.netrc file owner (%s) does not match current user (%s)"~/.netrc file owner (%s) does not match"" current user (%s)"~/.netrc access too permissive: access permissions must restrict access to only the owner"~/.netrc access too permissive: access"" permissions must restrict access to only"" the owner"bad follower token %rReturn a (user, account, password) tuple for given host.Dump the class data in the format of a .netrc file.repmachine + login account password macdef # Module and documentation by Eric S. Raymond, 21 Dec 1998# Look for a machine, default, or macdef top-level keyword# Just skip to end of macdefs# We're looking at start of an entry for a named machine or default.b'An object-oriented interface to .netrc files.'u'An object-oriented interface to .netrc files.'b'netrc'u'netrc'b'NetrcParseError'u'NetrcParseError'b'Exception raised on syntax errors in the .netrc file.'u'Exception raised on syntax errors in the .netrc file.'b'%s (%s, line %s)'u'%s (%s, line %s)'u'~'b'.netrc'u'.netrc'b'!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~'u'!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~'b'machine'u'machine'b'macdef'u'macdef'b' +'u' +'b'bad toplevel token %r'u'bad toplevel token %r'b'malformed %s entry %s terminated by %s'u'malformed %s entry %s terminated by %s'b'login'u'login'b'account'u'account'b'password'u'password'b'uid %s'u'uid %s'b'~/.netrc file owner (%s) does not match current user (%s)'u'~/.netrc file owner (%s) does not match current user (%s)'b'~/.netrc access too permissive: access permissions must restrict access to only the owner'u'~/.netrc access too permissive: access permissions must restrict access to only the owner'b'bad follower token %r'u'bad follower token %r'b'Return a (user, account, password) tuple for given host.'u'Return a (user, account, password) tuple for given host.'b'Dump the class data in the format of a .netrc file.'u'Dump the class data in the format of a .netrc file.'b'machine 'u'machine 'b' + login 'u' + login 'b' account 'u' account 'b' password 'u' password 'b'macdef 'u'macdef 'An NNTP client class based on: +- RFC 977: Network News Transfer Protocol +- RFC 2980: Common NNTP Extensions +- RFC 3977: Network News Transfer Protocol (version 2) + +Example: + +>>> from nntplib import NNTP +>>> s = NNTP('news') +>>> resp, count, first, last, name = s.group('comp.lang.python') +>>> print('Group', name, 'has', count, 'articles, range', first, 'to', last) +Group comp.lang.python has 51 articles, range 5770 to 5821 +>>> resp, subs = s.xhdr('subject', '{0}-{1}'.format(first, last)) +>>> resp = s.quit() +>>> + +Here 'resp' is the server response line. +Error responses are turned into exceptions. + +To post an article from a file: +>>> f = open(filename, 'rb') # file containing article, including header +>>> resp = s.post(f) +>>> + +For descriptions of all methods, read the comments in the code below. 
+Note that all arguments and return values representing article numbers +are strings, not numbers, since they are rarely used for calculations. +_have_sslemail.header_email_decode_headerNNTPNNTPErrorNNTPReplyErrorNNTPPermanentErrorNNTPProtocolErrorNNTPDataErrorBase class for all nntplib exceptionsNo response givenUnexpected [123]xx reply4xx errors5xx errorsResponse does not begin with [1-5]Error in response data119NNTP_PORT563NNTP_SSL_PORT282_LONGRESPmessage-idreferences:bytes:lines_DEFAULT_OVERVIEW_FMT_OVERVIEW_FMT_ALTERNATIVES_CRLFGroupInfoArticleInfomessage_idTakes a unicode string representing a munged header value + and decodes it as a (possibly non-ASCII) readable value._parse_overview_fmtParse a list of string representing the response to LIST OVERVIEW.FMT + and return a list of header/metadata names. + Raises NNTPDataError if the response is not compliant + (cf. RFC 3977, section 8.4).LIST OVERVIEW.FMT response too shortLIST OVERVIEW.FMT redefines default fields_parse_overviewdata_process_funcParse the response to an OVER or XOVER command according to the + overview format `fmt`.n_defaultsoverviewarticle_numberfield_nameis_metadataOVER/XOVER response doesn't include names of additional headers"OVER/XOVER response doesn't include ""names of additional headers"_parse_datetimedate_strtime_strParse a pair of (date, time) strings, and return a datetime object. + If only the date is given, it is assumed to be date and time + concatenated together (e.g. response to the DATE command). + _unparse_datetimelegacyFormat a date or datetime object as a pair of (date, time) strings + in the format required by the NEWNEWS and NEWGROUPS commands. If a + date object is passed, the time is assumed to be midnight (00h00). + + The returned representation depends on the legacy flag: + * if legacy is False (the default): + date has the YYYYMMDD format and time the HHMMSS format + * if legacy is True: + date has the YYMMDD format and time the HHMMSS format. + RFC 3977 compliant servers should understand both formats; therefore, + legacy is only needed when talking to old servers. + 000000{0.hour:02d}{0.minute:02d}{0.second:02d}{0:02d}{1.month:02d}{1.day:02d}{0:04d}{1.month:02d}{1.day:02d}_encrypt_onhostnameWrap a socket in SSL/TLS. Arguments: + - sock: Socket to wrap + - context: SSL context to use for the encrypted connection + Returns: + - sock: New, encrypted socket. + _NNTPBasereadermodeInitialize an instance. Arguments: + - file: file-like object (open for read/write in binary mode) + - host: hostname of the server + - readermode: if true, send 'mode reader' command after + connecting. + - timeout: timeout (in seconds) used for socket connections + + readermode is sometimes necessary if you are connecting to an + NNTP server on the local machine and intend to call + reader-specific commands, such as `group'. If you get + unexpected NNTPPermanentErrors, you might need to set + readermode. + _getresp_capsgetcapabilitiesreadermode_afterauthREADER_setreadermodetls_onauthenticatedis_connectedGet the welcome message from the server + (this is read and squirreled away by __init__()). + If the response code is 200, posting is allowed; + if it 201, posting is not allowed.Get the server capabilities, as read by __init__(). + If the CAPABILITIES command is not supported, an empty dict is + returned.nntp_versionnntp_implementationcapabilitiescapsIMPLEMENTATIONSet the debugging level. Argument 'level' means: + 0: no debugging output (default) + 1: print commands and responses but not body text etc. 
+ 2: also print raw lines read and sent before stripping CR/LF_putlineInternal: send one line to the server, appending CRLF. + The `line` must be a bytes-like object.nntplib.putline_putcmdInternal: send one command to the server (through _putline()). + The `line` must be a unicode string._getlinestrip_crlfInternal: return one line from the server, stripping _CRLF. + Raise EOFError if the connection is closed. + Returns a bytes object.line too longInternal: get a response from the server. + Raise various errors if the response indicates an error. + Returns a unicode string.123_getlongrespInternal: get a response plus following text from the server. + Raise various errors if the response indicates an error. + + Returns a (response, lines) tuple where `response` is a unicode + string and `lines` is a list of bytes objects. + If `file` is a file-like object, it must be open in binary mode. + openedFile. +terminators_shortcmdInternal: send a command and get the response. + Same return value as _getresp()._longcmdInternal: send a command and get the response plus following text. + Same return value as _getlongresp()._longcmdstringInternal: send a command and get the response plus following text. + Same as _longcmd() and _getlongresp(), except that the returned `lines` + are unicode strings rather than bytes objects. + _getoverviewfmtInternal: get the overview format. Queries the server if not + already done, else returns the cached value._cachedoverviewfmtLIST OVERVIEW.FMT_grouplistProcess a CAPABILITIES command. Not supported by all servers. + Return: + - resp: server response if successful + - caps: a dictionary mapping capability names to lists of tokens + (for example {'VERSION': ['2'], 'OVER': [], LIST: ['ACTIVE', 'HEADERS'] }) + CAPABILITIESnewgroupsProcess a NEWGROUPS command. Arguments: + - date: a date or datetime object + Return: + - resp: server response if successful + - list: list of newsgroup names + the date parameter must be a date or datetime object, not '{:40}'"the date parameter must be a date or datetime object, ""not '{:40}'"NEWGROUPS {0} {1}newnewsProcess a NEWNEWS command. Arguments: + - group: group name or '*' + - date: a date or datetime object + Return: + - resp: server response if successful + - list: list of message ids + NEWNEWS {0} {1} {2}group_patternProcess a LIST or LIST ACTIVE command. Arguments: + - group_pattern: a pattern indicating which groups to query + - file: Filename string or file object to store the result in + Returns: + - resp: server response if successful + - list: list of (group, last, first, flag) (strings) + LIST ACTIVE _getdescriptionsreturn_all^(?P[^ ]+)[ ]+(.*)$line_patLIST NEWSGROUPS XGTITLE raw_lineGet a description for a single group. If more than one + group matches ('group' is a pattern), return the first. If no + group matches, return an empty string. + + This elides the response code from the server, since it can + only be '215' or '285' (for xgtitle) anyway. If the response + code is needed, use the 'descriptions' method. + + NOTE: This neither checks for a wildcard in 'group' nor does + it check whether the group actually exists.descriptionsGet descriptions for a range of groups.Process a GROUP command. Argument: + - group: the group name + Returns: + - resp: server response if successful + - count: number of articles + - first: first article number + - last: last article number + - name: the group name + GROUP Process a HELP command. 
Argument: + - file: Filename string or file object to store the result in + Returns: + - resp: server response if successful + - list: list of strings returned by the server in response to the + HELP command + HELP_statparseInternal: parse the response line of a STAT, NEXT, LAST, + ARTICLE, HEAD or BODY command.art_num_statcmdInternal: process a STAT, NEXT or LAST command.message_specProcess a STAT command. Argument: + - message_spec: article number or message id (if not specified, + the current article is selected) + Returns: + - resp: server response if successful + - art_num: the article number + - message_id: the message id + STAT {0}STATProcess a NEXT command. No arguments. Return as for STAT.Process a LAST command. No arguments. Return as for STAT._artcmdInternal: process a HEAD, BODY or ARTICLE command.Process a HEAD command. Argument: + - message_spec: article number or message id + - file: filename string or file object to store the headers in + Returns: + - resp: server response if successful + - ArticleInfo: (article number, message id, list of header lines) + HEAD {0}Process a BODY command. Argument: + - message_spec: article number or message id + - file: filename string or file object to store the body in + Returns: + - resp: server response if successful + - ArticleInfo: (article number, message id, list of body lines) + BODY {0}BODYarticleProcess an ARTICLE command. Argument: + - message_spec: article number or message id + - file: filename string or file object to store the article in + Returns: + - resp: server response if successful + - ArticleInfo: (article number, message id, list of article lines) + ARTICLE {0}ARTICLEslaveProcess a SLAVE command. Returns: + - resp: server response if successful + SLAVExhdrProcess an XHDR command (optional server extension). Arguments: + - hdr: the header type (e.g. 'subject') + - str: an article nr, a message id, or a range nr1-nr2 + - file: Filename string or file object to store the result in + Returns: + - resp: server response if successful + - list: list of (nr, value) strings + ^([0-9]+) ?(.*) +?XHDR {0} {1}remove_numberxoverProcess an XOVER command (optional server extension) Arguments: + - start: start of range + - end: end of range + - file: Filename string or file object to store the result in + Returns: + - resp: server response if successful + - list: list of dicts containing the response fields + XOVER {0}-{1}overProcess an OVER command. If the command isn't supported, fall + back to XOVER. Arguments: + - message_spec: + - either a message id, indicating the article to fetch + information about + - or a (start, end) tuple, indicating a range of article numbers; + if end is None, information up to the newest message will be + retrieved + - or None, indicating the current article number must be used + - file: Filename string or file object to store the result in + Returns: + - resp: server response if successful + - list: list of dicts containing the response fields + + NOTE: the "message id" form isn't supported by XOVER + OVERXOVER {0}-{1}xgtitleProcess an XGTITLE command (optional server extension) Arguments: + - group: group name wildcard (i.e. 
news.*) + Returns: + - resp: server response if successful + - list: list of (name,title) stringsThe XGTITLE extension is not actively used, use descriptions() instead"The XGTITLE extension is not actively used, ""use descriptions() instead"^([^ ]+)[ ]+(.*)$raw_linesxpathProcess an XPATH command (optional server extension) Arguments: + - id: Message id of article + Returns: + resp: server response if successful + path: directory path to article + The XPATH extension is not actively usedXPATH {0}resp_numProcess the DATE command. + Returns: + - resp: server response if successful + - date: datetime object + DATE_post. +Process a POST command. Arguments: + - data: bytes object, iterable or file containing the article + Returns: + - resp: server response if successfulihaveProcess an IHAVE command. Arguments: + - message_id: message-id of the article + - data: file containing the article + Returns: + - resp: server response if successful + Note that if the server refuses the article an exception is raised.IHAVE {0}Process a QUIT command and close the socket. Returns: + - resp: server response if successfulusenetrcAlready logged in.At least one of `user` and `usenetrc` must be specifiedauthinfo user 381authinfo pass 281mode reader480Process a STARTTLS command. Arguments: + - context: SSL context to use for the encrypted connection + TLS is already enabled.TLS cannot be started after authentication.STARTTLS382rwbTLS failed to start.Initialize an instance. Arguments: + - host: hostname to connect to + - port: port to connect to (default the standard NNTP port) + - user: username to authenticate with + - password: password to use with username + - readermode: if true, send 'mode reader' command after + connecting. + - usenetrc: allow loading username and password from ~/.netrc file + if not specified explicitly + - timeout: timeout (in seconds) used for socket connections + + readermode is sometimes necessary if you are connecting to an + NNTP server on the local machine and intend to call + reader-specific commands, such as `group'. If you get + unexpected NNTPPermanentErrors, you might need to set + readermode. + nntplib.connectNNTP_SSLThis works identically to NNTP.__init__, except for the change + in default port and the `ssl_context` argument for SSL connections. 
+ nntplib built-in demo - display the latest articles in a newsgroup--groupgmane.comp.python.generalgroup to fetch messages from (default: %(default)s)--servernews.gmane.ioNNTP server hostname (default: %(default)s)--portNNTP port number (default: %s / %s)-n--nb-articlesnumber of articles to fetch (default: %(default)s)-S--ssluse NNTP over SSLGrouphasarticles, rangecutlimnb_articlesoverviewsartnumauthor{:7} {:20} {:42} ({})# RFC 977 by Brian Kantor and Phil Lapsley.# xover, xgtitle, xpath, date methods by Kevan Heydon# Incompatible changes from the 2.x nntplib:# - all commands are encoded as UTF-8 data (using the "surrogateescape"# error handler), except for raw message data (POST, IHAVE)# - all responses are decoded as UTF-8 data (using the "surrogateescape"# error handler), except for raw message data (ARTICLE, HEAD, BODY)# - the `file` argument to various methods is keyword-only# - NNTP.date() returns a datetime object# - NNTP.newgroups() and NNTP.newnews() take a datetime (or date) object,# rather than a pair of (date, time) strings.# - NNTP.newgroups() and NNTP.list() return a list of GroupInfo named tuples# - NNTP.descriptions() returns a dict mapping group names to descriptions# - NNTP.xover() returns a list of dicts mapping field names (header or metadata)# to field values; each dict representing a message overview.# - NNTP.article(), NNTP.head() and NNTP.body() return a (response, ArticleInfo)# tuple.# - the "internal" methods have been marked private (they now start with# an underscore)# Other changes from the 2.x/3.1 nntplib:# - automatic querying of capabilities at connect# - New method NNTP.getcapabilities()# - New method NNTP.over()# - New helper function decode_header()# - NNTP.post() and NNTP.ihave() accept file objects, bytes-like objects and# arbitrary iterables yielding lines.# - An extensive test suite :-)# - return structured data (GroupInfo etc.) everywhere# - support HDR# Imports# maximal line length when calling readline(). This is to prevent# reading arbitrary length lines. RFC 3977 limits NNTP line length to# 512 characters, including CRLF. We have selected 2048 just to be on# the safe side.# Exceptions raised when an error or invalid response is received# Standard port used by NNTP servers# Response numbers that are followed by additional text (e.g. article)# HELP# CAPABILITIES# LISTGROUP (also not multi-line with GROUP)# LIST# ARTICLE# HEAD, XHDR# BODY# OVER, XOVER# HDR# NEWNEWS# NEWGROUPS# XGTITLE# Default decoded value for LIST OVERVIEW.FMT if not supported# Alternative names allowed in LIST OVERVIEW.FMT response# Helper function(s)# Metadata name (e.g. ":bytes")# Header name (e.g. "Subject:" or "Xref:full")# Should we do something with the suffix?# XXX should we raise an error? Some servers might not# support LIST OVERVIEW.FMT and still return additional# headers.# Non-default header names are included in full in the response# (unless the field is totally empty)# RFC 3977 doesn't say how to interpret 2-char years. Assume that# there are no dates before 1970 on Usenet.# Generate a default SSL context if none was passed.# The classes themselves# UTF-8 is the character set for all NNTP commands and responses: they# are automatically encoded (when sending) and decoded (and receiving)# by this class.# However, some multi-line data blocks can contain arbitrary bytes (for# example, latin-1 or utf-16 data in the body of a message). 
Commands# taking (POST, IHAVE) or returning (HEAD, BODY, ARTICLE) raw message# data will therefore only accept and produce bytes objects.# Furthermore, since there could be non-compliant servers out there,# we use 'surrogateescape' as the error handler for fault tolerance# and easy round-tripping. This could be useful for some applications# (e.g. NNTP gateways).# Inquire about capabilities (RFC 3977).# 'MODE READER' is sometimes necessary to enable 'reader' mode.# However, the order in which 'MODE READER' and 'AUTHINFO' need to# arrive differs between some NNTP servers. If _setreadermode() fails# with an authorization failed error, it will set this to True;# the login() routine will interpret that as a request to try again# after performing its normal function.# Enable only if we're not already in READER mode anyway.# Capabilities might have changed after MODE READER# RFC 4642 2.2.2: Both the client and the server MUST know if there is# a TLS session active. A client MUST NOT attempt to start a TLS# session if a TLS session is already active.# Log in and encryption setup order is left to subclasses.# Server doesn't support capabilities# The server can advertise several supported versions,# choose the highest.# If a string was passed then open a file with that name# XXX lines = None instead?# If this method created the file, then it must close it# Not supported by server?# Parse lines into "group last first flag"# Try the more std (acc. to RFC2980) LIST NEWSGROUPS first# Now the deprecated XGTITLE. This either raises an error# or succeeds with the same output structure as LIST# NEWSGROUPS.# Nothing found# Raises a specific exception if posting is not allowed# We don't use _putline() because:# - we don't want additional CRLF if the file or iterable is already# in the right format# - we don't want a spurious flush() after each line is written# If no login/password was specified but netrc was requested,# try to get them from ~/.netrc# Presume that if .netrc has an entry, NNRP authentication is required.# Perform NNTP authentication if needed.# Capabilities might have changed after login# Attempt to send mode reader if it was requested after login.# Only do so if we're not in reader mode already.# Error 5xx, probably 'not implemented'# Need authorization before 'mode reader'# Per RFC 4642, STARTTLS MUST NOT be sent after authentication or if# a TLS session already exists.# Capabilities may change after TLS starts up, so ask for them# again.# Test retrieval when run as a script.b'An NNTP client class based on: +- RFC 977: Network News Transfer Protocol +- RFC 2980: Common NNTP Extensions +- RFC 3977: Network News Transfer Protocol (version 2) + +Example: + +>>> from nntplib import NNTP +>>> s = NNTP('news') +>>> resp, count, first, last, name = s.group('comp.lang.python') +>>> print('Group', name, 'has', count, 'articles, range', first, 'to', last) +Group comp.lang.python has 51 articles, range 5770 to 5821 +>>> resp, subs = s.xhdr('subject', '{0}-{1}'.format(first, last)) +>>> resp = s.quit() +>>> + +Here 'resp' is the server response line. +Error responses are turned into exceptions. + +To post an article from a file: +>>> f = open(filename, 'rb') # file containing article, including header +>>> resp = s.post(f) +>>> + +For descriptions of all methods, read the comments in the code below. +Note that all arguments and return values representing article numbers +are strings, not numbers, since they are rarely used for calculations. 
+'u'An NNTP client class based on: +- RFC 977: Network News Transfer Protocol +- RFC 2980: Common NNTP Extensions +- RFC 3977: Network News Transfer Protocol (version 2) + +Example: + +>>> from nntplib import NNTP +>>> s = NNTP('news') +>>> resp, count, first, last, name = s.group('comp.lang.python') +>>> print('Group', name, 'has', count, 'articles, range', first, 'to', last) +Group comp.lang.python has 51 articles, range 5770 to 5821 +>>> resp, subs = s.xhdr('subject', '{0}-{1}'.format(first, last)) +>>> resp = s.quit() +>>> + +Here 'resp' is the server response line. +Error responses are turned into exceptions. + +To post an article from a file: +>>> f = open(filename, 'rb') # file containing article, including header +>>> resp = s.post(f) +>>> + +For descriptions of all methods, read the comments in the code below. +Note that all arguments and return values representing article numbers +are strings, not numbers, since they are rarely used for calculations. +'b'NNTP'u'NNTP'b'NNTPError'u'NNTPError'b'NNTPReplyError'u'NNTPReplyError'b'NNTPTemporaryError'u'NNTPTemporaryError'b'NNTPPermanentError'u'NNTPPermanentError'b'NNTPProtocolError'u'NNTPProtocolError'b'NNTPDataError'u'NNTPDataError'b'Base class for all nntplib exceptions'u'Base class for all nntplib exceptions'b'No response given'u'No response given'b'Unexpected [123]xx reply'u'Unexpected [123]xx reply'b'4xx errors'u'4xx errors'b'5xx errors'u'5xx errors'b'Response does not begin with [1-5]'u'Response does not begin with [1-5]'b'Error in response data'u'Error in response data'b'100'u'100'b'101'u'101'b'211'u'211'b'215'u'215'b'220'u'220'b'221'u'221'b'222'u'222'b'224'u'224'b'230'u'230'b'282'u'282'b'subject'u'subject'b'date'u'date'b'message-id'u'message-id'b'references'u'references'b':bytes'u':bytes'b':lines'u':lines'b'bytes'u'bytes'b'lines'u'lines'b'GroupInfo'u'GroupInfo'b'flag'u'flag'b'ArticleInfo'u'ArticleInfo'b'number'u'number'b'message_id'u'message_id'b'Takes a unicode string representing a munged header value + and decodes it as a (possibly non-ASCII) readable value.'u'Takes a unicode string representing a munged header value + and decodes it as a (possibly non-ASCII) readable value.'b'Parse a list of string representing the response to LIST OVERVIEW.FMT + and return a list of header/metadata names. + Raises NNTPDataError if the response is not compliant + (cf. RFC 3977, section 8.4).'u'Parse a list of string representing the response to LIST OVERVIEW.FMT + and return a list of header/metadata names. + Raises NNTPDataError if the response is not compliant + (cf. RFC 3977, section 8.4).'b'LIST OVERVIEW.FMT response too short'u'LIST OVERVIEW.FMT response too short'b'LIST OVERVIEW.FMT redefines default fields'u'LIST OVERVIEW.FMT redefines default fields'b'Parse the response to an OVER or XOVER command according to the + overview format `fmt`.'u'Parse the response to an OVER or XOVER command according to the + overview format `fmt`.'b'OVER/XOVER response doesn't include names of additional headers'u'OVER/XOVER response doesn't include names of additional headers'b'Parse a pair of (date, time) strings, and return a datetime object. + If only the date is given, it is assumed to be date and time + concatenated together (e.g. response to the DATE command). + 'u'Parse a pair of (date, time) strings, and return a datetime object. + If only the date is given, it is assumed to be date and time + concatenated together (e.g. response to the DATE command). 
+ 'b'Format a date or datetime object as a pair of (date, time) strings + in the format required by the NEWNEWS and NEWGROUPS commands. If a + date object is passed, the time is assumed to be midnight (00h00). + + The returned representation depends on the legacy flag: + * if legacy is False (the default): + date has the YYYYMMDD format and time the HHMMSS format + * if legacy is True: + date has the YYMMDD format and time the HHMMSS format. + RFC 3977 compliant servers should understand both formats; therefore, + legacy is only needed when talking to old servers. + 'u'Format a date or datetime object as a pair of (date, time) strings + in the format required by the NEWNEWS and NEWGROUPS commands. If a + date object is passed, the time is assumed to be midnight (00h00). + + The returned representation depends on the legacy flag: + * if legacy is False (the default): + date has the YYYYMMDD format and time the HHMMSS format + * if legacy is True: + date has the YYMMDD format and time the HHMMSS format. + RFC 3977 compliant servers should understand both formats; therefore, + legacy is only needed when talking to old servers. + 'b'000000'u'000000'b'{0.hour:02d}{0.minute:02d}{0.second:02d}'u'{0.hour:02d}{0.minute:02d}{0.second:02d}'b'{0:02d}{1.month:02d}{1.day:02d}'u'{0:02d}{1.month:02d}{1.day:02d}'b'{0:04d}{1.month:02d}{1.day:02d}'u'{0:04d}{1.month:02d}{1.day:02d}'b'Wrap a socket in SSL/TLS. Arguments: + - sock: Socket to wrap + - context: SSL context to use for the encrypted connection + Returns: + - sock: New, encrypted socket. + 'u'Wrap a socket in SSL/TLS. Arguments: + - sock: Socket to wrap + - context: SSL context to use for the encrypted connection + Returns: + - sock: New, encrypted socket. + 'b'Initialize an instance. Arguments: + - file: file-like object (open for read/write in binary mode) + - host: hostname of the server + - readermode: if true, send 'mode reader' command after + connecting. + - timeout: timeout (in seconds) used for socket connections + + readermode is sometimes necessary if you are connecting to an + NNTP server on the local machine and intend to call + reader-specific commands, such as `group'. If you get + unexpected NNTPPermanentErrors, you might need to set + readermode. + 'u'Initialize an instance. Arguments: + - file: file-like object (open for read/write in binary mode) + - host: hostname of the server + - readermode: if true, send 'mode reader' command after + connecting. + - timeout: timeout (in seconds) used for socket connections + + readermode is sometimes necessary if you are connecting to an + NNTP server on the local machine and intend to call + reader-specific commands, such as `group'. If you get + unexpected NNTPPermanentErrors, you might need to set + readermode. + 'b'READER'u'READER'b'Get the welcome message from the server + (this is read and squirreled away by __init__()). + If the response code is 200, posting is allowed; + if it 201, posting is not allowed.'u'Get the welcome message from the server + (this is read and squirreled away by __init__()). + If the response code is 200, posting is allowed; + if it 201, posting is not allowed.'b'Get the server capabilities, as read by __init__(). + If the CAPABILITIES command is not supported, an empty dict is + returned.'u'Get the server capabilities, as read by __init__(). + If the CAPABILITIES command is not supported, an empty dict is + returned.'b'IMPLEMENTATION'u'IMPLEMENTATION'b'Set the debugging level. 
Argument 'level' means: + 0: no debugging output (default) + 1: print commands and responses but not body text etc. + 2: also print raw lines read and sent before stripping CR/LF'u'Set the debugging level. Argument 'level' means: + 0: no debugging output (default) + 1: print commands and responses but not body text etc. + 2: also print raw lines read and sent before stripping CR/LF'b'Internal: send one line to the server, appending CRLF. + The `line` must be a bytes-like object.'u'Internal: send one line to the server, appending CRLF. + The `line` must be a bytes-like object.'b'nntplib.putline'u'nntplib.putline'b'Internal: send one command to the server (through _putline()). + The `line` must be a unicode string.'u'Internal: send one command to the server (through _putline()). + The `line` must be a unicode string.'b'Internal: return one line from the server, stripping _CRLF. + Raise EOFError if the connection is closed. + Returns a bytes object.'u'Internal: return one line from the server, stripping _CRLF. + Raise EOFError if the connection is closed. + Returns a bytes object.'b'line too long'u'line too long'b'Internal: get a response from the server. + Raise various errors if the response indicates an error. + Returns a unicode string.'u'Internal: get a response from the server. + Raise various errors if the response indicates an error. + Returns a unicode string.'b'123'u'123'b'Internal: get a response plus following text from the server. + Raise various errors if the response indicates an error. + + Returns a (response, lines) tuple where `response` is a unicode + string and `lines` is a list of bytes objects. + If `file` is a file-like object, it must be open in binary mode. + 'u'Internal: get a response plus following text from the server. + Raise various errors if the response indicates an error. + + Returns a (response, lines) tuple where `response` is a unicode + string and `lines` is a list of bytes objects. + If `file` is a file-like object, it must be open in binary mode. + 'b'. +'b'Internal: send a command and get the response. + Same return value as _getresp().'u'Internal: send a command and get the response. + Same return value as _getresp().'b'Internal: send a command and get the response plus following text. + Same return value as _getlongresp().'u'Internal: send a command and get the response plus following text. + Same return value as _getlongresp().'b'Internal: send a command and get the response plus following text. + Same as _longcmd() and _getlongresp(), except that the returned `lines` + are unicode strings rather than bytes objects. + 'u'Internal: send a command and get the response plus following text. + Same as _longcmd() and _getlongresp(), except that the returned `lines` + are unicode strings rather than bytes objects. + 'b'Internal: get the overview format. Queries the server if not + already done, else returns the cached value.'u'Internal: get the overview format. Queries the server if not + already done, else returns the cached value.'b'LIST OVERVIEW.FMT'u'LIST OVERVIEW.FMT'b'Process a CAPABILITIES command. Not supported by all servers. + Return: + - resp: server response if successful + - caps: a dictionary mapping capability names to lists of tokens + (for example {'VERSION': ['2'], 'OVER': [], LIST: ['ACTIVE', 'HEADERS'] }) + 'u'Process a CAPABILITIES command. Not supported by all servers. 
+ Return: + - resp: server response if successful + - caps: a dictionary mapping capability names to lists of tokens + (for example {'VERSION': ['2'], 'OVER': [], LIST: ['ACTIVE', 'HEADERS'] }) + 'b'CAPABILITIES'u'CAPABILITIES'b'Process a NEWGROUPS command. Arguments: + - date: a date or datetime object + Return: + - resp: server response if successful + - list: list of newsgroup names + 'u'Process a NEWGROUPS command. Arguments: + - date: a date or datetime object + Return: + - resp: server response if successful + - list: list of newsgroup names + 'b'the date parameter must be a date or datetime object, not '{:40}''u'the date parameter must be a date or datetime object, not '{:40}''b'NEWGROUPS {0} {1}'u'NEWGROUPS {0} {1}'b'Process a NEWNEWS command. Arguments: + - group: group name or '*' + - date: a date or datetime object + Return: + - resp: server response if successful + - list: list of message ids + 'u'Process a NEWNEWS command. Arguments: + - group: group name or '*' + - date: a date or datetime object + Return: + - resp: server response if successful + - list: list of message ids + 'b'NEWNEWS {0} {1} {2}'u'NEWNEWS {0} {1} {2}'b'Process a LIST or LIST ACTIVE command. Arguments: + - group_pattern: a pattern indicating which groups to query + - file: Filename string or file object to store the result in + Returns: + - resp: server response if successful + - list: list of (group, last, first, flag) (strings) + 'u'Process a LIST or LIST ACTIVE command. Arguments: + - group_pattern: a pattern indicating which groups to query + - file: Filename string or file object to store the result in + Returns: + - resp: server response if successful + - list: list of (group, last, first, flag) (strings) + 'b'LIST ACTIVE 'u'LIST ACTIVE 'b'^(?P[^ ]+)[ ]+(.*)$'u'^(?P[^ ]+)[ ]+(.*)$'b'LIST NEWSGROUPS 'u'LIST NEWSGROUPS 'b'XGTITLE 'u'XGTITLE 'b'Get a description for a single group. If more than one + group matches ('group' is a pattern), return the first. If no + group matches, return an empty string. + + This elides the response code from the server, since it can + only be '215' or '285' (for xgtitle) anyway. If the response + code is needed, use the 'descriptions' method. + + NOTE: This neither checks for a wildcard in 'group' nor does + it check whether the group actually exists.'u'Get a description for a single group. If more than one + group matches ('group' is a pattern), return the first. If no + group matches, return an empty string. + + This elides the response code from the server, since it can + only be '215' or '285' (for xgtitle) anyway. If the response + code is needed, use the 'descriptions' method. + + NOTE: This neither checks for a wildcard in 'group' nor does + it check whether the group actually exists.'b'Get descriptions for a range of groups.'u'Get descriptions for a range of groups.'b'Process a GROUP command. Argument: + - group: the group name + Returns: + - resp: server response if successful + - count: number of articles + - first: first article number + - last: last article number + - name: the group name + 'u'Process a GROUP command. Argument: + - group: the group name + Returns: + - resp: server response if successful + - count: number of articles + - first: first article number + - last: last article number + - name: the group name + 'b'GROUP 'u'GROUP 'b'Process a HELP command. 
Argument: + - file: Filename string or file object to store the result in + Returns: + - resp: server response if successful + - list: list of strings returned by the server in response to the + HELP command + 'u'Process a HELP command. Argument: + - file: Filename string or file object to store the result in + Returns: + - resp: server response if successful + - list: list of strings returned by the server in response to the + HELP command + 'b'HELP'u'HELP'b'Internal: parse the response line of a STAT, NEXT, LAST, + ARTICLE, HEAD or BODY command.'u'Internal: parse the response line of a STAT, NEXT, LAST, + ARTICLE, HEAD or BODY command.'b'Internal: process a STAT, NEXT or LAST command.'u'Internal: process a STAT, NEXT or LAST command.'b'Process a STAT command. Argument: + - message_spec: article number or message id (if not specified, + the current article is selected) + Returns: + - resp: server response if successful + - art_num: the article number + - message_id: the message id + 'u'Process a STAT command. Argument: + - message_spec: article number or message id (if not specified, + the current article is selected) + Returns: + - resp: server response if successful + - art_num: the article number + - message_id: the message id + 'b'STAT {0}'u'STAT {0}'b'STAT'u'STAT'b'Process a NEXT command. No arguments. Return as for STAT.'u'Process a NEXT command. No arguments. Return as for STAT.'b'NEXT'u'NEXT'b'Process a LAST command. No arguments. Return as for STAT.'u'Process a LAST command. No arguments. Return as for STAT.'b'LAST'u'LAST'b'Internal: process a HEAD, BODY or ARTICLE command.'u'Internal: process a HEAD, BODY or ARTICLE command.'b'Process a HEAD command. Argument: + - message_spec: article number or message id + - file: filename string or file object to store the headers in + Returns: + - resp: server response if successful + - ArticleInfo: (article number, message id, list of header lines) + 'u'Process a HEAD command. Argument: + - message_spec: article number or message id + - file: filename string or file object to store the headers in + Returns: + - resp: server response if successful + - ArticleInfo: (article number, message id, list of header lines) + 'b'HEAD {0}'u'HEAD {0}'b'Process a BODY command. Argument: + - message_spec: article number or message id + - file: filename string or file object to store the body in + Returns: + - resp: server response if successful + - ArticleInfo: (article number, message id, list of body lines) + 'u'Process a BODY command. Argument: + - message_spec: article number or message id + - file: filename string or file object to store the body in + Returns: + - resp: server response if successful + - ArticleInfo: (article number, message id, list of body lines) + 'b'BODY {0}'u'BODY {0}'b'BODY'u'BODY'b'Process an ARTICLE command. Argument: + - message_spec: article number or message id + - file: filename string or file object to store the article in + Returns: + - resp: server response if successful + - ArticleInfo: (article number, message id, list of article lines) + 'u'Process an ARTICLE command. Argument: + - message_spec: article number or message id + - file: filename string or file object to store the article in + Returns: + - resp: server response if successful + - ArticleInfo: (article number, message id, list of article lines) + 'b'ARTICLE {0}'u'ARTICLE {0}'b'ARTICLE'u'ARTICLE'b'Process a SLAVE command. Returns: + - resp: server response if successful + 'u'Process a SLAVE command. 
Returns: + - resp: server response if successful + 'b'SLAVE'u'SLAVE'b'Process an XHDR command (optional server extension). Arguments: + - hdr: the header type (e.g. 'subject') + - str: an article nr, a message id, or a range nr1-nr2 + - file: Filename string or file object to store the result in + Returns: + - resp: server response if successful + - list: list of (nr, value) strings + 'u'Process an XHDR command (optional server extension). Arguments: + - hdr: the header type (e.g. 'subject') + - str: an article nr, a message id, or a range nr1-nr2 + - file: Filename string or file object to store the result in + Returns: + - resp: server response if successful + - list: list of (nr, value) strings + 'b'^([0-9]+) ?(.*) +?'u'^([0-9]+) ?(.*) +?'b'XHDR {0} {1}'u'XHDR {0} {1}'b'Process an XOVER command (optional server extension) Arguments: + - start: start of range + - end: end of range + - file: Filename string or file object to store the result in + Returns: + - resp: server response if successful + - list: list of dicts containing the response fields + 'u'Process an XOVER command (optional server extension) Arguments: + - start: start of range + - end: end of range + - file: Filename string or file object to store the result in + Returns: + - resp: server response if successful + - list: list of dicts containing the response fields + 'b'XOVER {0}-{1}'u'XOVER {0}-{1}'b'Process an OVER command. If the command isn't supported, fall + back to XOVER. Arguments: + - message_spec: + - either a message id, indicating the article to fetch + information about + - or a (start, end) tuple, indicating a range of article numbers; + if end is None, information up to the newest message will be + retrieved + - or None, indicating the current article number must be used + - file: Filename string or file object to store the result in + Returns: + - resp: server response if successful + - list: list of dicts containing the response fields + + NOTE: the "message id" form isn't supported by XOVER + 'u'Process an OVER command. If the command isn't supported, fall + back to XOVER. Arguments: + - message_spec: + - either a message id, indicating the article to fetch + information about + - or a (start, end) tuple, indicating a range of article numbers; + if end is None, information up to the newest message will be + retrieved + - or None, indicating the current article number must be used + - file: Filename string or file object to store the result in + Returns: + - resp: server response if successful + - list: list of dicts containing the response fields + + NOTE: the "message id" form isn't supported by XOVER + 'b'OVER'u'OVER'b'XOVER'u'XOVER'b' {0}-{1}'u' {0}-{1}'b'Process an XGTITLE command (optional server extension) Arguments: + - group: group name wildcard (i.e. news.*) + Returns: + - resp: server response if successful + - list: list of (name,title) strings'u'Process an XGTITLE command (optional server extension) Arguments: + - group: group name wildcard (i.e. 
news.*) + Returns: + - resp: server response if successful + - list: list of (name,title) strings'b'The XGTITLE extension is not actively used, use descriptions() instead'u'The XGTITLE extension is not actively used, use descriptions() instead'b'^([^ ]+)[ ]+(.*)$'u'^([^ ]+)[ ]+(.*)$'b'Process an XPATH command (optional server extension) Arguments: + - id: Message id of article + Returns: + resp: server response if successful + path: directory path to article + 'u'Process an XPATH command (optional server extension) Arguments: + - id: Message id of article + Returns: + resp: server response if successful + path: directory path to article + 'b'The XPATH extension is not actively used'u'The XPATH extension is not actively used'b'XPATH {0}'u'XPATH {0}'b'223'u'223'b'Process the DATE command. + Returns: + - resp: server response if successful + - date: datetime object + 'u'Process the DATE command. + Returns: + - resp: server response if successful + - date: datetime object + 'b'DATE'u'DATE'b'111'u'111'b'. +'b'Process a POST command. Arguments: + - data: bytes object, iterable or file containing the article + Returns: + - resp: server response if successful'u'Process a POST command. Arguments: + - data: bytes object, iterable or file containing the article + Returns: + - resp: server response if successful'b'Process an IHAVE command. Arguments: + - message_id: message-id of the article + - data: file containing the article + Returns: + - resp: server response if successful + Note that if the server refuses the article an exception is raised.'u'Process an IHAVE command. Arguments: + - message_id: message-id of the article + - data: file containing the article + Returns: + - resp: server response if successful + Note that if the server refuses the article an exception is raised.'b'IHAVE {0}'u'IHAVE {0}'b'Process a QUIT command and close the socket. Returns: + - resp: server response if successful'u'Process a QUIT command and close the socket. Returns: + - resp: server response if successful'b'Already logged in.'u'Already logged in.'b'At least one of `user` and `usenetrc` must be specified'u'At least one of `user` and `usenetrc` must be specified'b'authinfo user 'u'authinfo user 'b'381'u'381'b'authinfo pass 'u'authinfo pass 'b'281'u'281'b'mode reader'u'mode reader'b'480'u'480'b'Process a STARTTLS command. Arguments: + - context: SSL context to use for the encrypted connection + 'u'Process a STARTTLS command. Arguments: + - context: SSL context to use for the encrypted connection + 'b'TLS is already enabled.'u'TLS is already enabled.'b'TLS cannot be started after authentication.'u'TLS cannot be started after authentication.'b'STARTTLS'u'STARTTLS'b'382'u'382'b'rwb'u'rwb'b'TLS failed to start.'u'TLS failed to start.'b'Initialize an instance. Arguments: + - host: hostname to connect to + - port: port to connect to (default the standard NNTP port) + - user: username to authenticate with + - password: password to use with username + - readermode: if true, send 'mode reader' command after + connecting. + - usenetrc: allow loading username and password from ~/.netrc file + if not specified explicitly + - timeout: timeout (in seconds) used for socket connections + + readermode is sometimes necessary if you are connecting to an + NNTP server on the local machine and intend to call + reader-specific commands, such as `group'. If you get + unexpected NNTPPermanentErrors, you might need to set + readermode. + 'u'Initialize an instance. 
Arguments: + - host: hostname to connect to + - port: port to connect to (default the standard NNTP port) + - user: username to authenticate with + - password: password to use with username + - readermode: if true, send 'mode reader' command after + connecting. + - usenetrc: allow loading username and password from ~/.netrc file + if not specified explicitly + - timeout: timeout (in seconds) used for socket connections + + readermode is sometimes necessary if you are connecting to an + NNTP server on the local machine and intend to call + reader-specific commands, such as `group'. If you get + unexpected NNTPPermanentErrors, you might need to set + readermode. + 'b'nntplib.connect'u'nntplib.connect'b'This works identically to NNTP.__init__, except for the change + in default port and the `ssl_context` argument for SSL connections. + 'u'This works identically to NNTP.__init__, except for the change + in default port and the `ssl_context` argument for SSL connections. + 'b'NNTP_SSL'u'NNTP_SSL'b' nntplib built-in demo - display the latest articles in a newsgroup'u' nntplib built-in demo - display the latest articles in a newsgroup'b'--group'u'--group'b'gmane.comp.python.general'u'gmane.comp.python.general'b'group to fetch messages from (default: %(default)s)'u'group to fetch messages from (default: %(default)s)'b'--server'u'--server'b'news.gmane.io'u'news.gmane.io'b'NNTP server hostname (default: %(default)s)'u'NNTP server hostname (default: %(default)s)'b'--port'u'--port'b'NNTP port number (default: %s / %s)'u'NNTP port number (default: %s / %s)'b'-n'u'-n'b'--nb-articles'u'--nb-articles'b'number of articles to fetch (default: %(default)s)'u'number of articles to fetch (default: %(default)s)'b'-S'u'-S'b'--ssl'u'--ssl'b'use NNTP over SSL'u'use NNTP over SSL'b'Group'u'Group'b'has'u'has'b'articles, range'u'articles, range'b'{:7} {:20} {:42} ({})'u'{:7} {:20} {:42} ({})'u'nntplib'Common pathname manipulations, WindowsNT/95 version. + +Instead of importing this module directly, import os and refer to this +module as os.path. +.;C:\bindefpathnuldevnullgenericpathismountexpandvarssupports_unicode_filenamescommonpath_get_bothseps\/Normalize case of pathname. + + Makes all characters lowercase and all slashes into backslashes.Test whether a path is absolute\\?\colonresult_driveresult_pathp_drivep_pathSplit a pathname into drive/UNC sharepoint and relative path specifiers. + Returns a 2-tuple (drive_or_unc, path); either part may be empty. + + If you assign + result = splitdrive(p) + It is always true that: + result[0] + result[1] == p + + If the path contained a drive letter, drive_or_unc will contain everything + up to and including the colon. e.g. splitdrive("c:/dir") returns ("c:", "/dir") + + If the path contained a UNC path, the drive_or_unc will contain the host name + and share up to but not including the fourth directory separator character. + e.g. splitdrive("//host/computer/dir") returns ("//host/computer", "/dir") + + Paths cannot contain both a drive letter and a UNC path. + + normpSplit a pathname. + + Return tuple (head, tail) where tail is everything after the final slash. + Either part may be empty.Returns the final component of a pathnameReturns the directory component of a pathnameTest whether a path is a symbolic link. + This will always return false for Windows prior to 6.0. + Test whether a path exists. 
Returns True for broken symbolic links_getvolumepathnameTest whether a path is a mount point (a drive root, the root of a + share, or a mounted volume)Expand ~ and ~user constructs. + + If user or $HOME is unknown, do nothing.USERPROFILEuserhomeHOMEPATHHOMEDRIVEExpand shell variables of the forms $var, ${var} and %var%. + + Unknown variables are left unchanged._-varcharsbracerbracedollarenvironbpathlenNormalize path, eliminating double slashes, etc.\\.\special_prefixescomps_abspath_fallbackReturn the absolute version of a path as a fallback function in case + `nt._getfullpathname` is not available or raises OSError. See bpo-31047 for + more. + + getcwdbReturn the absolute version of a path._getfinalpathname_nt_readlink_readlink_deep439043924393allowed_winerrorold_path_getfinalpathname_nonstrict19201921new_path\\?\UNC\unc_prefixnew_unc_prefix\\.\NULhad_prefixinitial_winerrorspathReturn a relative version of a pathno path specifiedstart_abspath_absstart_drivestart_restpath_drivepath_restpath is on mount %r, start on mount %rstart_listpath_liste1e2rel_listGiven a sequence of path names, returns the longest common sub-path.commonpath() arg is an empty sequencedrivesplitssplit_pathsCan't mix absolute and relative pathsPaths don't have the same drivecommon_isdir# Module 'ntpath' -- common operations on WinNT/Win95 pathnames# strings representing various path-related bits and pieces# These are primarily for export; internally, they are hardcoded.# Should be set before imports for resolving cyclic dependency.# Normalize the case of a pathname and map slashes to backslashes.# Other normalizations (such as optimizing '../' away) are not done# (this is done by normpath).# Return whether a path is absolute.# Trivial in Posix, harder on Windows.# For Windows it is absolute if it starts with a slash or backslash (current# volume), or if a pathname after the volume-letter-and-colon or UNC-resource# starts with a slash or backslash.# Paths beginning with \\?\ are always absolute, but do not# necessarily contain a drive.# Join two (or more) paths.#23780: Ensure compatible data type even if p is null.# Second path is absolute# Different drives => ignore the first path entirely# Same drive in different case# Second path is relative to the first## add separator between UNC and non-absolute path# Split a path in a drive specification (a drive letter followed by a# colon) and the path specification.# It is always true that drivespec + pathspec == p# is a UNC path:# vvvvvvvvvvvvvvvvvvvv drive letter or UNC path# \\machine\mountpoint\directory\etc\...# directory ^^^^^^^^^^^^^^^# a UNC path can't have two slashes in a row# (after the initial two)# Split a path in head (everything up to the last '/') and tail (the# rest). After the trailing '/' is stripped, the invariant# join(head, tail) == p holds.# The resulting head won't end in '/' unless it is the root.# set i to index beyond p's last slash# now tail has no slashes# remove trailing slashes from head, unless it's all slashes# Return the tail (basename) part of a path.# Return the head (dirname) part of a path.# Is a path a symbolic link?# This will always return false on systems where os.lstat doesn't exist.# Being true for dangling symbolic links is also useful.# Is a path a mount point?# Any drive letter root (eg c:\)# Any share UNC (eg \\server\share)# Any volume mounted on a filesystem folder# No one method detects all three situations. Historically we've lexically# detected drive letter roots and share UNCs. 
The canonical approach to# detecting mounted volumes (querying the reparse tag) fails for the most# common case: drive letter roots. The alternative which uses GetVolumePathName# fails if the drive letter is the result of a SUBST.# Expand paths beginning with '~' or '~user'.# '~' means $HOME; '~user' means that user's home directory.# If the path doesn't begin with '~', or if the user or $HOME is unknown,# the path is returned unchanged (leaving error reporting to whatever# function is called with the expanded path as argument).# See also module 'glob' for expansion of *, ? and [...] in pathnames.# (A function should also be defined to do full *sh-style environment# variable expansion.)#~user# Expand paths containing shell variable substitutions.# The following rules apply:# - no expansion within single quotes# - '$$' is translated into '$'# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%# - ${varname} is accepted.# - $varname is accepted.# - %varname% is accepted.# - varnames can be made out of letters, digits and the characters '_-'# (though is not verified in the ${varname} and %varname% cases)# XXX With COMMAND.COM you can use any characters in a variable name,# XXX except '^|<>='.# no expansion within single quotes# variable or '%'# variable or '$$'# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.# Previously, this function also truncated pathnames to 8+3 format,# but as this module is called "ntpath", that's obviously wrong!# in the case of paths with these prefixes:# \\.\ -> device names# \\?\ -> literal paths# do not do any normalization, but return the path# unchanged apart from the call to os.fspath()# collapse initial backslashes# If the path is now empty, substitute '.'# Return an absolute path.# not running on Windows - mock up something sensible# use native Windows method on Windows# realpath is a no-op on systems without _getfinalpathname support.# These error codes indicate that we should stop reading links and# return the path we currently have.# 1: ERROR_INVALID_FUNCTION# 2: ERROR_FILE_NOT_FOUND# 3: ERROR_DIRECTORY_NOT_FOUND# 5: ERROR_ACCESS_DENIED# 21: ERROR_NOT_READY (implies drive with no media)# 32: ERROR_SHARING_VIOLATION (probably an NTFS paging file)# 50: ERROR_NOT_SUPPORTED (implies no support for reparse points)# 67: ERROR_BAD_NET_NAME (implies remote server unavailable)# 87: ERROR_INVALID_PARAMETER# 4390: ERROR_NOT_A_REPARSE_POINT# 4392: ERROR_INVALID_REPARSE_DATA# 4393: ERROR_REPARSE_TAG_INVALID# Links may be relative, so resolve them against their# own location# If it's something other than a symlink, we don't know# what it's actually going to be resolved against, so# just return the old path.# Stop on reparse points that are not symlinks# These error codes indicate that we should stop resolving the path# and return the value we currently have.# 50: ERROR_NOT_SUPPORTED# 123: ERROR_INVALID_NAME# 1920: ERROR_CANT_ACCESS_FILE# 1921: ERROR_CANT_RESOLVE_FILENAME (implies unfollowable symlink)# Non-strict algorithm is to find as much of the target directory# as we can and join the rest.# The OS could not resolve this path fully, so we attempt# to follow the link ourselves. If we succeed, join the tail# and return.# If we fail to readlink(), let's keep traversing# TODO (bpo-38186): Request the real file name from the directory# entry using FindFirstFileW. 
For now, we will return the path# as best we have it# bpo-38081: Special case for realpath(b'nul')# bpo-38081: Special case for realpath('nul')# The path returned by _getfinalpathname will always start with \\?\ -# strip off that prefix unless it was already provided on the original# path.# For UNC paths, the prefix will actually be \\?\UNC\# Handle that case as well.# Ensure that the non-prefixed path resolves to the same path# If the path does not exist and originally did not exist, then# strip the prefix anyway.# Win9x family and earlier have no Unicode filename support.# Work out how much of the filepath is shared by start and path.# Return the longest common sub-path of the sequence of paths given as input.# The function is case-insensitive and 'separator-insensitive', i.e. if the# only difference between two paths is the use of '\' versus '/' as separator,# they are deemed to be equal.# However, the returned path will have the standard '\' separator (even if the# given paths had the alternative '/' separator) and will have the case of the# first path given in the sequence. Additionally, any trailing separator is# stripped from the returned path.# Check that all drive letters or UNC paths match. The check is made only# now otherwise type errors for mixing strings and bytes would not be# caught.# The genericpath.isdir implementation uses os.stat and checks the mode# attribute to tell whether or not the path is a directory.# This is overkill on Windows - just pass the path to GetFileAttributes# and check the attribute from there.# Use genericpath.isdir as imported above.b'Common pathname manipulations, WindowsNT/95 version. + +Instead of importing this module directly, import os and refer to this +module as os.path. +'u'Common pathname manipulations, WindowsNT/95 version. + +Instead of importing this module directly, import os and refer to this +module as os.path. +'b'.;C:\bin'u'.;C:\bin'b'nul'u'nul'b'normcase'u'normcase'b'isabs'u'isabs'b'join'u'join'b'splitdrive'u'splitdrive'b'split'u'split'b'splitext'u'splitext'b'basename'u'basename'b'dirname'u'dirname'b'islink'u'islink'b'lexists'u'lexists'b'ismount'u'ismount'b'expanduser'u'expanduser'b'expandvars'u'expandvars'b'normpath'u'normpath'b'abspath'u'abspath'b'curdir'u'curdir'b'pardir'u'pardir'b'pathsep'u'pathsep'b'defpath'u'defpath'b'altsep'u'altsep'b'extsep'u'extsep'b'devnull'u'devnull'b'realpath'u'realpath'b'supports_unicode_filenames'u'supports_unicode_filenames'b'relpath'u'relpath'b'commonpath'u'commonpath'b'\/'u'\/'b'Normalize case of pathname. + + Makes all characters lowercase and all slashes into backslashes.'u'Normalize case of pathname. + + Makes all characters lowercase and all slashes into backslashes.'b'Test whether a path is absolute'u'Test whether a path is absolute'b'\\?\'u'\\?\'b'Split a pathname into drive/UNC sharepoint and relative path specifiers. + Returns a 2-tuple (drive_or_unc, path); either part may be empty. + + If you assign + result = splitdrive(p) + It is always true that: + result[0] + result[1] == p + + If the path contained a drive letter, drive_or_unc will contain everything + up to and including the colon. e.g. splitdrive("c:/dir") returns ("c:", "/dir") + + If the path contained a UNC path, the drive_or_unc will contain the host name + and share up to but not including the fourth directory separator character. + e.g. splitdrive("//host/computer/dir") returns ("//host/computer", "/dir") + + Paths cannot contain both a drive letter and a UNC path. 
+ + 'u'Split a pathname into drive/UNC sharepoint and relative path specifiers. + Returns a 2-tuple (drive_or_unc, path); either part may be empty. + + If you assign + result = splitdrive(p) + It is always true that: + result[0] + result[1] == p + + If the path contained a drive letter, drive_or_unc will contain everything + up to and including the colon. e.g. splitdrive("c:/dir") returns ("c:", "/dir") + + If the path contained a UNC path, the drive_or_unc will contain the host name + and share up to but not including the fourth directory separator character. + e.g. splitdrive("//host/computer/dir") returns ("//host/computer", "/dir") + + Paths cannot contain both a drive letter and a UNC path. + + 'b'Split a pathname. + + Return tuple (head, tail) where tail is everything after the final slash. + Either part may be empty.'u'Split a pathname. + + Return tuple (head, tail) where tail is everything after the final slash. + Either part may be empty.'b'Returns the final component of a pathname'u'Returns the final component of a pathname'b'Returns the directory component of a pathname'u'Returns the directory component of a pathname'b'Test whether a path is a symbolic link. + This will always return false for Windows prior to 6.0. + 'u'Test whether a path is a symbolic link. + This will always return false for Windows prior to 6.0. + 'b'Test whether a path exists. Returns True for broken symbolic links'u'Test whether a path exists. Returns True for broken symbolic links'b'Test whether a path is a mount point (a drive root, the root of a + share, or a mounted volume)'u'Test whether a path is a mount point (a drive root, the root of a + share, or a mounted volume)'b'Expand ~ and ~user constructs. + + If user or $HOME is unknown, do nothing.'u'Expand ~ and ~user constructs. + + If user or $HOME is unknown, do nothing.'b'USERPROFILE'u'USERPROFILE'b'HOMEPATH'u'HOMEPATH'b'HOMEDRIVE'u'HOMEDRIVE'b'Expand shell variables of the forms $var, ${var} and %var%. + + Unknown variables are left unchanged.'u'Expand shell variables of the forms $var, ${var} and %var%. + + Unknown variables are left unchanged.'b'_-'u'_-'b'environb'u'environb'b'Normalize path, eliminating double slashes, etc.'u'Normalize path, eliminating double slashes, etc.'b'\\.\'u'\\.\'b'Return the absolute version of a path as a fallback function in case + `nt._getfullpathname` is not available or raises OSError. See bpo-31047 for + more. + + 'u'Return the absolute version of a path as a fallback function in case + `nt._getfullpathname` is not available or raises OSError. See bpo-31047 for + more. + + 'b'Return the absolute version of a path.'u'Return the absolute version of a path.'b'\\?\UNC\'b'\\.\NUL'u'\\?\UNC\'u'\\.\NUL'b'getwindowsversion'u'getwindowsversion'b'Return a relative version of a path'u'Return a relative version of a path'b'no path specified'u'no path specified'b'path is on mount %r, start on mount %r'u'path is on mount %r, start on mount %r'b'Given a sequence of path names, returns the longest common sub-path.'u'Given a sequence of path names, returns the longest common sub-path.'b'commonpath() arg is an empty sequence'u'commonpath() arg is an empty sequence'b'Can't mix absolute and relative paths'u'Can't mix absolute and relative paths'b'Paths don't have the same drive'u'Paths don't have the same drive'u'ntpath'Convert a NT pathname to a file URL and vice versa. + +This module only exists to provide OS-specific code +for urllib.requests, thus do not use directly. 
+OS-specific conversion from a relative URL of the 'file' scheme + to a file system path; not recommended for general use.////componentsBad URL: OS-specific conversion from a file system path to a relative URL + of the 'file' scheme; not recommended for general use.UNC\Bad path: ///# Testing is done through test_urllib.# e.g.# ///C|/foo/bar/spam.foo# and# ///C:/foo/bar/spam.foo# become# C:\foo\bar\spam.foo# Windows itself uses ":" even in URLs.# No drive specifier, just convert slashes# path is something like ////host/path/on/remote/host# convert this to \\host\path\on\remote\host# (notice halving of slashes at the start of the path)# make sure not to convert quoted slashes :-)# Issue #11474 - handing url such as |c/|# becomes# First, clean up some special forms. We are going to sacrifice# the additional information anyway# No drive specifier, just convert slashes and quote the name# path is something like \\host\path\on\remote\host# convert this to ////host/path/on/remote/host# (notice doubling of slashes at the start of the path)b'Convert a NT pathname to a file URL and vice versa. + +This module only exists to provide OS-specific code +for urllib.requests, thus do not use directly. +'u'Convert a NT pathname to a file URL and vice versa. + +This module only exists to provide OS-specific code +for urllib.requests, thus do not use directly. +'b'OS-specific conversion from a relative URL of the 'file' scheme + to a file system path; not recommended for general use.'u'OS-specific conversion from a relative URL of the 'file' scheme + to a file system path; not recommended for general use.'b'////'u'////'b'Bad URL: 'u'Bad URL: 'b'OS-specific conversion from a file system path to a relative URL + of the 'file' scheme; not recommended for general use.'u'OS-specific conversion from a file system path to a relative URL + of the 'file' scheme; not recommended for general use.'b'UNC\'u'UNC\'b'Bad path: 'u'Bad path: 'b'///'u'///'u'nturl2path'Abstract Base Classes (ABCs) for numbers, according to PEP 3141. + +TODO: Fill out more detailed documentation on the operators.RealIntegralAll numbers inherit from this class. + + If you just want to check if an argument x is a number, without + caring what kind, use isinstance(x, Number). + Complex defines the operations that work on the builtin complex type. + + In short, those are: a conversion to complex, .real, .imag, +, -, + *, /, abs(), .conjugate, ==, and !=. + + If it is given heterogeneous arguments, and doesn't have special + knowledge about them, it should fall back to the builtin complex + type as described below. + Return a builtin complex instance. Called for complex(self).True if self != 0. Called for bool(self).Retrieve the real component of this number. + + This should subclass Real. + Retrieve the imaginary component of this number. + + This should subclass Real. + self + otherother + self-self+selfself - otherother - selfself * otherother * selfself / other: Should promote to float when necessary.other / selfself**exponent; should promote to float or complex when necessary.base ** selfReturns the Real distance from 0. Called for abs(self).(x+y*i).conjugate() returns (x-y*i).self == otherTo Complex, Real adds the operations that work on real numbers. + + In short, those are: a conversion to float, trunc(), divmod, + %, <, <=, >, and >=. + + Real also provides defaults for the derived operations. + Any Real can be converted to a native float object. + + Called for float(self).trunc(self): Truncates self to an Integral. 
+ + Returns an Integral i such that: + * i>0 iff self>0; + * abs(i) <= abs(self); + * for any Integral j satisfying the first two conditions, + abs(i) >= abs(j) [i.e. i has "maximal" abs among those]. + i.e. "truncate towards 0". + Finds the greatest Integral <= self.Finds the least Integral >= self.ndigitsRounds self to ndigits decimal places, defaulting to 0. + + If ndigits is omitted or None, returns an Integral, otherwise + returns a Real. Rounds half toward even. + divmod(self, other): The pair (self // other, self % other). + + Sometimes this can be computed faster than the pair of + operations. + divmod(other, self): The pair (self // other, self % other). + + Sometimes this can be computed faster than the pair of + operations. + self // other: The floor() of self/other.other // self: The floor() of other/self.self % otherother % selfself < other + + < on Reals defines a total ordering, except perhaps for NaN.self <= othercomplex(self) == complex(float(self), 0)Real numbers are their real component.Real numbers have no imaginary component.Conjugate is a no-op for Reals..numerator and .denominator should be in lowest terms.float(self) = self.numerator / self.denominator + + It's important that this conversion use the integer's "true" + division rather than casting one side to float before dividing + so that ratios of huge integers convert without overflowing. + + Integral adds a conversion to int and the bit-string operations.int(self)Called whenever an index is needed, such as in slicingself ** exponent % modulus, but maybe faster. + + Accept the modulus argument if you want to support the + 3-argument version of pow(). Raise a TypeError if exponent < 0 + or any argument isn't Integral. Otherwise, just implement the + 2-argument version described in Complex. + self << otherother << selfself >> otherother >> selfself & otherother & selfself ^ otherother ^ selfself | otherother | self~selffloat(self) == float(int(self))Integers are their own numerators.Integers have a denominator of 1.# Concrete numeric types must provide their own hash implementation## Notes on Decimal## ----------------## Decimal has all of the methods specified by the Real abc, but it should## not be registered as a Real because decimals do not interoperate with## binary floats (i.e. Decimal('3.14') + 2.71828 is undefined). But,## abstract reals are expected to interoperate (i.e. R1 + R2 should be## expected to work if R1 and R2 are both Reals).# Concrete implementations of Complex abstract methods.# Concrete implementation of Real's conversion to float.# Concrete implementations of Rational and Real abstract methods.b'Abstract Base Classes (ABCs) for numbers, according to PEP 3141. + +TODO: Fill out more detailed documentation on the operators.'u'Abstract Base Classes (ABCs) for numbers, according to PEP 3141. + +TODO: Fill out more detailed documentation on the operators.'b'Number'u'Number'b'Complex'u'Complex'b'Real'u'Real'b'Rational'u'Rational'b'Integral'u'Integral'b'All numbers inherit from this class. + + If you just want to check if an argument x is a number, without + caring what kind, use isinstance(x, Number). + 'u'All numbers inherit from this class. + + If you just want to check if an argument x is a number, without + caring what kind, use isinstance(x, Number). + 'b'Complex defines the operations that work on the builtin complex type. + + In short, those are: a conversion to complex, .real, .imag, +, -, + *, /, abs(), .conjugate, ==, and !=. 
+ + If it is given heterogeneous arguments, and doesn't have special + knowledge about them, it should fall back to the builtin complex + type as described below. + 'u'Complex defines the operations that work on the builtin complex type. + + In short, those are: a conversion to complex, .real, .imag, +, -, + *, /, abs(), .conjugate, ==, and !=. + + If it is given heterogeneous arguments, and doesn't have special + knowledge about them, it should fall back to the builtin complex + type as described below. + 'b'Return a builtin complex instance. Called for complex(self).'u'Return a builtin complex instance. Called for complex(self).'b'True if self != 0. Called for bool(self).'u'True if self != 0. Called for bool(self).'b'Retrieve the real component of this number. + + This should subclass Real. + 'u'Retrieve the real component of this number. + + This should subclass Real. + 'b'Retrieve the imaginary component of this number. + + This should subclass Real. + 'u'Retrieve the imaginary component of this number. + + This should subclass Real. + 'b'self + other'u'self + other'b'other + self'u'other + self'b'-self'u'-self'b'+self'u'+self'b'self - other'u'self - other'b'other - self'u'other - self'b'self * other'u'self * other'b'other * self'u'other * self'b'self / other: Should promote to float when necessary.'u'self / other: Should promote to float when necessary.'b'other / self'u'other / self'b'self**exponent; should promote to float or complex when necessary.'u'self**exponent; should promote to float or complex when necessary.'b'base ** self'u'base ** self'b'Returns the Real distance from 0. Called for abs(self).'u'Returns the Real distance from 0. Called for abs(self).'b'(x+y*i).conjugate() returns (x-y*i).'u'(x+y*i).conjugate() returns (x-y*i).'b'self == other'u'self == other'b'To Complex, Real adds the operations that work on real numbers. + + In short, those are: a conversion to float, trunc(), divmod, + %, <, <=, >, and >=. + + Real also provides defaults for the derived operations. + 'u'To Complex, Real adds the operations that work on real numbers. + + In short, those are: a conversion to float, trunc(), divmod, + %, <, <=, >, and >=. + + Real also provides defaults for the derived operations. + 'b'Any Real can be converted to a native float object. + + Called for float(self).'u'Any Real can be converted to a native float object. + + Called for float(self).'b'trunc(self): Truncates self to an Integral. + + Returns an Integral i such that: + * i>0 iff self>0; + * abs(i) <= abs(self); + * for any Integral j satisfying the first two conditions, + abs(i) >= abs(j) [i.e. i has "maximal" abs among those]. + i.e. "truncate towards 0". + 'u'trunc(self): Truncates self to an Integral. + + Returns an Integral i such that: + * i>0 iff self>0; + * abs(i) <= abs(self); + * for any Integral j satisfying the first two conditions, + abs(i) >= abs(j) [i.e. i has "maximal" abs among those]. + i.e. "truncate towards 0". + 'b'Finds the greatest Integral <= self.'u'Finds the greatest Integral <= self.'b'Finds the least Integral >= self.'u'Finds the least Integral >= self.'b'Rounds self to ndigits decimal places, defaulting to 0. + + If ndigits is omitted or None, returns an Integral, otherwise + returns a Real. Rounds half toward even. + 'u'Rounds self to ndigits decimal places, defaulting to 0. + + If ndigits is omitted or None, returns an Integral, otherwise + returns a Real. Rounds half toward even. + 'b'divmod(self, other): The pair (self // other, self % other). 
+ + Sometimes this can be computed faster than the pair of + operations. + 'u'divmod(self, other): The pair (self // other, self % other). + + Sometimes this can be computed faster than the pair of + operations. + 'b'divmod(other, self): The pair (self // other, self % other). + + Sometimes this can be computed faster than the pair of + operations. + 'u'divmod(other, self): The pair (self // other, self % other). + + Sometimes this can be computed faster than the pair of + operations. + 'b'self // other: The floor() of self/other.'u'self // other: The floor() of self/other.'b'other // self: The floor() of other/self.'u'other // self: The floor() of other/self.'b'self % other'u'self % other'b'other % self'u'other % self'b'self < other + + < on Reals defines a total ordering, except perhaps for NaN.'u'self < other + + < on Reals defines a total ordering, except perhaps for NaN.'b'self <= other'u'self <= other'b'complex(self) == complex(float(self), 0)'u'complex(self) == complex(float(self), 0)'b'Real numbers are their real component.'u'Real numbers are their real component.'b'Real numbers have no imaginary component.'u'Real numbers have no imaginary component.'b'Conjugate is a no-op for Reals.'u'Conjugate is a no-op for Reals.'b'.numerator and .denominator should be in lowest terms.'u'.numerator and .denominator should be in lowest terms.'b'float(self) = self.numerator / self.denominator + + It's important that this conversion use the integer's "true" + division rather than casting one side to float before dividing + so that ratios of huge integers convert without overflowing. + + 'u'float(self) = self.numerator / self.denominator + + It's important that this conversion use the integer's "true" + division rather than casting one side to float before dividing + so that ratios of huge integers convert without overflowing. + + 'b'Integral adds a conversion to int and the bit-string operations.'u'Integral adds a conversion to int and the bit-string operations.'b'int(self)'u'int(self)'b'Called whenever an index is needed, such as in slicing'u'Called whenever an index is needed, such as in slicing'b'self ** exponent % modulus, but maybe faster. + + Accept the modulus argument if you want to support the + 3-argument version of pow(). Raise a TypeError if exponent < 0 + or any argument isn't Integral. Otherwise, just implement the + 2-argument version described in Complex. + 'u'self ** exponent % modulus, but maybe faster. + + Accept the modulus argument if you want to support the + 3-argument version of pow(). Raise a TypeError if exponent < 0 + or any argument isn't Integral. Otherwise, just implement the + 2-argument version described in Complex. + 'b'self << other'u'self << other'b'other << self'u'other << self'b'self >> other'u'self >> other'b'other >> self'u'other >> self'b'self & other'u'self & other'b'other & self'u'other & self'b'self ^ other'u'self ^ other'b'other ^ self'u'other ^ self'b'self | other'u'self | other'b'other | self'u'other | self'b'~self'u'~self'b'float(self) == float(int(self))'u'float(self) == float(int(self))'b'Integers are their own numerators.'u'Integers are their own numerators.'b'Integers have a denominator of 1.'u'Integers have a denominator of 1.'u'numbers' +opcode module - potentially shared between dis and other modules which +operate on bytecodes (e.g. peephole optimizers). 
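Several of the pool entries above are the PEP 3141 docstrings from the numbers module, which define the Number -> Complex -> Real -> Rational -> Integral tower and note that Decimal is deliberately kept out of Real. A small illustration of how that tower behaves on stock CPython, not part of the database contents:

    import numbers
    from decimal import Decimal
    from fractions import Fraction

    assert isinstance(3, numbers.Integral)        # ints sit at the bottom of the tower
    assert isinstance(3.14, numbers.Real)         # floats are Real, hence Complex and Number
    assert isinstance(Fraction(1, 3), numbers.Rational)

    # Decimal is registered as a Number but, per the note above, *not* as a Real,
    # because decimals do not interoperate with binary floats.
    assert isinstance(Decimal("3.14"), numbers.Number)
    assert not isinstance(Decimal("3.14"), numbers.Real)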
+hasnargsnot inis notexception matchBAD<%r>def_opname_opjrel_opjabs_opPOP_TOPROT_TWOROT_THREEDUP_TOPDUP_TOP_TWOROT_FOURNOPUNARY_POSITIVEUNARY_NEGATIVEUNARY_NOTUNARY_INVERTBINARY_MATRIX_MULTIPLYINPLACE_MATRIX_MULTIPLYBINARY_POWERBINARY_MULTIPLYBINARY_MODULOBINARY_ADDBINARY_SUBTRACTBINARY_SUBSCRBINARY_FLOOR_DIVIDEBINARY_TRUE_DIVIDEINPLACE_FLOOR_DIVIDEINPLACE_TRUE_DIVIDEGET_AITERGET_ANEXTBEFORE_ASYNC_WITHBEGIN_FINALLYEND_ASYNC_FORINPLACE_ADDINPLACE_SUBTRACTINPLACE_MULTIPLYINPLACE_MODULOSTORE_SUBSCRDELETE_SUBSCRBINARY_LSHIFTBINARY_RSHIFTBINARY_ANDBINARY_XORBINARY_ORINPLACE_POWERGET_ITERGET_YIELD_FROM_ITERPRINT_EXPRLOAD_BUILD_CLASSYIELD_FROMGET_AWAITABLEINPLACE_LSHIFTINPLACE_RSHIFTINPLACE_ANDINPLACE_XORINPLACE_ORWITH_CLEANUP_STARTWITH_CLEANUP_FINISHRETURN_VALUEIMPORT_STARSETUP_ANNOTATIONSYIELD_VALUEPOP_BLOCKEND_FINALLYPOP_EXCEPTSTORE_NAMEDELETE_NAMEUNPACK_SEQUENCEFOR_ITERUNPACK_EXSTORE_ATTRDELETE_ATTRSTORE_GLOBALDELETE_GLOBALLOAD_CONSTLOAD_NAMEBUILD_TUPLEBUILD_LISTBUILD_SETBUILD_MAPLOAD_ATTR106COMPARE_OP107IMPORT_NAMEIMPORT_FROM109JUMP_FORWARDJUMP_IF_FALSE_OR_POPJUMP_IF_TRUE_OR_POPJUMP_ABSOLUTEPOP_JUMP_IF_FALSEPOP_JUMP_IF_TRUELOAD_GLOBAL116SETUP_FINALLY122LOAD_FAST124STORE_FASTDELETE_FAST126RAISE_VARARGSCALL_FUNCTIONBUILD_SLICELOAD_CLOSURELOAD_DEREFSTORE_DEREFDELETE_DEREFCALL_FUNCTION_KWCALL_FUNCTION_EXSETUP_WITHLIST_APPENDSET_ADDMAP_ADDLOAD_CLASSDEREFBUILD_LIST_UNPACKBUILD_MAP_UNPACKBUILD_MAP_UNPACK_WITH_CALLBUILD_TUPLE_UNPACKBUILD_SET_UNPACKSETUP_ASYNC_WITHBUILD_CONST_KEY_MAPBUILD_STRINGBUILD_TUPLE_UNPACK_WITH_CALLLOAD_METHODCALL_METHODCALL_FINALLYPOP_FINALLY# It's a chicken-and-egg I'm afraid:# We're imported before _opcode's made.# With exception unheeded# (stack_effect is not needed)# Both our chickens and eggs are allayed.# --Larry Hastings, 2013/11/23# unused# Instruction opcodes for compiled code# Blank lines correspond to available opcodes# Opcodes from here have an argument:# Index in name list# ""# Number of tuple items# Index in const list# Number of list items# Number of set items# Number of dict entries# Comparison operator# Number of bytes to skip# Target byte offset from beginning of code# Distance to target address# Local variable number# Number of raise arguments (1, 2, or 3)# #args# Flags# Number of items# #args + #kwargsb' +opcode module - potentially shared between dis and other modules which +operate on bytecodes (e.g. peephole optimizers). +'u' +opcode module - potentially shared between dis and other modules which +operate on bytecodes (e.g. peephole optimizers). 
+'b'cmp_op'u'cmp_op'b'hasconst'u'hasconst'b'hasname'u'hasname'b'hasjrel'u'hasjrel'b'hasjabs'u'hasjabs'b'haslocal'u'haslocal'b'hascompare'u'hascompare'b'hasfree'u'hasfree'b'opname'u'opname'b'opmap'u'opmap'b'HAVE_ARGUMENT'u'HAVE_ARGUMENT'b'EXTENDED_ARG'u'EXTENDED_ARG'b'hasnargs'u'hasnargs'b'stack_effect'u'stack_effect'b'not in'u'not in'b'is not'u'is not'b'exception match'u'exception match'b'BAD'u'BAD'b'<%r>'u'<%r>'b'POP_TOP'u'POP_TOP'b'ROT_TWO'u'ROT_TWO'b'ROT_THREE'u'ROT_THREE'b'DUP_TOP'u'DUP_TOP'b'DUP_TOP_TWO'u'DUP_TOP_TWO'b'ROT_FOUR'u'ROT_FOUR'b'NOP'u'NOP'b'UNARY_POSITIVE'u'UNARY_POSITIVE'b'UNARY_NEGATIVE'u'UNARY_NEGATIVE'b'UNARY_NOT'u'UNARY_NOT'b'UNARY_INVERT'u'UNARY_INVERT'b'BINARY_MATRIX_MULTIPLY'u'BINARY_MATRIX_MULTIPLY'b'INPLACE_MATRIX_MULTIPLY'u'INPLACE_MATRIX_MULTIPLY'b'BINARY_POWER'u'BINARY_POWER'b'BINARY_MULTIPLY'u'BINARY_MULTIPLY'b'BINARY_MODULO'u'BINARY_MODULO'b'BINARY_ADD'u'BINARY_ADD'b'BINARY_SUBTRACT'u'BINARY_SUBTRACT'b'BINARY_SUBSCR'u'BINARY_SUBSCR'b'BINARY_FLOOR_DIVIDE'u'BINARY_FLOOR_DIVIDE'b'BINARY_TRUE_DIVIDE'u'BINARY_TRUE_DIVIDE'b'INPLACE_FLOOR_DIVIDE'u'INPLACE_FLOOR_DIVIDE'b'INPLACE_TRUE_DIVIDE'u'INPLACE_TRUE_DIVIDE'b'GET_AITER'u'GET_AITER'b'GET_ANEXT'u'GET_ANEXT'b'BEFORE_ASYNC_WITH'u'BEFORE_ASYNC_WITH'b'BEGIN_FINALLY'u'BEGIN_FINALLY'b'END_ASYNC_FOR'u'END_ASYNC_FOR'b'INPLACE_ADD'u'INPLACE_ADD'b'INPLACE_SUBTRACT'u'INPLACE_SUBTRACT'b'INPLACE_MULTIPLY'u'INPLACE_MULTIPLY'b'INPLACE_MODULO'u'INPLACE_MODULO'b'STORE_SUBSCR'u'STORE_SUBSCR'b'DELETE_SUBSCR'u'DELETE_SUBSCR'b'BINARY_LSHIFT'u'BINARY_LSHIFT'b'BINARY_RSHIFT'u'BINARY_RSHIFT'b'BINARY_AND'u'BINARY_AND'b'BINARY_XOR'u'BINARY_XOR'b'BINARY_OR'u'BINARY_OR'b'INPLACE_POWER'u'INPLACE_POWER'b'GET_ITER'u'GET_ITER'b'GET_YIELD_FROM_ITER'u'GET_YIELD_FROM_ITER'b'PRINT_EXPR'u'PRINT_EXPR'b'LOAD_BUILD_CLASS'u'LOAD_BUILD_CLASS'b'YIELD_FROM'u'YIELD_FROM'b'GET_AWAITABLE'u'GET_AWAITABLE'b'INPLACE_LSHIFT'u'INPLACE_LSHIFT'b'INPLACE_RSHIFT'u'INPLACE_RSHIFT'b'INPLACE_AND'u'INPLACE_AND'b'INPLACE_XOR'u'INPLACE_XOR'b'INPLACE_OR'u'INPLACE_OR'b'WITH_CLEANUP_START'u'WITH_CLEANUP_START'b'WITH_CLEANUP_FINISH'u'WITH_CLEANUP_FINISH'b'RETURN_VALUE'u'RETURN_VALUE'b'IMPORT_STAR'u'IMPORT_STAR'b'SETUP_ANNOTATIONS'u'SETUP_ANNOTATIONS'b'YIELD_VALUE'u'YIELD_VALUE'b'POP_BLOCK'u'POP_BLOCK'b'END_FINALLY'u'END_FINALLY'b'POP_EXCEPT'u'POP_EXCEPT'b'STORE_NAME'u'STORE_NAME'b'DELETE_NAME'u'DELETE_NAME'b'UNPACK_SEQUENCE'u'UNPACK_SEQUENCE'b'FOR_ITER'u'FOR_ITER'b'UNPACK_EX'u'UNPACK_EX'b'STORE_ATTR'u'STORE_ATTR'b'DELETE_ATTR'u'DELETE_ATTR'b'STORE_GLOBAL'u'STORE_GLOBAL'b'DELETE_GLOBAL'u'DELETE_GLOBAL'b'LOAD_CONST'u'LOAD_CONST'b'LOAD_NAME'u'LOAD_NAME'b'BUILD_TUPLE'u'BUILD_TUPLE'b'BUILD_LIST'u'BUILD_LIST'b'BUILD_SET'u'BUILD_SET'b'BUILD_MAP'u'BUILD_MAP'b'LOAD_ATTR'u'LOAD_ATTR'b'COMPARE_OP'u'COMPARE_OP'b'IMPORT_NAME'u'IMPORT_NAME'b'IMPORT_FROM'u'IMPORT_FROM'b'JUMP_FORWARD'u'JUMP_FORWARD'b'JUMP_IF_FALSE_OR_POP'u'JUMP_IF_FALSE_OR_POP'b'JUMP_IF_TRUE_OR_POP'u'JUMP_IF_TRUE_OR_POP'b'JUMP_ABSOLUTE'u'JUMP_ABSOLUTE'b'POP_JUMP_IF_FALSE'u'POP_JUMP_IF_FALSE'b'POP_JUMP_IF_TRUE'u'POP_JUMP_IF_TRUE'b'LOAD_GLOBAL'u'LOAD_GLOBAL'b'SETUP_FINALLY'u'SETUP_FINALLY'b'LOAD_FAST'u'LOAD_FAST'b'STORE_FAST'u'STORE_FAST'b'DELETE_FAST'u'DELETE_FAST'b'RAISE_VARARGS'u'RAISE_VARARGS'b'CALL_FUNCTION'u'CALL_FUNCTION'b'BUILD_SLICE'u'BUILD_SLICE'b'LOAD_CLOSURE'u'LOAD_CLOSURE'b'LOAD_DEREF'u'LOAD_DEREF'b'STORE_DEREF'u'STORE_DEREF'b'DELETE_DEREF'u'DELETE_DEREF'b'CALL_FUNCTION_KW'u'CALL_FUNCTION_KW'b'CALL_FUNCTION_EX'u'CALL_FUNCTION_EX'b'SETUP_WITH'u'SETUP_WITH'b'LIST_APPEND'u'LIST_APPEND'b'SET_ADD'u'SET_ADD'b'MAP_ADD'u'MAP
_ADD'b'LOAD_CLASSDEREF'u'LOAD_CLASSDEREF'b'BUILD_LIST_UNPACK'u'BUILD_LIST_UNPACK'b'BUILD_MAP_UNPACK'u'BUILD_MAP_UNPACK'b'BUILD_MAP_UNPACK_WITH_CALL'u'BUILD_MAP_UNPACK_WITH_CALL'b'BUILD_TUPLE_UNPACK'u'BUILD_TUPLE_UNPACK'b'BUILD_SET_UNPACK'u'BUILD_SET_UNPACK'b'SETUP_ASYNC_WITH'u'SETUP_ASYNC_WITH'b'BUILD_CONST_KEY_MAP'u'BUILD_CONST_KEY_MAP'b'BUILD_STRING'u'BUILD_STRING'b'BUILD_TUPLE_UNPACK_WITH_CALL'u'BUILD_TUPLE_UNPACK_WITH_CALL'b'LOAD_METHOD'u'LOAD_METHOD'b'CALL_METHOD'u'CALL_METHOD'b'CALL_FINALLY'u'CALL_FINALLY'b'POP_FINALLY'u'POP_FINALLY'u'opcode' +Operator Interface + +This module exports a set of functions corresponding to the intrinsic +operators of Python. For example, operator.add(x, y) is equivalent +to the expression x+y. The function names are those used for special +methods; variants without leading and trailing '__' are also provided +for convenience. + +This is the pure Python implementation of the module. +_absSame as a < b.Same as a <= b.Same as a == b.Same as a != b.Same as a >= b.Same as a > b.Same as not a.Return True if a is true, False otherwise.Same as a is b.Same as a is not b.Same as abs(a).Same as a + b.Same as a & b.Same as a // b.Same as a.__index__().Same as ~a.Same as a << b.Same as a % b.Same as a * b.Same as a @ b.Same as -a.Same as a | b.Same as +a.Same as a ** b.Same as a >> b.Same as a - b.Same as a / b.Same as a ^ b.Same as a + b, for a and b sequences.'%s' object can't be concatenatedSame as b in a (note reversed operands).Return the number of times b occurs in a.Same as del a[b].Same as a[b].Return the first index of b in a.sequence.index(x): x not in sequenceSame as a[b] = c. + Return an estimate of the number of items in obj. + This is useful for presizing containers when building from an iterable. + + If the object supports len(), the result will be exact. Otherwise, it may + over- or under-estimate by an arbitrary amount. The result will be an + integer >= 0. + '%s' object cannot be interpreted as an integerhint__length_hint__ must be integer, not %s__length_hint__() should return >= 0 + Return a callable object that fetches the given attribute(s) from its operand. + After f = attrgetter('name'), the call f(r) returns r.name. + After g = attrgetter('name', 'date'), the call g(r) returns (r.name, r.date). + After h = attrgetter('name.first', 'name.last'), the call h(r) returns + (r.name.first, r.name.last). + _attrsattribute name must be a stringgetters + Return a callable object that fetches the given item(s) from its operand. + After f = itemgetter(2), the call f(r) returns r[2]. + After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3]) + _items + Return a callable object that calls the given method on its operand. + After f = methodcaller('name'), the call f(r) returns r.name(). + After g = methodcaller('name', 'date', foo=1), the call g(r) returns + r.name('date', foo=1). 
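The block of mnemonics above (POP_TOP through POP_FINALLY, with LOAD_ATTR at 106, COMPARE_OP at 107, and so on) is the name table from the opcode module for the CPython version this database was built against. A brief sketch of how that table is normally consulted; the exact numbering is version-dependent, so treat the printed values as indicative only:

    import opcode

    # opmap maps mnemonic -> numeric opcode; opname is the inverse table.
    n = opcode.opmap["LOAD_CONST"]
    print(n, opcode.opname[n])

    # Opcodes at or above HAVE_ARGUMENT carry an argument in the bytecode stream,
    # which is why LOAD_CONST appears in the "has an argument" half of the listing.
    print(n >= opcode.HAVE_ARGUMENT)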
+ _kwargsmethod name must be a stringSame as a += b.Same as a &= b.Same as a += b, for a and b sequences.Same as a //= b.Same as a <<= b.Same as a %= b.Same as a *= b.Same as a @= b.Same as a |= b.Same as a **= b.Same as a >>= b.Same as a -= b.Same as a /= b.Same as a ^= b.__not____inv____concat____iconcat__# Comparison Operations *******************************************************## Logical Operations **********************************************************## Mathematical/Bitwise Operations *********************************************## Sequence Operations *********************************************************## Generalized Lookup Objects **************************************************## In-place Operations *********************************************************## All of these "__func__ = func" assignments have to happen after importing# from _operator to make sure they're set to the right functionb' +Operator Interface + +This module exports a set of functions corresponding to the intrinsic +operators of Python. For example, operator.add(x, y) is equivalent +to the expression x+y. The function names are those used for special +methods; variants without leading and trailing '__' are also provided +for convenience. + +This is the pure Python implementation of the module. +'u' +Operator Interface + +This module exports a set of functions corresponding to the intrinsic +operators of Python. For example, operator.add(x, y) is equivalent +to the expression x+y. The function names are those used for special +methods; variants without leading and trailing '__' are also provided +for convenience. + +This is the pure Python implementation of the module. +'b'abs'u'abs'b'and_'u'and_'b'attrgetter'u'attrgetter'b'concat'u'concat'b'contains'u'contains'b'countOf'u'countOf'b'delitem'u'delitem'b'eq'u'eq'b'floordiv'u'floordiv'b'getitem'u'getitem'b'iadd'u'iadd'b'iand'u'iand'b'iconcat'u'iconcat'b'ifloordiv'u'ifloordiv'b'ilshift'u'ilshift'b'imatmul'u'imatmul'b'imod'u'imod'b'imul'u'imul'b'indexOf'u'indexOf'b'inv'u'inv'b'invert'u'invert'b'ior'u'ior'b'ipow'u'ipow'b'irshift'u'irshift'b'is_'u'is_'b'is_not'u'is_not'b'isub'u'isub'b'itemgetter'u'itemgetter'b'itruediv'u'itruediv'b'ixor'u'ixor'b'length_hint'u'length_hint'b'lshift'u'lshift'b'matmul'u'matmul'b'methodcaller'u'methodcaller'b'mod'u'mod'b'mul'u'mul'b'neg'u'neg'b'not_'u'not_'b'or_'u'or_'b'pos'u'pos'b'pow'u'pow'b'rshift'u'rshift'b'setitem'u'setitem'b'truediv'u'truediv'b'truth'u'truth'b'xor'u'xor'b'Same as a < b.'u'Same as a < b.'b'Same as a <= b.'u'Same as a <= b.'b'Same as a == b.'u'Same as a == b.'b'Same as a != b.'u'Same as a != b.'b'Same as a >= b.'u'Same as a >= b.'b'Same as a > b.'u'Same as a > b.'b'Same as not a.'u'Same as not a.'b'Return True if a is true, False otherwise.'u'Return True if a is true, False otherwise.'b'Same as a is b.'u'Same as a is b.'b'Same as a is not b.'u'Same as a is not b.'b'Same as abs(a).'u'Same as abs(a).'b'Same as a + b.'u'Same as a + b.'b'Same as a & b.'u'Same as a & b.'b'Same as a // b.'u'Same as a // b.'b'Same as a.__index__().'u'Same as a.__index__().'b'Same as ~a.'u'Same as ~a.'b'Same as a << b.'u'Same as a << b.'b'Same as a % b.'u'Same as a % b.'b'Same as a * b.'u'Same as a * b.'b'Same as a @ b.'u'Same as a @ b.'b'Same as -a.'u'Same as -a.'b'Same as a | b.'u'Same as a | b.'b'Same as +a.'u'Same as +a.'b'Same as a ** b.'u'Same as a ** b.'b'Same as a >> b.'u'Same as a >> b.'b'Same as a - b.'u'Same as a - b.'b'Same as a / b.'u'Same as a / b.'b'Same as a ^ b.'u'Same as a ^ b.'b'Same as a + b, for a and b 
sequences.'u'Same as a + b, for a and b sequences.'b'__getitem__'u'__getitem__'b''%s' object can't be concatenated'u''%s' object can't be concatenated'b'Same as b in a (note reversed operands).'u'Same as b in a (note reversed operands).'b'Return the number of times b occurs in a.'u'Return the number of times b occurs in a.'b'Same as del a[b].'u'Same as del a[b].'b'Same as a[b].'u'Same as a[b].'b'Return the first index of b in a.'u'Return the first index of b in a.'b'sequence.index(x): x not in sequence'u'sequence.index(x): x not in sequence'b'Same as a[b] = c.'u'Same as a[b] = c.'b' + Return an estimate of the number of items in obj. + This is useful for presizing containers when building from an iterable. + + If the object supports len(), the result will be exact. Otherwise, it may + over- or under-estimate by an arbitrary amount. The result will be an + integer >= 0. + 'u' + Return an estimate of the number of items in obj. + This is useful for presizing containers when building from an iterable. + + If the object supports len(), the result will be exact. Otherwise, it may + over- or under-estimate by an arbitrary amount. The result will be an + integer >= 0. + 'b''%s' object cannot be interpreted as an integer'u''%s' object cannot be interpreted as an integer'b'__length_hint__ must be integer, not %s'u'__length_hint__ must be integer, not %s'b'__length_hint__() should return >= 0'u'__length_hint__() should return >= 0'b' + Return a callable object that fetches the given attribute(s) from its operand. + After f = attrgetter('name'), the call f(r) returns r.name. + After g = attrgetter('name', 'date'), the call g(r) returns (r.name, r.date). + After h = attrgetter('name.first', 'name.last'), the call h(r) returns + (r.name.first, r.name.last). + 'u' + Return a callable object that fetches the given attribute(s) from its operand. + After f = attrgetter('name'), the call f(r) returns r.name. + After g = attrgetter('name', 'date'), the call g(r) returns (r.name, r.date). + After h = attrgetter('name.first', 'name.last'), the call h(r) returns + (r.name.first, r.name.last). + 'b'_attrs'u'_attrs'b'_call'u'_call'b'attribute name must be a string'u'attribute name must be a string'b' + Return a callable object that fetches the given item(s) from its operand. + After f = itemgetter(2), the call f(r) returns r[2]. + After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3]) + 'u' + Return a callable object that fetches the given item(s) from its operand. + After f = itemgetter(2), the call f(r) returns r[2]. + After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3]) + 'b'_items'u'_items'b' + Return a callable object that calls the given method on its operand. + After f = methodcaller('name'), the call f(r) returns r.name(). + After g = methodcaller('name', 'date', foo=1), the call g(r) returns + r.name('date', foo=1). + 'u' + Return a callable object that calls the given method on its operand. + After f = methodcaller('name'), the call f(r) returns r.name(). + After g = methodcaller('name', 'date', foo=1), the call g(r) returns + r.name('date', foo=1). 
+ 'b'_kwargs'u'_kwargs'b'method name must be a string'u'method name must be a string'b'Same as a += b.'u'Same as a += b.'b'Same as a &= b.'u'Same as a &= b.'b'Same as a += b, for a and b sequences.'u'Same as a += b, for a and b sequences.'b'Same as a //= b.'u'Same as a //= b.'b'Same as a <<= b.'u'Same as a <<= b.'b'Same as a %= b.'u'Same as a %= b.'b'Same as a *= b.'u'Same as a *= b.'b'Same as a @= b.'u'Same as a @= b.'b'Same as a |= b.'u'Same as a |= b.'b'Same as a **= b.'u'Same as a **= b.'b'Same as a >>= b.'u'Same as a >>= b.'b'Same as a -= b.'u'Same as a -= b.'b'Same as a /= b.'u'Same as a /= b.'b'Same as a ^= b.'u'Same as a ^= b.'u'operator'A powerful, extensible, and easy-to-use option parser. + +By Greg Ward + +Originally distributed as Optik. + +For support, use the optik-users@lists.sourceforge.net mailing list +(http://lists.sourceforge.net/lists/listinfo/optik-users). + +Simple usage example: + + from optparse import OptionParser + + parser = OptionParser() + parser.add_option("-f", "--file", dest="filename", + help="write report to FILE", metavar="FILE") + parser.add_option("-q", "--quiet", + action="store_false", dest="verbose", default=True, + help="don't print status messages to stdout") + + (options, args) = parser.parse_args() +1.5.3Optionmake_optionSUPPRESS_HELPSUPPRESS_USAGEValuesOptionContainerOptionGroupOptionParserIndentedHelpFormatterTitledHelpFormatterOptParseErrorOptionErrorOptionConflictErrorOptionValueErrorBadOptionErrorcheck_choice +Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved. +Copyright (c) 2002-2006 Python Software Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + * Neither the name of the author nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +__copyright__<%s at 0x%x: %s>singular + Raised if an Option instance is created with invalid or + inconsistent arguments. + option_idoption %s: %s + Raised if conflicting options are added to an OptionParser. + + Raised if an invalid option value is encountered on the command + line. + + Raised if an invalid option is seen on the command line. + opt_strno such option: %sAmbiguousOptionError + Raised if an ambiguous option is seen on the command line. + ambiguous option: %s (%s?) 
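The preceding run of "Same as a ... b." strings is the docstring set of the operator module, including its attrgetter, itemgetter, and methodcaller factories. A short usage sketch, illustrative only and using made-up sample data:

    from operator import attrgetter, itemgetter, methodcaller

    rows = [{"name": "b", "n": 2}, {"name": "a", "n": 1}]
    print(sorted(rows, key=itemgetter("name")))   # sort dicts by their "name" item

    # attrgetter also supports dotted paths, mirroring the
    # attrgetter('name.first', 'name.last') example in the docstring above.
    print(attrgetter("imag")(3 + 4j))             # 4.0

    # methodcaller('upper') builds a callable equivalent to lambda s: s.upper().
    print(methodcaller("upper")("codeql"))        # CODEQL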
+ Abstract base class for formatting option help. OptionParser + instances should use one of the HelpFormatter subclasses for + formatting help; by default IndentedHelpFormatter is used. + + Instance attributes: + parser : OptionParser + the controlling OptionParser instance + indent_increment : int + the number of columns to indent per nesting level + max_help_position : int + the maximum starting column for option help text + help_position : int + the calculated starting column for option help text; + initially the same as the maximum + width : int + total number of columns for output (pass None to constructor for + this value to be taken from the $COLUMNS environment variable) + level : int + current indentation level + current_indent : int + current indentation level (in columns) + help_width : int + number of columns available for option help text (calculated) + default_tag : str + text to replace with each option's default value, "%default" + by default. Set to false value to disable default value expansion. + option_strings : { Option : str } + maps Option instances to the snippet of help text explaining + the syntax of that option, e.g. "-h, --help" or + "-fFILE, --file=FILE" + _short_opt_fmt : str + format string controlling how short options with values are + printed in help text. Must be either "%s%s" ("-fFILE") or + "%s %s" ("-f FILE"), because those are the two syntaxes that + Optik supports. + _long_opt_fmt : str + similar but for long options; must be either "%s %s" ("--file FILE") + or "%s=%s" ("--file=FILE"). + NO_DEFAULT_VALUEshort_firstCOLUMNS%defaultdefault_tag_short_opt_fmt_long_opt_fmtset_parserset_short_opt_delimiterdeliminvalid metavar delimiter for short options: %rset_long_opt_delimiterinvalid metavar delimiter for long options: %rdedentsubclasses must implementformat_heading + Format a paragraph of free-form text for inclusion in the + help output at the current indentation level. + format_descriptionformat_epilogexpand_defaultdefault_valueNO_DEFAULTformat_optionstore_option_stringsmax_lenoption_listformat_option_stringsoption_groupsReturn a comma-separated list of option strings & metavariables.takes_valuesopt_short_optslopt_long_optsFormat help with indented section bodies. + Usage: %s +Format help with underlined section headers. 
+ %s %s +Usage%s +%s +=-_parse_num0b_parse_intintegerfloating-point_builtin_cvtcheck_builtincvtoption %s: invalid %s value: %roption %s: invalid choice: %r (choose from %s)DEFAULT + Instance attributes: + _short_opts : [string] + _long_opts : [string] + + action : string + type : string + dest : string + default : any + nargs : int + const : any + choices : [string] + callback : function + callback_args : (any*) + callback_kwargs : { string : any } + help : string + metavar : string + callback_argscallback_kwargsATTRSACTIONSSTORE_ACTIONSTYPED_ACTIONSALWAYS_TYPED_ACTIONSCONST_ACTIONSTYPESTYPE_CHECKERCHECK_METHODS_check_opt_strings_set_opt_strings_set_attrsat least one option string must be suppliedinvalid option string %r: must be at least two characters long"invalid option string %r: ""must be at least two characters long"invalid short option string %r: must be of the form -x, (x any non-dash char)"invalid short option string %r: ""must be of the form -x, (x any non-dash char)"invalid long option string %r: must start with --, followed by non-dash"invalid long option string %r: ""must start with --, followed by non-dash"invalid keyword arguments: %s_check_actioninvalid action: %r_check_typeinvalid option type: %rmust not supply a type for action %r_check_choicemust supply a list of choices for type 'choice'choices must be a list of strings ('%s' supplied)must not supply choices for type %r_check_dest_check_const'const' must not be supplied for action %r_check_nargs'nargs' must not be supplied for action %rcallback not callable: %rcallback_args, if supplied, must be a tuple: not %rcallback_kwargs, if supplied, must be a dict: not %rcallback supplied (%r) for non-callback optioncallback_args supplied for non-callback optioncallback_kwargs supplied for non-callback optionget_opt_stringcheck_valueconvert_valueensure_valueprint_versionunknown action %r_update_careful + Update the option values from an arbitrary dictionary, but only + use keys from dict that already have a corresponding attribute + in self. Any keys in dict without a corresponding attribute + are silently ignored. + dval_update_loose + Update the option values from an arbitrary dictionary, + using all keys from the dictionary regardless of whether + they have a corresponding attribute in self or not. + _updatecarefullooseinvalid update mode: %rread_moduleread_file + Abstract base class. + + Class attributes: + standard_option_list : [Option] + list of standard options that will be accepted by all instances + of this parser class (intended to be overridden by subclasses). + + Instance attributes: + option_list : [Option] + the list of Option objects contained by this OptionContainer + _short_opt : { string : Option } + dictionary mapping short option strings, eg. "-f" or "-X", + to the Option instances that implement them. If an Option + has multiple short option strings, it will appear in this + dictionary multiple times. [1] + _long_opt : { string : Option } + dictionary mapping long option strings, eg. "--file" or + "--exclude", to the Option instances that implement them. + Again, a given Option can occur multiple times in this + dictionary. [1] + defaults : { string : any } + dictionary mapping option destination names to default + values for each destination [1] + + [1] These mappings are common to (shared by) all components of the + controlling OptionParser, where they are initially created. 
+ + option_class_create_option_listset_conflict_handlerset_description_create_option_mappings_short_opt_long_opt_share_option_mappingsresolveinvalid conflict_resolution value %rget_descriptionsee OptionParser.destroy().conflict_optsconflicting option string(s): %sc_optionadd_option(Option) + add_option(opt_str, ..., kwarg=val, ...) + not an Option instance: %rinvalid argumentsadd_optionsget_optionremove_optionno such option %rformat_option_helpset_title + Class attributes: + standard_option_list : [Option] + list of standard options that will be accepted by all instances + of this parser class (intended to be overridden by subclasses). + + Instance attributes: + usage : string + a usage string for your program. Before it is displayed + to the user, "%prog" will be expanded to the name of + your program (self.prog or os.path.basename(sys.argv[0])). + prog : string + the name of the current program (to override + os.path.basename(sys.argv[0])). + description : string + A paragraph of text giving a brief overview of your program. + optparse reformats this paragraph to fit the current terminal + width and prints it when the user requests help (after usage, + but before the list of options). + epilog : string + paragraph of help text to print after option help + + option_groups : [OptionGroup] + list of option groups in this parser (option groups are + irrelevant for parsing the command-line, but very useful + for generating help) + + allow_interspersed_args : bool = true + if true, positional arguments may be interspersed with options. + Assuming -a and -b each take a single argument, the command-line + -ablah foo bar -bboo baz + will be interpreted the same as + -ablah -bboo -- foo bar baz + If this flag were false, that command line would be interpreted as + -ablah -- foo bar -bboo baz + -- ie. we stop processing options as soon as we see the first + non-option argument. (This is the tradition followed by + Python's getopt module, Perl's Getopt::Std, and other argument- + parsing libraries, but it is generally annoying to users.) + + process_default_values : bool = true + if true, option default values are processed similarly to option + values from the command line: that is, they are passed to the + type-checking function for the option's type (as long as the + default value is a string). (This really only matters if you + have defined custom types; see SF bug #955889.) Set it to false + to restore the behaviour of Optik 1.4.1 and earlier. + + rargs : [string] + the argument list currently being parsed. Only set when + parse_args() is active, and continually trimmed down as + we consume arguments. Mainly there for the benefit of + callback options. + largs : [string] + the list of leftover arguments that we have skipped while + parsing options. If allow_interspersed_args is false, this + list is always empty. + values : Values + the set of option values currently being accumulated. Only + set when parse_args() is active. Also mainly for callbacks. + + Because of the 'rargs', 'largs', and 'values' attributes, + OptionParser is not thread-safe. If, for some perverse reason, you + need to parse command-line arguments simultaneously in different + threads, use different OptionParser instances. + + standard_option_listadd_help_optionset_usageallow_interspersed_argsprocess_default_values_populate_option_list_init_parsing_state + Declare that you are done with this OptionParser. 
This cleans up + reference cycles so the OptionParser (and all objects referenced by + it) can be garbage-collected promptly. After calling destroy(), the + OptionParser is unusable. + _add_help_option_add_version_option--versionrargslargs%prog [options]enable_interspersed_argsSet parsing to not stop on the first non-option, allowing + interspersing switches with command arguments. This is the + default behavior. See also disable_interspersed_args() and the + class documentation description of the attribute + allow_interspersed_args.disable_interspersed_argsSet parsing to stop on the first non-option. Use this if + you have a command processor which runs another command that + has options of its own and you want to make sure these options + don't get confused. + set_process_default_valuesset_default_get_all_optionsget_default_valuesadd_option_groupnot an OptionGroup instance: %rinvalid OptionGroup (wrong parser)get_option_group + parse_args(args : [string] = sys.argv[1:], + values : Values = None) + -> (values : Values, args : [string]) + + Parse the command-line options found in 'args' (default: + sys.argv[1:]). Any errors result in a call to 'error()', which + by default prints the usage message to stderr and calls + sys.exit() with an error message. On success returns a pair + (values, args) where 'values' is a Values instance (with all + your option values) and 'args' is the list of arguments left + over after parsing options. + _process_argscheck_values + check_values(values : Values, args : [string]) + -> (values : Values, args : [string]) + + Check that the supplied option values and leftover arguments are + valid. Returns the option values and leftover arguments + (possibly adjusted, possibly completely new -- whatever you + like). Default implementation just returns the passed-in + values; subclasses may override as desired. + _process_args(largs : [string], + rargs : [string], + values : Values) + + Process command-line arguments and populate 'values', consuming + options and arguments from 'rargs'. If 'allow_interspersed_args' is + false, stop at the first non-option argument. If true, accumulate any + interspersed non-option arguments in 'largs'. + _process_long_opt_process_short_opts_match_long_opt_match_long_opt(opt : string) -> string + + Determine which long option string 'opt' matches, ie. which one + it is an unambiguous abbreviation for. Raises BadOptionError if + 'opt' doesn't unambiguously match any long option string. + _match_abbrevnext_arghad_explicit_value%(option)s option requires %(number)d argument%(option)s option requires %(number)d arguments%s option does not take a valueget_prog_nameexpand_prog_name%progerror(msg : string) + + Print a usage message incorporating 'msg' to stderr and exit. + If you override this in a subclass, it should not return -- it + should either exit or raise an exception. + %s: error: %s +get_usageprint_usage(file : file = stdout) + + Print the usage message for the current program (self.usage) to + 'file' (default stdout). Any occurrence of the string "%prog" in + self.usage is replaced with the name of the current program + (basename of sys.argv[0]). Does nothing if self.usage is empty + or not defined. + print_version(file : file = stdout) + + Print the version message for this program (self.version) to + 'file' (default stdout). As with print_usage(), any occurrence + of "%prog" in self.version is replaced by the current program's + name. Does nothing if self.version is empty or undefined. 
+ Optionsprint_help(file : file = stdout) + + Print an extended help message, listing all options and any + help text provided with them, to 'file' (default stdout). + wordmap_match_abbrev(s : string, wordmap : {string : Option}) -> string + + Return the string key in 'wordmap' for which 's' is an unambiguous + abbreviation. If 's' is found to be ambiguous or doesn't match any of + 'words', raise BadOptionError. + # This file was generated from:# Id: option_parser.py 527 2006-07-23 15:21:30Z greg# Id: option.py 522 2006-06-11 16:22:03Z gward# Id: help.py 527 2006-07-23 15:21:30Z greg# Id: errors.py 509 2006-04-20 00:58:24Z gward# computed later# The help for each option consists of two parts:# * the opt strings and metavars# eg. ("-x", or "-fFILENAME, --file=FILENAME")# * the user-supplied help string# eg. ("turn on expert mode", "read data from FILENAME")# If possible, we write both of these on the same line:# -x turn on expert mode# But if the opt string list is too long, we put the help# string on a second line, indented to the same column it would# start in if it fit on the first line.# -fFILENAME, --file=FILENAME# read data from FILENAME# start help on same line as opts# hexadecimal# binary# have to remove "0b" prefix# octal# decimal# Not supplying a default is different from a default of None,# so we need an explicit "not supplied" value.# The list of instance attributes that may be set through# keyword args to the constructor.# The set of actions allowed by option parsers. Explicitly listed# here so the constructor can validate its arguments.# The set of actions that involve storing a value somewhere;# also listed just for constructor argument validation. (If# the action is one of these, there must be a destination.)# The set of actions for which it makes sense to supply a value# type, ie. which may consume an argument from the command line.# The set of actions which *require* a value type, ie. that# always consume an argument from the command line.# The set of actions which take a 'const' attribute.# The set of known types for option parsers. Again, listed here for# constructor argument validation.# Dictionary of argument checking functions, which convert and# validate option arguments according to the option type.# Signature of checking functions is:# check(option : Option, opt : string, value : string) -> any# where# option is the Option instance calling the checker# opt is the actual option seen on the command-line# (eg. "-a", "--file")# value is the option argument seen on the command-line# The return value should be in the appropriate Python type# for option.type -- eg. an integer if option.type == "int".# If no checker is defined for a type, arguments will be# unchecked and remain strings.# CHECK_METHODS is a list of unbound method objects; they are called# by the constructor, in order, after all attributes are# initialized. The list is created and filled in later, after all# the methods are actually defined. (I just put it here because I# like to define and document all class attributes in the same# place.) Subclasses that add another _check_*() method should# define their own CHECK_METHODS list that adds their check method# to those from this class.# -- Constructor/initialization methods ----------------------------# Set _short_opts, _long_opts attrs from 'opts' tuple.# Have to be set now, in case no option strings are supplied.# Set all other attrs (action, type, etc.) from 'attrs' dict# Check all the attributes we just set. 
There are lots of# complicated interdependencies, but luckily they can be farmed# out to the _check_*() methods listed in CHECK_METHODS -- which# could be handy for subclasses! The one thing these all share# is that they raise OptionError if they discover a problem.# Filter out None because early versions of Optik had exactly# one short option and one long option, either of which# could be None.# -- Constructor validation methods --------------------------------# The "choices" attribute implies "choice" type.# No type given? "string" is the most sensible default.# Allow type objects or builtin type conversion functions# (int, str, etc.) as an alternative to their names.# No destination given, and we need one for this action. The# self.type check is for callbacks that take a value.# Glean a destination from the first long option string,# or from the first short option string if no long options.# eg. "--foo-bar" -> "foo_bar"# -- Processing methods --------------------------------------------# First, convert the value(s) to the right type. Howl if any# value(s) are bogus.# And then take whatever action is expected of us.# This is a separate method to make life easier for# subclasses to add new actions.# class Option# Initialize the option list and related data structures.# This method must be provided by subclasses, and it must# initialize at least the following instance attributes:# option_list, _short_opt, _long_opt, defaults.# For use by OptionParser constructor -- create the main# option mappings used by this OptionParser and all# OptionGroups that it owns.# single letter -> Option instance# long option -> Option instance# maps option dest -> default value# For use by OptionGroup constructor -- use shared option# mappings from the OptionParser that owns this OptionGroup.# -- Option-adding methods -----------------------------------------# option has a dest, we need a default# -- Option query/removal methods ----------------------------------# -- Help-formatting methods ---------------------------------------# Populate the option list; initial sources are the# standard_option_list class attribute, the 'option_list'# argument, and (if applicable) the _add_version_option() and# _add_help_option() methods.# -- Private methods -----------------------------------------------# (used by our or OptionContainer's constructor)# These are set in parse_args() for the convenience of callbacks.# -- Simple modifier methods ---------------------------------------# For backwards compatibility with Optik 1.3 and earlier.# Old, pre-Optik 1.5 behaviour.# -- OptionGroup methods -------------------------------------------# XXX lots of overlap with OptionContainer.add_option()# -- Option-parsing methods ----------------------------------------# don't modify caller's list# Store the halves of the argument list as attributes for the# convenience of callbacks:# rargs# the rest of the command-line (the "r" stands for# "remaining" or "right-hand")# largs# the leftover arguments -- ie. 
what's left after removing# options and their arguments (the "l" stands for "leftover"# or "left-hand")# We handle bare "--" explicitly, and bare "-" is handled by the# standard arg handler since the short arg case ensures that the# len of the opt string is greater than 1.# process a single long option (possibly with value(s))# process a cluster of short options (possibly with# value(s) for the last one only)# stop now, leave this arg in rargs# Say this is the original argument list:# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]# ^# (we are about to process arg(i)).# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of# [arg0, ..., arg(i-1)] (any options and their arguments will have# been removed from largs).# The while loop will usually consume 1 or more arguments per pass.# If it consumes 1 (eg. arg is an option that takes no arguments),# then after _process_arg() is done the situation is:# largs = subset of [arg0, ..., arg(i)]# rargs = [arg(i+1), ..., arg(N-1)]# If allow_interspersed_args is false, largs will always be# *empty* -- still a subset of [arg0, ..., arg(i-1)], but# not a very interesting subset!# Value explicitly attached to arg? Pretend it's the next# we have consumed a character# Any characters left in arg? Pretend they're the# next arg, and stop consuming characters of arg.# option doesn't take a value# -- Feedback methods ----------------------------------------------# Drop the last "\n", or the header if no options or option groups:# class OptionParser# Isolate all words with s as a prefix.# No exact match, so there had better be just one possibility.# More than one possible completion: ambiguous prefix.# Some day, there might be many Option classes. As of Optik 1.3, the# preferred way to instantiate Options is indirectly, via make_option(),# which will become a factory function when there are many Option# classes.b'A powerful, extensible, and easy-to-use option parser. + +By Greg Ward + +Originally distributed as Optik. + +For support, use the optik-users@lists.sourceforge.net mailing list +(http://lists.sourceforge.net/lists/listinfo/optik-users). + +Simple usage example: + + from optparse import OptionParser + + parser = OptionParser() + parser.add_option("-f", "--file", dest="filename", + help="write report to FILE", metavar="FILE") + parser.add_option("-q", "--quiet", + action="store_false", dest="verbose", default=True, + help="don't print status messages to stdout") + + (options, args) = parser.parse_args() +'u'A powerful, extensible, and easy-to-use option parser. + +By Greg Ward + +Originally distributed as Optik. + +For support, use the optik-users@lists.sourceforge.net mailing list +(http://lists.sourceforge.net/lists/listinfo/optik-users). 
+ +Simple usage example: + + from optparse import OptionParser + + parser = OptionParser() + parser.add_option("-f", "--file", dest="filename", + help="write report to FILE", metavar="FILE") + parser.add_option("-q", "--quiet", + action="store_false", dest="verbose", default=True, + help="don't print status messages to stdout") + + (options, args) = parser.parse_args() +'b'1.5.3'u'1.5.3'b'Option'u'Option'b'make_option'u'make_option'b'SUPPRESS_HELP'u'SUPPRESS_HELP'b'SUPPRESS_USAGE'u'SUPPRESS_USAGE'b'Values'u'Values'b'OptionContainer'u'OptionContainer'b'OptionGroup'u'OptionGroup'b'OptionParser'u'OptionParser'b'IndentedHelpFormatter'u'IndentedHelpFormatter'b'TitledHelpFormatter'u'TitledHelpFormatter'b'OptParseError'u'OptParseError'b'OptionError'u'OptionError'b'OptionConflictError'u'OptionConflictError'b'OptionValueError'u'OptionValueError'b'BadOptionError'u'BadOptionError'b'check_choice'u'check_choice'b' +Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved. +Copyright (c) 2002-2006 Python Software Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + * Neither the name of the author nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +'u' +Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved. +Copyright (c) 2002-2006 Python Software Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + * Neither the name of the author nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +'b'<%s at 0x%x: %s>'u'<%s at 0x%x: %s>'b' + Raised if an Option instance is created with invalid or + inconsistent arguments. + 'u' + Raised if an Option instance is created with invalid or + inconsistent arguments. + 'b'option %s: %s'u'option %s: %s'b' + Raised if conflicting options are added to an OptionParser. + 'u' + Raised if conflicting options are added to an OptionParser. + 'b' + Raised if an invalid option value is encountered on the command + line. + 'u' + Raised if an invalid option value is encountered on the command + line. + 'b' + Raised if an invalid option is seen on the command line. + 'u' + Raised if an invalid option is seen on the command line. + 'b'no such option: %s'u'no such option: %s'b' + Raised if an ambiguous option is seen on the command line. + 'u' + Raised if an ambiguous option is seen on the command line. + 'b'ambiguous option: %s (%s?)'u'ambiguous option: %s (%s?)'b' + Abstract base class for formatting option help. OptionParser + instances should use one of the HelpFormatter subclasses for + formatting help; by default IndentedHelpFormatter is used. + + Instance attributes: + parser : OptionParser + the controlling OptionParser instance + indent_increment : int + the number of columns to indent per nesting level + max_help_position : int + the maximum starting column for option help text + help_position : int + the calculated starting column for option help text; + initially the same as the maximum + width : int + total number of columns for output (pass None to constructor for + this value to be taken from the $COLUMNS environment variable) + level : int + current indentation level + current_indent : int + current indentation level (in columns) + help_width : int + number of columns available for option help text (calculated) + default_tag : str + text to replace with each option's default value, "%default" + by default. Set to false value to disable default value expansion. + option_strings : { Option : str } + maps Option instances to the snippet of help text explaining + the syntax of that option, e.g. "-h, --help" or + "-fFILE, --file=FILE" + _short_opt_fmt : str + format string controlling how short options with values are + printed in help text. Must be either "%s%s" ("-fFILE") or + "%s %s" ("-f FILE"), because those are the two syntaxes that + Optik supports. + _long_opt_fmt : str + similar but for long options; must be either "%s %s" ("--file FILE") + or "%s=%s" ("--file=FILE"). + 'u' + Abstract base class for formatting option help. OptionParser + instances should use one of the HelpFormatter subclasses for + formatting help; by default IndentedHelpFormatter is used. 
+ + Instance attributes: + parser : OptionParser + the controlling OptionParser instance + indent_increment : int + the number of columns to indent per nesting level + max_help_position : int + the maximum starting column for option help text + help_position : int + the calculated starting column for option help text; + initially the same as the maximum + width : int + total number of columns for output (pass None to constructor for + this value to be taken from the $COLUMNS environment variable) + level : int + current indentation level + current_indent : int + current indentation level (in columns) + help_width : int + number of columns available for option help text (calculated) + default_tag : str + text to replace with each option's default value, "%default" + by default. Set to false value to disable default value expansion. + option_strings : { Option : str } + maps Option instances to the snippet of help text explaining + the syntax of that option, e.g. "-h, --help" or + "-fFILE, --file=FILE" + _short_opt_fmt : str + format string controlling how short options with values are + printed in help text. Must be either "%s%s" ("-fFILE") or + "%s %s" ("-f FILE"), because those are the two syntaxes that + Optik supports. + _long_opt_fmt : str + similar but for long options; must be either "%s %s" ("--file FILE") + or "%s=%s" ("--file=FILE"). + 'b'COLUMNS'u'COLUMNS'b'%default'u'%default'b'invalid metavar delimiter for short options: %r'u'invalid metavar delimiter for short options: %r'b'invalid metavar delimiter for long options: %r'u'invalid metavar delimiter for long options: %r'b'subclasses must implement'u'subclasses must implement'b' + Format a paragraph of free-form text for inclusion in the + help output at the current indentation level. + 'u' + Format a paragraph of free-form text for inclusion in the + help output at the current indentation level. + 'b'Return a comma-separated list of option strings & metavariables.'u'Return a comma-separated list of option strings & metavariables.'b'Format help with indented section bodies. + 'u'Format help with indented section bodies. + 'b'Usage: %s +'u'Usage: %s +'b'Format help with underlined section headers. + 'u'Format help with underlined section headers. 
+ 'b'%s %s +'u'%s %s +'b'Usage'u'Usage'b'%s +%s +'u'%s +%s +'b'=-'u'=-'b'0b'u'0b'b'integer'u'integer'b'floating-point'u'floating-point'b'complex'u'complex'b'option %s: invalid %s value: %r'u'option %s: invalid %s value: %r'b'option %s: invalid choice: %r (choose from %s)'u'option %s: invalid choice: %r (choose from %s)'b'NO'u'NO'b'DEFAULT'u'DEFAULT'b' + Instance attributes: + _short_opts : [string] + _long_opts : [string] + + action : string + type : string + dest : string + default : any + nargs : int + const : any + choices : [string] + callback : function + callback_args : (any*) + callback_kwargs : { string : any } + help : string + metavar : string + 'u' + Instance attributes: + _short_opts : [string] + _long_opts : [string] + + action : string + type : string + dest : string + default : any + nargs : int + const : any + choices : [string] + callback : function + callback_args : (any*) + callback_kwargs : { string : any } + help : string + metavar : string + 'b'callback_args'u'callback_args'b'callback_kwargs'u'callback_kwargs'b'choice'u'choice'b'at least one option string must be supplied'u'at least one option string must be supplied'b'invalid option string %r: must be at least two characters long'u'invalid option string %r: must be at least two characters long'b'invalid short option string %r: must be of the form -x, (x any non-dash char)'u'invalid short option string %r: must be of the form -x, (x any non-dash char)'b'invalid long option string %r: must start with --, followed by non-dash'u'invalid long option string %r: must start with --, followed by non-dash'b'invalid keyword arguments: %s'u'invalid keyword arguments: %s'b'invalid action: %r'u'invalid action: %r'b'invalid option type: %r'u'invalid option type: %r'b'must not supply a type for action %r'u'must not supply a type for action %r'b'must supply a list of choices for type 'choice''u'must supply a list of choices for type 'choice''b'choices must be a list of strings ('%s' supplied)'u'choices must be a list of strings ('%s' supplied)'b'must not supply choices for type %r'u'must not supply choices for type %r'b''const' must not be supplied for action %r'u''const' must not be supplied for action %r'b''nargs' must not be supplied for action %r'u''nargs' must not be supplied for action %r'b'callback not callable: %r'u'callback not callable: %r'b'callback_args, if supplied, must be a tuple: not %r'u'callback_args, if supplied, must be a tuple: not %r'b'callback_kwargs, if supplied, must be a dict: not %r'u'callback_kwargs, if supplied, must be a dict: not %r'b'callback supplied (%r) for non-callback option'u'callback supplied (%r) for non-callback option'b'callback_args supplied for non-callback option'u'callback_args supplied for non-callback option'b'callback_kwargs supplied for non-callback option'u'callback_kwargs supplied for non-callback option'b'unknown action %r'u'unknown action %r'b'USAGE'u'USAGE'b' + Update the option values from an arbitrary dictionary, but only + use keys from dict that already have a corresponding attribute + in self. Any keys in dict without a corresponding attribute + are silently ignored. + 'u' + Update the option values from an arbitrary dictionary, but only + use keys from dict that already have a corresponding attribute + in self. Any keys in dict without a corresponding attribute + are silently ignored. + 'b' + Update the option values from an arbitrary dictionary, + using all keys from the dictionary regardless of whether + they have a corresponding attribute in self or not. 
+ 'u' + Update the option values from an arbitrary dictionary, + using all keys from the dictionary regardless of whether + they have a corresponding attribute in self or not. + 'b'careful'u'careful'b'loose'u'loose'b'invalid update mode: %r'u'invalid update mode: %r'b' + Abstract base class. + + Class attributes: + standard_option_list : [Option] + list of standard options that will be accepted by all instances + of this parser class (intended to be overridden by subclasses). + + Instance attributes: + option_list : [Option] + the list of Option objects contained by this OptionContainer + _short_opt : { string : Option } + dictionary mapping short option strings, eg. "-f" or "-X", + to the Option instances that implement them. If an Option + has multiple short option strings, it will appear in this + dictionary multiple times. [1] + _long_opt : { string : Option } + dictionary mapping long option strings, eg. "--file" or + "--exclude", to the Option instances that implement them. + Again, a given Option can occur multiple times in this + dictionary. [1] + defaults : { string : any } + dictionary mapping option destination names to default + values for each destination [1] + + [1] These mappings are common to (shared by) all components of the + controlling OptionParser, where they are initially created. + + 'u' + Abstract base class. + + Class attributes: + standard_option_list : [Option] + list of standard options that will be accepted by all instances + of this parser class (intended to be overridden by subclasses). + + Instance attributes: + option_list : [Option] + the list of Option objects contained by this OptionContainer + _short_opt : { string : Option } + dictionary mapping short option strings, eg. "-f" or "-X", + to the Option instances that implement them. If an Option + has multiple short option strings, it will appear in this + dictionary multiple times. [1] + _long_opt : { string : Option } + dictionary mapping long option strings, eg. "--file" or + "--exclude", to the Option instances that implement them. + Again, a given Option can occur multiple times in this + dictionary. [1] + defaults : { string : any } + dictionary mapping option destination names to default + values for each destination [1] + + [1] These mappings are common to (shared by) all components of the + controlling OptionParser, where they are initially created. + + 'b'resolve'u'resolve'b'invalid conflict_resolution value %r'u'invalid conflict_resolution value %r'b'see OptionParser.destroy().'u'see OptionParser.destroy().'b'conflicting option string(s): %s'u'conflicting option string(s): %s'b'add_option(Option) + add_option(opt_str, ..., kwarg=val, ...) + 'u'add_option(Option) + add_option(opt_str, ..., kwarg=val, ...) + 'b'not an Option instance: %r'u'not an Option instance: %r'b'invalid arguments'u'invalid arguments'b'no such option %r'u'no such option %r'b' + Class attributes: + standard_option_list : [Option] + list of standard options that will be accepted by all instances + of this parser class (intended to be overridden by subclasses). + + Instance attributes: + usage : string + a usage string for your program. Before it is displayed + to the user, "%prog" will be expanded to the name of + your program (self.prog or os.path.basename(sys.argv[0])). + prog : string + the name of the current program (to override + os.path.basename(sys.argv[0])). + description : string + A paragraph of text giving a brief overview of your program. 
+ optparse reformats this paragraph to fit the current terminal + width and prints it when the user requests help (after usage, + but before the list of options). + epilog : string + paragraph of help text to print after option help + + option_groups : [OptionGroup] + list of option groups in this parser (option groups are + irrelevant for parsing the command-line, but very useful + for generating help) + + allow_interspersed_args : bool = true + if true, positional arguments may be interspersed with options. + Assuming -a and -b each take a single argument, the command-line + -ablah foo bar -bboo baz + will be interpreted the same as + -ablah -bboo -- foo bar baz + If this flag were false, that command line would be interpreted as + -ablah -- foo bar -bboo baz + -- ie. we stop processing options as soon as we see the first + non-option argument. (This is the tradition followed by + Python's getopt module, Perl's Getopt::Std, and other argument- + parsing libraries, but it is generally annoying to users.) + + process_default_values : bool = true + if true, option default values are processed similarly to option + values from the command line: that is, they are passed to the + type-checking function for the option's type (as long as the + default value is a string). (This really only matters if you + have defined custom types; see SF bug #955889.) Set it to false + to restore the behaviour of Optik 1.4.1 and earlier. + + rargs : [string] + the argument list currently being parsed. Only set when + parse_args() is active, and continually trimmed down as + we consume arguments. Mainly there for the benefit of + callback options. + largs : [string] + the list of leftover arguments that we have skipped while + parsing options. If allow_interspersed_args is false, this + list is always empty. + values : Values + the set of option values currently being accumulated. Only + set when parse_args() is active. Also mainly for callbacks. + + Because of the 'rargs', 'largs', and 'values' attributes, + OptionParser is not thread-safe. If, for some perverse reason, you + need to parse command-line arguments simultaneously in different + threads, use different OptionParser instances. + + 'u' + Class attributes: + standard_option_list : [Option] + list of standard options that will be accepted by all instances + of this parser class (intended to be overridden by subclasses). + + Instance attributes: + usage : string + a usage string for your program. Before it is displayed + to the user, "%prog" will be expanded to the name of + your program (self.prog or os.path.basename(sys.argv[0])). + prog : string + the name of the current program (to override + os.path.basename(sys.argv[0])). + description : string + A paragraph of text giving a brief overview of your program. + optparse reformats this paragraph to fit the current terminal + width and prints it when the user requests help (after usage, + but before the list of options). + epilog : string + paragraph of help text to print after option help + + option_groups : [OptionGroup] + list of option groups in this parser (option groups are + irrelevant for parsing the command-line, but very useful + for generating help) + + allow_interspersed_args : bool = true + if true, positional arguments may be interspersed with options. 
+ Assuming -a and -b each take a single argument, the command-line + -ablah foo bar -bboo baz + will be interpreted the same as + -ablah -bboo -- foo bar baz + If this flag were false, that command line would be interpreted as + -ablah -- foo bar -bboo baz + -- ie. we stop processing options as soon as we see the first + non-option argument. (This is the tradition followed by + Python's getopt module, Perl's Getopt::Std, and other argument- + parsing libraries, but it is generally annoying to users.) + + process_default_values : bool = true + if true, option default values are processed similarly to option + values from the command line: that is, they are passed to the + type-checking function for the option's type (as long as the + default value is a string). (This really only matters if you + have defined custom types; see SF bug #955889.) Set it to false + to restore the behaviour of Optik 1.4.1 and earlier. + + rargs : [string] + the argument list currently being parsed. Only set when + parse_args() is active, and continually trimmed down as + we consume arguments. Mainly there for the benefit of + callback options. + largs : [string] + the list of leftover arguments that we have skipped while + parsing options. If allow_interspersed_args is false, this + list is always empty. + values : Values + the set of option values currently being accumulated. Only + set when parse_args() is active. Also mainly for callbacks. + + Because of the 'rargs', 'largs', and 'values' attributes, + OptionParser is not thread-safe. If, for some perverse reason, you + need to parse command-line arguments simultaneously in different + threads, use different OptionParser instances. + + 'b' + Declare that you are done with this OptionParser. This cleans up + reference cycles so the OptionParser (and all objects referenced by + it) can be garbage-collected promptly. After calling destroy(), the + OptionParser is unusable. + 'u' + Declare that you are done with this OptionParser. This cleans up + reference cycles so the OptionParser (and all objects referenced by + it) can be garbage-collected promptly. After calling destroy(), the + OptionParser is unusable. + 'b'--version'u'--version'b'%prog [options]'u'%prog [options]'b'Set parsing to not stop on the first non-option, allowing + interspersing switches with command arguments. This is the + default behavior. See also disable_interspersed_args() and the + class documentation description of the attribute + allow_interspersed_args.'u'Set parsing to not stop on the first non-option, allowing + interspersing switches with command arguments. This is the + default behavior. See also disable_interspersed_args() and the + class documentation description of the attribute + allow_interspersed_args.'b'Set parsing to stop on the first non-option. Use this if + you have a command processor which runs another command that + has options of its own and you want to make sure these options + don't get confused. + 'u'Set parsing to stop on the first non-option. Use this if + you have a command processor which runs another command that + has options of its own and you want to make sure these options + don't get confused. + 'b'not an OptionGroup instance: %r'u'not an OptionGroup instance: %r'b'invalid OptionGroup (wrong parser)'u'invalid OptionGroup (wrong parser)'b' + parse_args(args : [string] = sys.argv[1:], + values : Values = None) + -> (values : Values, args : [string]) + + Parse the command-line options found in 'args' (default: + sys.argv[1:]). 
Any errors result in a call to 'error()', which + by default prints the usage message to stderr and calls + sys.exit() with an error message. On success returns a pair + (values, args) where 'values' is a Values instance (with all + your option values) and 'args' is the list of arguments left + over after parsing options. + 'u' + parse_args(args : [string] = sys.argv[1:], + values : Values = None) + -> (values : Values, args : [string]) + + Parse the command-line options found in 'args' (default: + sys.argv[1:]). Any errors result in a call to 'error()', which + by default prints the usage message to stderr and calls + sys.exit() with an error message. On success returns a pair + (values, args) where 'values' is a Values instance (with all + your option values) and 'args' is the list of arguments left + over after parsing options. + 'b' + check_values(values : Values, args : [string]) + -> (values : Values, args : [string]) + + Check that the supplied option values and leftover arguments are + valid. Returns the option values and leftover arguments + (possibly adjusted, possibly completely new -- whatever you + like). Default implementation just returns the passed-in + values; subclasses may override as desired. + 'u' + check_values(values : Values, args : [string]) + -> (values : Values, args : [string]) + + Check that the supplied option values and leftover arguments are + valid. Returns the option values and leftover arguments + (possibly adjusted, possibly completely new -- whatever you + like). Default implementation just returns the passed-in + values; subclasses may override as desired. + 'b'_process_args(largs : [string], + rargs : [string], + values : Values) + + Process command-line arguments and populate 'values', consuming + options and arguments from 'rargs'. If 'allow_interspersed_args' is + false, stop at the first non-option argument. If true, accumulate any + interspersed non-option arguments in 'largs'. + 'u'_process_args(largs : [string], + rargs : [string], + values : Values) + + Process command-line arguments and populate 'values', consuming + options and arguments from 'rargs'. If 'allow_interspersed_args' is + false, stop at the first non-option argument. If true, accumulate any + interspersed non-option arguments in 'largs'. + 'b'_match_long_opt(opt : string) -> string + + Determine which long option string 'opt' matches, ie. which one + it is an unambiguous abbreviation for. Raises BadOptionError if + 'opt' doesn't unambiguously match any long option string. + 'u'_match_long_opt(opt : string) -> string + + Determine which long option string 'opt' matches, ie. which one + it is an unambiguous abbreviation for. Raises BadOptionError if + 'opt' doesn't unambiguously match any long option string. + 'b'%(option)s option requires %(number)d argument'u'%(option)s option requires %(number)d argument'b'%(option)s option requires %(number)d arguments'u'%(option)s option requires %(number)d arguments'b'%s option does not take a value'u'%s option does not take a value'b'%prog'u'%prog'b'error(msg : string) + + Print a usage message incorporating 'msg' to stderr and exit. + If you override this in a subclass, it should not return -- it + should either exit or raise an exception. + 'u'error(msg : string) + + Print a usage message incorporating 'msg' to stderr and exit. + If you override this in a subclass, it should not return -- it + should either exit or raise an exception. 
+ 'b'%s: error: %s +'u'%s: error: %s +'b'print_usage(file : file = stdout) + + Print the usage message for the current program (self.usage) to + 'file' (default stdout). Any occurrence of the string "%prog" in + self.usage is replaced with the name of the current program + (basename of sys.argv[0]). Does nothing if self.usage is empty + or not defined. + 'u'print_usage(file : file = stdout) + + Print the usage message for the current program (self.usage) to + 'file' (default stdout). Any occurrence of the string "%prog" in + self.usage is replaced with the name of the current program + (basename of sys.argv[0]). Does nothing if self.usage is empty + or not defined. + 'b'print_version(file : file = stdout) + + Print the version message for this program (self.version) to + 'file' (default stdout). As with print_usage(), any occurrence + of "%prog" in self.version is replaced by the current program's + name. Does nothing if self.version is empty or undefined. + 'u'print_version(file : file = stdout) + + Print the version message for this program (self.version) to + 'file' (default stdout). As with print_usage(), any occurrence + of "%prog" in self.version is replaced by the current program's + name. Does nothing if self.version is empty or undefined. + 'b'Options'u'Options'b'print_help(file : file = stdout) + + Print an extended help message, listing all options and any + help text provided with them, to 'file' (default stdout). + 'u'print_help(file : file = stdout) + + Print an extended help message, listing all options and any + help text provided with them, to 'file' (default stdout). + 'b'_match_abbrev(s : string, wordmap : {string : Option}) -> string + + Return the string key in 'wordmap' for which 's' is an unambiguous + abbreviation. If 's' is found to be ambiguous or doesn't match any of + 'words', raise BadOptionError. + 'u'_match_abbrev(s : string, wordmap : {string : Option}) -> string + + Return the string key in 'wordmap' for which 's' is an unambiguous + abbreviation. If 's' is found to be ambiguous or doesn't match any of + 'words', raise BadOptionError. + 'u'optparse'OS routines for NT or Posix depending on what system we're on. + +This exports: + - all functions from posix or nt, e.g. unlink, stat, etc. + - os.path is either posixpath or ntpath + - os.name is either 'posix' or 'nt' + - os.curdir is a string representing the current directory (always '.') + - os.pardir is a string representing the parent directory (always '..') + - os.sep is the (or a most common) pathname separator ('/' or '\\') + - os.extsep is the extension separator (always '.') + - os.altsep is the alternate pathname separator (None or '/') + - os.pathsep is the component separator used in $PATH etc + - os.linesep is the line separator in text files ('\r' or '\n' or '\r\n') + - os.defpath is the default search path for executables + - os.devnull is the file path of the null device ('/dev/null', etc.) + +Programs that import and use 'os' stand a better chance of being +portable between different platforms. Of course, they must then +only use functions that are defined by all platforms (e.g., unlink +and opendir), and leave all pathname manipulation to os.path +(e.g., split and join). 
+get_exec_path_exists_get_exports_list_have_functionsntpathno os specific module found_globals_add_setHAVE_FACCESSATHAVE_FCHMODATHAVE_FCHOWNATchownHAVE_FSTATATHAVE_FUTIMESATHAVE_LINKATHAVE_MKDIRATHAVE_MKFIFOATmkfifoHAVE_MKNODATmknodHAVE_OPENATHAVE_READLINKATHAVE_RENAMEATHAVE_SYMLINKATHAVE_UNLINKATHAVE_UTIMENSATsupports_dir_fdsupports_effective_idsHAVE_FCHDIRHAVE_FCHMODHAVE_FCHOWNHAVE_FDOPENDIRHAVE_FEXECVEexecveHAVE_FTRUNCATEHAVE_FUTIMENSHAVE_FUTIMESHAVE_FPATHCONFpathconfstatvfsfstatvfsHAVE_FSTATVFSsupports_fdHAVE_LCHFLAGSchflagsHAVE_LCHMODlchownHAVE_LCHOWNHAVE_LUTIMESHAVE_LSTATMS_WINDOWSsupports_follow_symlinksmakedirsexist_okmakedirs(name [, mode=0o777][, exist_ok=False]) + + Super-mkdir; create a leaf directory and all intermediate ones. Works like + mkdir, except that any intermediate path segment (not just the rightmost) + will be created if it does not exist. If the target directory already + exists, raise an OSError if exist_ok is False. Otherwise no exception is + raised. This is recursive. + + cdirremovedirsremovedirs(name) + + Super-rmdir; remove a leaf directory and all empty intermediate + ones. Works like rmdir except that, if the leaf directory is + successfully removed, directories corresponding to rightmost path + segments will be pruned away until either the whole path is + consumed or an error occurs. Errors during this latter phase are + ignored -- they generally mean that a directory was not empty. + + renamesrenames(old, new) + + Super-rename; create directories as necessary and delete any left + empty. Works like rename, except creation of any intermediate + directories needed to make the new pathname good is attempted + first. After the rename, directories corresponding to rightmost + path segments of the old name will be pruned until either the + whole path is consumed or a nonempty directory is found. + + Note: this function can fail with the new directory structure made + if you lack permissions needed to unlink the leaf directory or + file. + + topdownonerrorDirectory tree generator. + + For each directory in the directory tree rooted at top (including top + itself, but excluding '.' and '..'), yields a 3-tuple + + dirpath, dirnames, filenames + + dirpath is a string, the path to the directory. dirnames is a list of + the names of the subdirectories in dirpath (excluding '.' and '..'). + filenames is a list of the names of the non-directory files in dirpath. + Note that the names in the lists are just names, with no path components. + To get a full path (which begins with top) to a file or directory in + dirpath, do os.path.join(dirpath, name). + + If optional arg 'topdown' is true or not specified, the triple for a + directory is generated before the triples for any of its subdirectories + (directories are generated top down). If topdown is false, the triple + for a directory is generated after the triples for all of its + subdirectories (directories are generated bottom up). + + When topdown is true, the caller can modify the dirnames list in-place + (e.g., via del or slice assignment), and walk will only recurse into the + subdirectories whose names remain in dirnames; this can be used to prune the + search, or to impose a specific order of visiting. Modifying dirnames when + topdown is false has no effect on the behavior of os.walk(), since the + directories in dirnames have already been generated by the time dirnames + itself is generated. 
No matter the value of topdown, the list of + subdirectories is retrieved before the tuples for the directory and its + subdirectories are generated. + + By default errors from the os.scandir() call are ignored. If + optional arg 'onerror' is specified, it should be a function; it + will be called with one argument, an OSError instance. It can + report the error to continue with the walk, or raise the exception + to abort the walk. Note that the filename is available as the + filename attribute of the exception object. + + By default, os.walk does not follow symbolic links to subdirectories on + systems that support them. In order to get this functionality, set the + optional argument 'followlinks' to true. + + Caution: if you pass a relative pathname for top, don't change the + current working directory between resumptions of walk. walk never + changes the current directory, and assumes that the client doesn't + either. + + Example: + + import os + from os.path import join, getsize + for root, dirs, files in os.walk('python/Lib/email'): + print(root, "consumes", end="") + print(sum(getsize(join(root, name)) for name in files), end="") + print("bytes in", len(files), "non-directory files") + if 'CVS' in dirs: + dirs.remove('CVS') # don't visit CVS directories + + nondirswalk_dirsscandir_itwalk_intois_symlinkfwalkfollow_symlinksdir_fdDirectory tree generator. + + This behaves exactly like walk(), except that it yields a 4-tuple + + dirpath, dirnames, filenames, dirfd + + `dirpath`, `dirnames` and `filenames` are identical to walk() output, + and `dirfd` is a file descriptor referring to the directory `dirpath`. + + The advantage of fwalk() over walk() is that it's safe against symlink + races (when follow_symlinks is False). + + If dir_fd is not None, it should be a file descriptor open to a directory, + and top should be relative; top will then be relative to that directory. + (dir_fd is always supported for fwalk.) + + Caution: + Since fwalk() yields file descriptors, those are only valid until the + next iteration step, so you should dup() them if you want to keep them + for a longer period. + + Example: + + import os + for root, dirs, files, rootfd in os.fwalk('python/Lib/email'): + print(root, "consumes", end="") + print(sum(os.stat(name, dir_fd=rootfd).st_size for name in files), + end="") + print("bytes in", len(files), "non-directory files") + if 'CVS' in dirs: + dirs.remove('CVS') # don't visit CVS directories + orig_stO_RDONLYtopfd_fwalktoppathisbytesentriesdirfddirpathexeclexecl(file, *args) + + Execute the executable file with argument list args, replacing the + current process. execvexecleexecle(file, *args, env) + + Execute the executable file with argument list args and + environment env, replacing the current process. execlpexeclp(file, *args) + + Execute the executable file (which is searched for along $PATH) + with argument list args, replacing the current process. execvpexeclpeexeclpe(file, *args, env) + + Execute the executable file (which is searched for along $PATH) + with argument list args and environment env, replacing the current + process. execvpeexecvp(file, args) + + Execute the executable file (which is searched for along $PATH) + with argument list args, replacing the current process. + args may be a list or tuple of strings. _execvpeexecvpe(file, args, env) + + Execute the executable file (which is searched for along $PATH) + with argument list args and environment env, replacing the + current process. + args may be a list or tuple of strings. 
exec_funcargrestsaved_exclast_excReturns the sequence of directories that will be searched for the + named executable (similar to a shell) when launching a process. + + *env* must be an environment variable dict or None. If *env* is None, + os.environ will be used. + supports_bytes_environpath_listbenv cannot contain 'PATH' and b'PATH' keys_Environencodekeydecodekeyencodevaluedecodevalueputenvunsetenvencodedkeyenviron({{{}}}){!r}: {!r}_putenv_unsetenv_createenvironcheck_strstr expected, not %sGet an environment variable, return None if it doesn't exist. + The optional second argument can specify an alternate default. + key, default and the result are str._check_bytesbytes expected, not %sgetenvbGet an environment variable, return None if it doesn't exist. + The optional second argument can specify an alternate default. + key, default and the result are bytes._fscodecEncode filename (an os.PathLike, bytes, or str) to the filesystem + encoding with 'surrogateescape' error handler, return bytes unchanged. + On Windows, use 'strict' error handler if the file system encoding is + 'mbcs' (which is the default encoding). + Decode filename (an os.PathLike, bytes, or str) from the filesystem + encoding with 'surrogateescape' error handler, return str unchanged. On + Windows, use 'strict' error handler if the file system encoding is + 'mbcs' (which is the default encoding). + spawnvP_WAITP_NOWAITP_NOWAITO_spawnvefargv must be a tuple or a listargv first element cannot be emptywpidWIFSTOPPEDNot stopped, signaled or exited???spawnv(mode, file, args) -> integer + +Execute file with arguments from args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. spawnvespawnve(mode, file, args, env) -> integer + +Execute file with arguments from args in a subprocess with the +specified environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. spawnvpspawnvp(mode, file, args) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. spawnvpespawnvpe(mode, file, args, env) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. spawnlspawnl(mode, file, *args) -> integer + +Execute file with arguments from args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. spawnlespawnle(mode, file, *args, env) -> integer + +Execute file with arguments from args in a subprocess with the +supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 
spawnlpspawnlp(mode, file, *args) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. spawnlpespawnlpe(mode, file, *args, env) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. invalid cmd type (%s, expected string)invalid mode %rpopen() does not support unbuffered streams_wrap_closeinvalid fd type (%s, expected integer)_fspathReturn the path representation of a path-like object. + + If str or bytes is passed in, it is returned unchanged. Otherwise the + os.PathLike interface is used to get the path representation. If the + path representation is not str or bytes, TypeError is raised. If the + provided path is not str, bytes, or os.PathLike, TypeError is raised. + path_typepath_reprexpected str, bytes or os.PathLike object, not "expected str, bytes or os.PathLike object, "expected {}.__fspath__() to return str or bytes, not {}"expected {}.__fspath__() to return str or bytes, ""not {}"Abstract base class for implementing the file system path protocol.Return the file system path representation of the object._AddedDllDirectorycookieremove_dll_directory_cookie_remove_dll_directoryadd_dll_directoryAdd a path to the DLL search path. + + This search path is used when resolving dependencies for imported + extension modules (the module itself is resolved through sys.path), + and also by ctypes. + + Remove the directory by calling close() on the returned object or + using it in a with statement. + _add_dll_directory#'# Note: more names are added to __all__ later.# Any new dependencies of the os module and/or changes in path separator# requires updating importlib as well.# fstat always works# mac os x10.3# Some platforms don't support lchmod(). Often the function exists# anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP.# (No, I don't know why that's a good design.) ./configure will detect# this and reject it--so HAVE_LCHMOD still won't be defined on such# platforms. This is Very Helpful.# However, sometimes platforms without a working lchmod() *do* have# fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15,# OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes# it behave like lchmod(). So in theory it would be a suitable# replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s# flag doesn't work *either*. Sadly ./configure isn't sophisticated# enough to detect this condition--it only determines whether or not# fchmodat() minimally works.# Therefore we simply ignore fchmodat() when deciding whether or not# os.chmod supports follow_symlinks. Just checking lchmod() is# sufficient. 
After all--if you have a working fchmodat(), your# lchmod() almost certainly works too.# _add("HAVE_FCHMODAT", "chmod")# Python uses fixed values for the SEEK_ constants; they are mapped# to native constants if necessary in posixmodule.c# Other possible SEEK values are directly imported from posixmodule.c# Super directory utilities.# (Inspired by Eric Raymond; the doc strings are mostly his)# Defeats race condition when another thread created the path# xxx/newdir/. exists if xxx/newdir exists# Cannot rely on checking for EEXIST, since the operating system# could give priority to other errors like EACCES or EROFS# We may not have read permission for top, in which case we can't# get a list of the files the directory contains. os.walk# always suppressed the exception then, rather than blow up for a# minor reason when (say) a thousand readable directories are still# left to visit. That logic is copied here.# Note that scandir is global in this module due# to earlier import-*.# If is_dir() raises an OSError, consider that the entry is not# a directory, same behaviour than os.path.isdir().# Bottom-up: recurse into sub-directory, but exclude symlinks to# directories if followlinks is False# If is_symlink() raises an OSError, consider that the# entry is not a symbolic link, same behaviour than# os.path.islink().# Yield before recursion if going top down# Recurse into sub-directories# Issue #23605: os.path.islink() is used instead of caching# entry.is_symlink() result during the loop on os.scandir() because# the caller can replace the directory entry during the "yield"# above.# Yield after recursion if going bottom up# Note: To guard against symlink races, we use the standard# lstat()/open()/fstat() trick.# Note: This uses O(depth of the directory tree) file descriptors: if# necessary, it can be adapted to only require O(1) FDs, see issue# #13734.# Add dangling symlinks, ignore disappeared files# Use a local import instead of a global import to limit the number of# modules loaded at startup: the os module is always loaded at startup by# Python. It may also avoid a bootstrap issue.# {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a# BytesWarning when using python -b or python -bb: ignore the warning# Change environ to automatically call putenv(), unsetenv if they exist.# raise KeyError with the original key value# list() from dict object is an atomic operation# Where Env Var Names Must Be UPPERCASE# Where Env Var Names Can Be Mixed Case# unicode environ# bytes environ# Does type-checking of `filename`.# Supply spawn*() (probably only for Unix)# XXX Should we support P_DETACH? I suppose it could fork()**2# and close the std I/O streams. 
Also, P_OVERLAY is the same# as execv*()?# Internal helper; func is the exec*() function to use# Parent# Caller is responsible for waiting!# Note: spawnvp[e] isn't currently supported on Windows# These aren't supplied by the basic Windows code# but can be easily implemented in Python# At the moment, Windows doesn't implement spawnvp[e],# so it won't have spawnlp[e] either.# Supply os.popen()# Helper for popen() -- a proxy for a file whose close waits for the process# Shift left to match old behavior# Supply os.fdopen()# For testing purposes, make sure the function is available when the C# implementation exists.# Work from the object's type to match method resolution of other magic# methods.# If there is no C implementation, make the pure Python version the# implementation as transparently as possible.b'OS routines for NT or Posix depending on what system we're on. + +This exports: + - all functions from posix or nt, e.g. unlink, stat, etc. + - os.path is either posixpath or ntpath + - os.name is either 'posix' or 'nt' + - os.curdir is a string representing the current directory (always '.') + - os.pardir is a string representing the parent directory (always '..') + - os.sep is the (or a most common) pathname separator ('/' or '\\') + - os.extsep is the extension separator (always '.') + - os.altsep is the alternate pathname separator (None or '/') + - os.pathsep is the component separator used in $PATH etc + - os.linesep is the line separator in text files ('\r' or '\n' or '\r\n') + - os.defpath is the default search path for executables + - os.devnull is the file path of the null device ('/dev/null', etc.) + +Programs that import and use 'os' stand a better chance of being +portable between different platforms. Of course, they must then +only use functions that are defined by all platforms (e.g., unlink +and opendir), and leave all pathname manipulation to os.path +(e.g., split and join). +'u'OS routines for NT or Posix depending on what system we're on. + +This exports: + - all functions from posix or nt, e.g. unlink, stat, etc. + - os.path is either posixpath or ntpath + - os.name is either 'posix' or 'nt' + - os.curdir is a string representing the current directory (always '.') + - os.pardir is a string representing the parent directory (always '..') + - os.sep is the (or a most common) pathname separator ('/' or '\\') + - os.extsep is the extension separator (always '.') + - os.altsep is the alternate pathname separator (None or '/') + - os.pathsep is the component separator used in $PATH etc + - os.linesep is the line separator in text files ('\r' or '\n' or '\r\n') + - os.defpath is the default search path for executables + - os.devnull is the file path of the null device ('/dev/null', etc.) + +Programs that import and use 'os' stand a better chance of being +portable between different platforms. Of course, they must then +only use functions that are defined by all platforms (e.g., unlink +and opendir), and leave all pathname manipulation to os.path +(e.g., split and join). 
+'b'fsencode'u'fsencode'b'fsdecode'u'fsdecode'b'get_exec_path'u'get_exec_path'b'fdopen'u'fdopen'b'popen'u'popen'b'_exit'u'_exit'b'no os specific module found'u'no os specific module found'b'os.path'u'os.path'b'_have_functions'u'_have_functions'b'HAVE_FACCESSAT'u'HAVE_FACCESSAT'b'access'u'access'b'HAVE_FCHMODAT'u'HAVE_FCHMODAT'b'chmod'u'chmod'b'HAVE_FCHOWNAT'u'HAVE_FCHOWNAT'b'chown'u'chown'b'HAVE_FSTATAT'u'HAVE_FSTATAT'b'stat'u'stat'b'HAVE_FUTIMESAT'u'HAVE_FUTIMESAT'b'utime'u'utime'b'HAVE_LINKAT'u'HAVE_LINKAT'b'HAVE_MKDIRAT'u'HAVE_MKDIRAT'b'mkdir'u'mkdir'b'HAVE_MKFIFOAT'u'HAVE_MKFIFOAT'b'mkfifo'u'mkfifo'b'HAVE_MKNODAT'u'HAVE_MKNODAT'b'mknod'u'mknod'b'HAVE_OPENAT'u'HAVE_OPENAT'b'HAVE_READLINKAT'u'HAVE_READLINKAT'b'readlink'u'readlink'b'HAVE_RENAMEAT'u'HAVE_RENAMEAT'b'rename'u'rename'b'HAVE_SYMLINKAT'u'HAVE_SYMLINKAT'b'symlink'u'symlink'b'HAVE_UNLINKAT'u'HAVE_UNLINKAT'b'rmdir'u'rmdir'b'HAVE_UTIMENSAT'u'HAVE_UTIMENSAT'b'HAVE_FCHDIR'u'HAVE_FCHDIR'b'chdir'u'chdir'b'HAVE_FCHMOD'u'HAVE_FCHMOD'b'HAVE_FCHOWN'u'HAVE_FCHOWN'b'HAVE_FDOPENDIR'u'HAVE_FDOPENDIR'b'listdir'u'listdir'b'scandir'u'scandir'b'HAVE_FEXECVE'u'HAVE_FEXECVE'b'execve'u'execve'b'HAVE_FTRUNCATE'u'HAVE_FTRUNCATE'b'truncate'u'truncate'b'HAVE_FUTIMENS'u'HAVE_FUTIMENS'b'HAVE_FUTIMES'u'HAVE_FUTIMES'b'HAVE_FPATHCONF'u'HAVE_FPATHCONF'b'pathconf'u'pathconf'b'statvfs'u'statvfs'b'fstatvfs'u'fstatvfs'b'HAVE_FSTATVFS'u'HAVE_FSTATVFS'b'HAVE_LCHFLAGS'u'HAVE_LCHFLAGS'b'chflags'u'chflags'b'HAVE_LCHMOD'u'HAVE_LCHMOD'b'lchown'u'lchown'b'HAVE_LCHOWN'u'HAVE_LCHOWN'b'HAVE_LUTIMES'u'HAVE_LUTIMES'b'HAVE_LSTAT'u'HAVE_LSTAT'b'MS_WINDOWS'u'MS_WINDOWS'b'makedirs(name [, mode=0o777][, exist_ok=False]) + + Super-mkdir; create a leaf directory and all intermediate ones. Works like + mkdir, except that any intermediate path segment (not just the rightmost) + will be created if it does not exist. If the target directory already + exists, raise an OSError if exist_ok is False. Otherwise no exception is + raised. This is recursive. + + 'u'makedirs(name [, mode=0o777][, exist_ok=False]) + + Super-mkdir; create a leaf directory and all intermediate ones. Works like + mkdir, except that any intermediate path segment (not just the rightmost) + will be created if it does not exist. If the target directory already + exists, raise an OSError if exist_ok is False. Otherwise no exception is + raised. This is recursive. + + 'b'removedirs(name) + + Super-rmdir; remove a leaf directory and all empty intermediate + ones. Works like rmdir except that, if the leaf directory is + successfully removed, directories corresponding to rightmost path + segments will be pruned away until either the whole path is + consumed or an error occurs. Errors during this latter phase are + ignored -- they generally mean that a directory was not empty. + + 'u'removedirs(name) + + Super-rmdir; remove a leaf directory and all empty intermediate + ones. Works like rmdir except that, if the leaf directory is + successfully removed, directories corresponding to rightmost path + segments will be pruned away until either the whole path is + consumed or an error occurs. Errors during this latter phase are + ignored -- they generally mean that a directory was not empty. + + 'b'renames(old, new) + + Super-rename; create directories as necessary and delete any left + empty. Works like rename, except creation of any intermediate + directories needed to make the new pathname good is attempted + first. 
After the rename, directories corresponding to rightmost + path segments of the old name will be pruned until either the + whole path is consumed or a nonempty directory is found. + + Note: this function can fail with the new directory structure made + if you lack permissions needed to unlink the leaf directory or + file. + + 'u'renames(old, new) + + Super-rename; create directories as necessary and delete any left + empty. Works like rename, except creation of any intermediate + directories needed to make the new pathname good is attempted + first. After the rename, directories corresponding to rightmost + path segments of the old name will be pruned until either the + whole path is consumed or a nonempty directory is found. + + Note: this function can fail with the new directory structure made + if you lack permissions needed to unlink the leaf directory or + file. + + 'b'makedirs'u'makedirs'b'removedirs'u'removedirs'b'renames'u'renames'b'Directory tree generator. + + For each directory in the directory tree rooted at top (including top + itself, but excluding '.' and '..'), yields a 3-tuple + + dirpath, dirnames, filenames + + dirpath is a string, the path to the directory. dirnames is a list of + the names of the subdirectories in dirpath (excluding '.' and '..'). + filenames is a list of the names of the non-directory files in dirpath. + Note that the names in the lists are just names, with no path components. + To get a full path (which begins with top) to a file or directory in + dirpath, do os.path.join(dirpath, name). + + If optional arg 'topdown' is true or not specified, the triple for a + directory is generated before the triples for any of its subdirectories + (directories are generated top down). If topdown is false, the triple + for a directory is generated after the triples for all of its + subdirectories (directories are generated bottom up). + + When topdown is true, the caller can modify the dirnames list in-place + (e.g., via del or slice assignment), and walk will only recurse into the + subdirectories whose names remain in dirnames; this can be used to prune the + search, or to impose a specific order of visiting. Modifying dirnames when + topdown is false has no effect on the behavior of os.walk(), since the + directories in dirnames have already been generated by the time dirnames + itself is generated. No matter the value of topdown, the list of + subdirectories is retrieved before the tuples for the directory and its + subdirectories are generated. + + By default errors from the os.scandir() call are ignored. If + optional arg 'onerror' is specified, it should be a function; it + will be called with one argument, an OSError instance. It can + report the error to continue with the walk, or raise the exception + to abort the walk. Note that the filename is available as the + filename attribute of the exception object. + + By default, os.walk does not follow symbolic links to subdirectories on + systems that support them. In order to get this functionality, set the + optional argument 'followlinks' to true. + + Caution: if you pass a relative pathname for top, don't change the + current working directory between resumptions of walk. walk never + changes the current directory, and assumes that the client doesn't + either. 
+ + Example: + + import os + from os.path import join, getsize + for root, dirs, files in os.walk('python/Lib/email'): + print(root, "consumes", end="") + print(sum(getsize(join(root, name)) for name in files), end="") + print("bytes in", len(files), "non-directory files") + if 'CVS' in dirs: + dirs.remove('CVS') # don't visit CVS directories + + 'u'Directory tree generator. + + For each directory in the directory tree rooted at top (including top + itself, but excluding '.' and '..'), yields a 3-tuple + + dirpath, dirnames, filenames + + dirpath is a string, the path to the directory. dirnames is a list of + the names of the subdirectories in dirpath (excluding '.' and '..'). + filenames is a list of the names of the non-directory files in dirpath. + Note that the names in the lists are just names, with no path components. + To get a full path (which begins with top) to a file or directory in + dirpath, do os.path.join(dirpath, name). + + If optional arg 'topdown' is true or not specified, the triple for a + directory is generated before the triples for any of its subdirectories + (directories are generated top down). If topdown is false, the triple + for a directory is generated after the triples for all of its + subdirectories (directories are generated bottom up). + + When topdown is true, the caller can modify the dirnames list in-place + (e.g., via del or slice assignment), and walk will only recurse into the + subdirectories whose names remain in dirnames; this can be used to prune the + search, or to impose a specific order of visiting. Modifying dirnames when + topdown is false has no effect on the behavior of os.walk(), since the + directories in dirnames have already been generated by the time dirnames + itself is generated. No matter the value of topdown, the list of + subdirectories is retrieved before the tuples for the directory and its + subdirectories are generated. + + By default errors from the os.scandir() call are ignored. If + optional arg 'onerror' is specified, it should be a function; it + will be called with one argument, an OSError instance. It can + report the error to continue with the walk, or raise the exception + to abort the walk. Note that the filename is available as the + filename attribute of the exception object. + + By default, os.walk does not follow symbolic links to subdirectories on + systems that support them. In order to get this functionality, set the + optional argument 'followlinks' to true. + + Caution: if you pass a relative pathname for top, don't change the + current working directory between resumptions of walk. walk never + changes the current directory, and assumes that the client doesn't + either. + + Example: + + import os + from os.path import join, getsize + for root, dirs, files in os.walk('python/Lib/email'): + print(root, "consumes", end="") + print(sum(getsize(join(root, name)) for name in files), end="") + print("bytes in", len(files), "non-directory files") + if 'CVS' in dirs: + dirs.remove('CVS') # don't visit CVS directories + + 'b'walk'u'walk'b'Directory tree generator. + + This behaves exactly like walk(), except that it yields a 4-tuple + + dirpath, dirnames, filenames, dirfd + + `dirpath`, `dirnames` and `filenames` are identical to walk() output, + and `dirfd` is a file descriptor referring to the directory `dirpath`. + + The advantage of fwalk() over walk() is that it's safe against symlink + races (when follow_symlinks is False). 
+ + If dir_fd is not None, it should be a file descriptor open to a directory, + and top should be relative; top will then be relative to that directory. + (dir_fd is always supported for fwalk.) + + Caution: + Since fwalk() yields file descriptors, those are only valid until the + next iteration step, so you should dup() them if you want to keep them + for a longer period. + + Example: + + import os + for root, dirs, files, rootfd in os.fwalk('python/Lib/email'): + print(root, "consumes", end="") + print(sum(os.stat(name, dir_fd=rootfd).st_size for name in files), + end="") + print("bytes in", len(files), "non-directory files") + if 'CVS' in dirs: + dirs.remove('CVS') # don't visit CVS directories + 'u'Directory tree generator. + + This behaves exactly like walk(), except that it yields a 4-tuple + + dirpath, dirnames, filenames, dirfd + + `dirpath`, `dirnames` and `filenames` are identical to walk() output, + and `dirfd` is a file descriptor referring to the directory `dirpath`. + + The advantage of fwalk() over walk() is that it's safe against symlink + races (when follow_symlinks is False). + + If dir_fd is not None, it should be a file descriptor open to a directory, + and top should be relative; top will then be relative to that directory. + (dir_fd is always supported for fwalk.) + + Caution: + Since fwalk() yields file descriptors, those are only valid until the + next iteration step, so you should dup() them if you want to keep them + for a longer period. + + Example: + + import os + for root, dirs, files, rootfd in os.fwalk('python/Lib/email'): + print(root, "consumes", end="") + print(sum(os.stat(name, dir_fd=rootfd).st_size for name in files), + end="") + print("bytes in", len(files), "non-directory files") + if 'CVS' in dirs: + dirs.remove('CVS') # don't visit CVS directories + 'b'fwalk'u'fwalk'b'execl(file, *args) + + Execute the executable file with argument list args, replacing the + current process. 'u'execl(file, *args) + + Execute the executable file with argument list args, replacing the + current process. 'b'execle(file, *args, env) + + Execute the executable file with argument list args and + environment env, replacing the current process. 'u'execle(file, *args, env) + + Execute the executable file with argument list args and + environment env, replacing the current process. 'b'execlp(file, *args) + + Execute the executable file (which is searched for along $PATH) + with argument list args, replacing the current process. 'u'execlp(file, *args) + + Execute the executable file (which is searched for along $PATH) + with argument list args, replacing the current process. 'b'execlpe(file, *args, env) + + Execute the executable file (which is searched for along $PATH) + with argument list args and environment env, replacing the current + process. 'u'execlpe(file, *args, env) + + Execute the executable file (which is searched for along $PATH) + with argument list args and environment env, replacing the current + process. 'b'execvp(file, args) + + Execute the executable file (which is searched for along $PATH) + with argument list args, replacing the current process. + args may be a list or tuple of strings. 'u'execvp(file, args) + + Execute the executable file (which is searched for along $PATH) + with argument list args, replacing the current process. + args may be a list or tuple of strings. 'b'execvpe(file, args, env) + + Execute the executable file (which is searched for along $PATH) + with argument list args and environment env, replacing the + current process. 
+ args may be a list or tuple of strings. 'u'execvpe(file, args, env) + + Execute the executable file (which is searched for along $PATH) + with argument list args and environment env, replacing the + current process. + args may be a list or tuple of strings. 'b'execl'u'execl'b'execle'u'execle'b'execlp'u'execlp'b'execlpe'u'execlpe'b'execvp'u'execvp'b'execvpe'u'execvpe'b'Returns the sequence of directories that will be searched for the + named executable (similar to a shell) when launching a process. + + *env* must be an environment variable dict or None. If *env* is None, + os.environ will be used. + 'u'Returns the sequence of directories that will be searched for the + named executable (similar to a shell) when launching a process. + + *env* must be an environment variable dict or None. If *env* is None, + os.environ will be used. + 'b'env cannot contain 'PATH' and b'PATH' keys'u'env cannot contain 'PATH' and b'PATH' keys'b'environ({{{}}})'u'environ({{{}}})'b'{!r}: {!r}'u'{!r}: {!r}'b'putenv'u'putenv'b'unsetenv'u'unsetenv'b'str expected, not %s'u'str expected, not %s'b'Get an environment variable, return None if it doesn't exist. + The optional second argument can specify an alternate default. + key, default and the result are str.'u'Get an environment variable, return None if it doesn't exist. + The optional second argument can specify an alternate default. + key, default and the result are str.'b'getenv'u'getenv'b'supports_bytes_environ'u'supports_bytes_environ'b'bytes expected, not %s'u'bytes expected, not %s'b'Get an environment variable, return None if it doesn't exist. + The optional second argument can specify an alternate default. + key, default and the result are bytes.'u'Get an environment variable, return None if it doesn't exist. + The optional second argument can specify an alternate default. + key, default and the result are bytes.'b'getenvb'u'getenvb'b'Encode filename (an os.PathLike, bytes, or str) to the filesystem + encoding with 'surrogateescape' error handler, return bytes unchanged. + On Windows, use 'strict' error handler if the file system encoding is + 'mbcs' (which is the default encoding). + 'u'Encode filename (an os.PathLike, bytes, or str) to the filesystem + encoding with 'surrogateescape' error handler, return bytes unchanged. + On Windows, use 'strict' error handler if the file system encoding is + 'mbcs' (which is the default encoding). + 'b'Decode filename (an os.PathLike, bytes, or str) from the filesystem + encoding with 'surrogateescape' error handler, return str unchanged. On + Windows, use 'strict' error handler if the file system encoding is + 'mbcs' (which is the default encoding). + 'u'Decode filename (an os.PathLike, bytes, or str) from the filesystem + encoding with 'surrogateescape' error handler, return str unchanged. On + Windows, use 'strict' error handler if the file system encoding is + 'mbcs' (which is the default encoding). + 'b'spawnv'u'spawnv'b'execv'u'execv'b'P_WAIT'u'P_WAIT'b'P_NOWAIT'u'P_NOWAIT'b'P_NOWAITO'u'P_NOWAITO'b'argv must be a tuple or a list'u'argv must be a tuple or a list'b'argv first element cannot be empty'u'argv first element cannot be empty'b'Not stopped, signaled or exited???'u'Not stopped, signaled or exited???'b'spawnv(mode, file, args) -> integer + +Execute file with arguments from args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 
'u'spawnv(mode, file, args) -> integer + +Execute file with arguments from args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 'b'spawnve(mode, file, args, env) -> integer + +Execute file with arguments from args in a subprocess with the +specified environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 'u'spawnve(mode, file, args, env) -> integer + +Execute file with arguments from args in a subprocess with the +specified environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 'b'spawnvp(mode, file, args) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 'u'spawnvp(mode, file, args) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 'b'spawnvpe(mode, file, args, env) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 'u'spawnvpe(mode, file, args, env) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 'b'spawnve'u'spawnve'b'spawnvp'u'spawnvp'b'spawnvpe'u'spawnvpe'b'spawnl(mode, file, *args) -> integer + +Execute file with arguments from args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 'u'spawnl(mode, file, *args) -> integer + +Execute file with arguments from args in a subprocess. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 'b'spawnle(mode, file, *args, env) -> integer + +Execute file with arguments from args in a subprocess with the +supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 'u'spawnle(mode, file, *args, env) -> integer + +Execute file with arguments from args in a subprocess with the +supplied environment. +If mode == P_NOWAIT return the pid of the process. 
+If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 'b'spawnl'u'spawnl'b'spawnle'u'spawnle'b'spawnlp(mode, file, *args) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 'u'spawnlp(mode, file, *args) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 'b'spawnlpe(mode, file, *args, env) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 'u'spawnlpe(mode, file, *args, env) -> integer + +Execute file (which is looked for along $PATH) with arguments from +args in a subprocess with the supplied environment. +If mode == P_NOWAIT return the pid of the process. +If mode == P_WAIT return the process's exit code if it exits normally; +otherwise return -SIG, where SIG is the signal that killed it. 'b'spawnlp'u'spawnlp'b'spawnlpe'u'spawnlpe'b'invalid cmd type (%s, expected string)'u'invalid cmd type (%s, expected string)'b'invalid mode %r'u'invalid mode %r'b'popen() does not support unbuffered streams'u'popen() does not support unbuffered streams'b'invalid fd type (%s, expected integer)'u'invalid fd type (%s, expected integer)'b'Return the path representation of a path-like object. + + If str or bytes is passed in, it is returned unchanged. Otherwise the + os.PathLike interface is used to get the path representation. If the + path representation is not str or bytes, TypeError is raised. If the + provided path is not str, bytes, or os.PathLike, TypeError is raised. + 'u'Return the path representation of a path-like object. + + If str or bytes is passed in, it is returned unchanged. Otherwise the + os.PathLike interface is used to get the path representation. If the + path representation is not str or bytes, TypeError is raised. If the + provided path is not str, bytes, or os.PathLike, TypeError is raised. + 'b'__fspath__'u'__fspath__'b'expected str, bytes or os.PathLike object, not 'u'expected str, bytes or os.PathLike object, not 'b'expected {}.__fspath__() to return str or bytes, not {}'u'expected {}.__fspath__() to return str or bytes, not {}'b'fspath'u'fspath'b'Abstract base class for implementing the file system path protocol.'u'Abstract base class for implementing the file system path protocol.'b'Return the file system path representation of the object.'u'Return the file system path representation of the object.'b''u''b''u''b'Add a path to the DLL search path. + + This search path is used when resolving dependencies for imported + extension modules (the module itself is resolved through sys.path), + and also by ctypes. + + Remove the directory by calling close() on the returned object or + using it in a with statement. + 'u'Add a path to the DLL search path. 
+ + This search path is used when resolving dependencies for imported + extension modules (the module itself is resolved through sys.path), + and also by ctypes. + + Remove the directory by calling close() on the returned object or + using it in a with statement. + 'u'os'Parser engine for the grammar tables generated by pgen. + +The grammar table must be loaded first. + +See Parser/parser.c in the Python distribution for additional info on +how this parsing engine works. + +Exception to signal the parser is stuck.%s: type=%r, value=%r, context=%rParser engine. + + The proper usage sequence is: + + p = Parser(grammar, [converter]) # create instance + p.setup([start]) # prepare for parsing + : + if p.addtoken(...): # parse a token; may raise ParseError + break + root = p.rootnode # root of abstract syntax tree + + A Parser instance may be reused by calling setup() repeatedly. + + A Parser instance contains state pertaining to the current token + sequence, and should not be used concurrently by different threads + to parse separate token sequences. + + See driver.py for how to get input tokens by tokenizing a file or + string. + + Parsing is complete when addtoken() returns True; the root of the + abstract syntax tree can then be retrieved from the rootnode + instance variable. When a syntax error occurs, addtoken() raises + the ParseError exception. There is no error recovery; the parser + cannot be used after a syntax error was reported (but it can be + reinitialized by calling setup()). + + Constructor. + + The grammar argument is a grammar.Grammar instance; see the + grammar module for more information. + + The parser is not ready yet for parsing; you must call the + setup() method to get it started. + + The optional convert argument is a function mapping concrete + syntax tree nodes to abstract syntax tree nodes. If not + given, no conversion is done and the syntax tree produced is + the concrete syntax tree. If given, it must be a function of + two arguments, the first being the grammar (a grammar.Grammar + instance), and the second being the concrete syntax tree node + to be converted. The syntax tree is converted from the bottom + up. + + A concrete syntax tree node is a (type, value, context, nodes) + tuple, where type is the node type (a token or symbol number), + value is None for symbols and a string for tokens, context is + None or an opaque value used for error reporting (typically a + (lineno, offset) pair), and nodes is a list of children for + symbols, and None for tokens. + + An abstract syntax tree node may be anything; this is entirely + up to the converter function. + + Prepare for parsing. + + This *must* be called before starting to parse. + + The optional argument is an alternative start symbol; it + defaults to the grammar's start symbol. + + You can use a Parser instance to parse any number of programs; + each time you call setup() the parser is reset to an initial + state determined by the (implicit or explicit) start symbol. + + newnodestackentryused_namesAdd a token; return True iff this is the end of the program.classifyilabeldfaarcsitsdfaitsstatesitsfirsttoo much inputbad inputTurn a token into a label. (Internal)bad tokenShift a token. (Internal)newdfaPush a nonterminal. (Internal)Pop a nonterminal. 
(Internal)popdfapopstatepopnode# Each stack entry is a tuple: (dfa, state, node).# A node is a tuple: (type, value, context, children),# where children is a list of nodes or None, and context may be None.# Aliased to self.rootnode.used_names in pop()# Map from token to label# Loop until the token is shifted; may raise exceptions# Look for a state with this label# Look it up in the list of labels# Shift a token; we're done with it# Pop while we are in an accept-only state# Done parsing!# Done with this token# See if it's a symbol and if we're in its first set# Push a symbol# To continue the outer while loop# An accepting state, pop it and try something else# Done parsing, but another token is input# No success finding a transition# Keep a listing of all used names# Check for reserved wordsb'Parser engine for the grammar tables generated by pgen. + +The grammar table must be loaded first. + +See Parser/parser.c in the Python distribution for additional info on +how this parsing engine works. + +'u'Parser engine for the grammar tables generated by pgen. + +The grammar table must be loaded first. + +See Parser/parser.c in the Python distribution for additional info on +how this parsing engine works. + +'b'Exception to signal the parser is stuck.'u'Exception to signal the parser is stuck.'b'%s: type=%r, value=%r, context=%r'u'%s: type=%r, value=%r, context=%r'b'Parser engine. + + The proper usage sequence is: + + p = Parser(grammar, [converter]) # create instance + p.setup([start]) # prepare for parsing + : + if p.addtoken(...): # parse a token; may raise ParseError + break + root = p.rootnode # root of abstract syntax tree + + A Parser instance may be reused by calling setup() repeatedly. + + A Parser instance contains state pertaining to the current token + sequence, and should not be used concurrently by different threads + to parse separate token sequences. + + See driver.py for how to get input tokens by tokenizing a file or + string. + + Parsing is complete when addtoken() returns True; the root of the + abstract syntax tree can then be retrieved from the rootnode + instance variable. When a syntax error occurs, addtoken() raises + the ParseError exception. There is no error recovery; the parser + cannot be used after a syntax error was reported (but it can be + reinitialized by calling setup()). + + 'u'Parser engine. + + The proper usage sequence is: + + p = Parser(grammar, [converter]) # create instance + p.setup([start]) # prepare for parsing + : + if p.addtoken(...): # parse a token; may raise ParseError + break + root = p.rootnode # root of abstract syntax tree + + A Parser instance may be reused by calling setup() repeatedly. + + A Parser instance contains state pertaining to the current token + sequence, and should not be used concurrently by different threads + to parse separate token sequences. + + See driver.py for how to get input tokens by tokenizing a file or + string. + + Parsing is complete when addtoken() returns True; the root of the + abstract syntax tree can then be retrieved from the rootnode + instance variable. When a syntax error occurs, addtoken() raises + the ParseError exception. There is no error recovery; the parser + cannot be used after a syntax error was reported (but it can be + reinitialized by calling setup()). + + 'b'Constructor. + + The grammar argument is a grammar.Grammar instance; see the + grammar module for more information. + + The parser is not ready yet for parsing; you must call the + setup() method to get it started. 
+ + The optional convert argument is a function mapping concrete + syntax tree nodes to abstract syntax tree nodes. If not + given, no conversion is done and the syntax tree produced is + the concrete syntax tree. If given, it must be a function of + two arguments, the first being the grammar (a grammar.Grammar + instance), and the second being the concrete syntax tree node + to be converted. The syntax tree is converted from the bottom + up. + + A concrete syntax tree node is a (type, value, context, nodes) + tuple, where type is the node type (a token or symbol number), + value is None for symbols and a string for tokens, context is + None or an opaque value used for error reporting (typically a + (lineno, offset) pair), and nodes is a list of children for + symbols, and None for tokens. + + An abstract syntax tree node may be anything; this is entirely + up to the converter function. + + 'u'Constructor. + + The grammar argument is a grammar.Grammar instance; see the + grammar module for more information. + + The parser is not ready yet for parsing; you must call the + setup() method to get it started. + + The optional convert argument is a function mapping concrete + syntax tree nodes to abstract syntax tree nodes. If not + given, no conversion is done and the syntax tree produced is + the concrete syntax tree. If given, it must be a function of + two arguments, the first being the grammar (a grammar.Grammar + instance), and the second being the concrete syntax tree node + to be converted. The syntax tree is converted from the bottom + up. + + A concrete syntax tree node is a (type, value, context, nodes) + tuple, where type is the node type (a token or symbol number), + value is None for symbols and a string for tokens, context is + None or an opaque value used for error reporting (typically a + (lineno, offset) pair), and nodes is a list of children for + symbols, and None for tokens. + + An abstract syntax tree node may be anything; this is entirely + up to the converter function. + + 'b'Prepare for parsing. + + This *must* be called before starting to parse. + + The optional argument is an alternative start symbol; it + defaults to the grammar's start symbol. + + You can use a Parser instance to parse any number of programs; + each time you call setup() the parser is reset to an initial + state determined by the (implicit or explicit) start symbol. + + 'u'Prepare for parsing. + + This *must* be called before starting to parse. + + The optional argument is an alternative start symbol; it + defaults to the grammar's start symbol. + + You can use a Parser instance to parse any number of programs; + each time you call setup() the parser is reset to an initial + state determined by the (implicit or explicit) start symbol. + + 'b'Add a token; return True iff this is the end of the program.'u'Add a token; return True iff this is the end of the program.'b'too much input'u'too much input'b'bad input'u'bad input'b'Turn a token into a label. (Internal)'u'Turn a token into a label. (Internal)'b'bad token'u'bad token'b'Shift a token. (Internal)'u'Shift a token. (Internal)'b'Push a nonterminal. (Internal)'u'Push a nonterminal. (Internal)'b'Pop a nonterminal. (Internal)'u'Pop a nonterminal. (Internal)'u'lib2to3.pgen2.parse'u'pgen2.parse'Parse (absolute and relative) URLs. + +urlparse module is based upon the following RFC specifications. + +RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding +and L. Masinter, January 2005. 
+ +RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter +and L.Masinter, December 1999. + +RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T. +Berners-Lee, R. Fielding, and L. Masinter, August 1998. + +RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998. + +RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June +1995. + +RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M. +McCahill, December 1994 + +RFC 3986 is considered the current standard and any future changes to +urlparse module should conform with it. The urlparse module is +currently not entirely compliant with this RFC due to defacto +scenarios for parsing, and for backward compatibility purposes, some +parsing quirks from older RFCs are retained. The testcases in +test_urlparse.py provides a good indicator of parsing behavior. +urlunparseurljoinurldefragurlunsplitparse_qsparse_qslquote_from_bytesDefragResultParseResultSplitResultDefragResultBytesParseResultBytesSplitResultBytesgophernntpwaisshttpmmsprosperortsprtspusftpsvnsvn+sshwssuses_relativetelnetsnewsrsyncnfsgitgit+sshuses_netlochdlsipsipsteluses_paramsmailtonon_hierarchicaluses_queryuses_fragmentabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+-.'abcdefghijklmnopqrstuvwxyz''ABCDEFGHIJKLMNOPQRSTUVWXYZ''0123456789''+-.'scheme_chars_UNSAFE_URL_BYTES_TO_REMOVEMAX_CACHE_SIZE_parse_cacheclear_cacheClear the parse cache and the quoters cache._safe_quoters_implicit_encoding_implicit_errors_noop_encode_result_decode_args_coerce_argsstr_inputCannot mix str and non-str arguments_ResultMixinStrStandard approach to encoding parsed results from str to bytes_encoded_counterpart_ResultMixinBytesStandard approach to decoding parsed results from bytes to str_decoded_counterpart_NetlocResultMixinBaseShared methods for the parsed result objects containing a netloc element_userinfo_hostinfoPort could not be cast to integer value as Port out of range 0-65535_NetlocResultMixinStruserinfohave_infohostinfohave_passwordhave_open_brbracketed_NetlocResultMixinBytesurl fragment_DefragResultBasescheme netloc path query fragment_SplitResultBasescheme netloc path params query fragment_ParseResultBase +DefragResult(url, fragment) + +A 2-tuple that contains the url without fragment identifier and the fragment +identifier as a separate argument. +The URL with no fragment identifier. +Fragment identifier separated from URL, that allows indirect identification of a +secondary resource by reference to a primary resource and additional identifying +information. +fragment +SplitResult(scheme, netloc, path, query, fragment) + +A 5-tuple that contains the different components of a URL. Similar to +ParseResult, but does not split params. +Specifies URL scheme for the request. +Network location where the request is made to. + +The hierarchical path, such as the path to a file to download. + +The query component, that contains non-hierarchical data, that along with data +in path component, identifies a resource in the scope of URI's scheme and +network location. + +Fragment identifier, that allows indirect identification of a secondary resource +by reference to a primary resource and additional identifying information. + +ParseResult(scheme, netloc, path, params, query, fragment) + +A 6-tuple that contains components of a parsed URL. + +Parameters for last path element used to dereference the URI in order to provide +access to perform some operation on the resource. 
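The DefragResult/SplitResult/ParseResult field descriptions above correspond to named attributes on the objects returned by urlparse() and urlsplit(). An illustrative sketch (the URL itself is arbitrary):

    from urllib.parse import urlparse, urlsplit

    parts = urlparse("https://user@example.com:8042/over/there;name=ferret?q=ferret#nose")
    parts.scheme    # 'https'
    parts.netloc    # 'user@example.com:8042'
    parts.path      # '/over/there'
    parts.params    # 'name=ferret'
    parts.query     # 'q=ferret'
    parts.fragment  # 'nose'
    parts.hostname  # 'example.com'
    parts.port      # 8042

    # urlsplit() is the 5-tuple variant: it does not split params off the path.
    urlsplit("https://example.com/over/there;name=ferret").path  # '/over/there;name=ferret'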
+ResultBase_fix_result_transcoding_result_pairs_decoded_encodedallow_fragmentsParse a URL into 6 components: + :///;?# + Return a 6-tuple: (scheme, netloc, path, params, query, fragment). + Note that we don't break the components up in smaller bits + (e.g. netloc is a single string) and we don't expand % escapes._coerce_resultsplitresult_splitparams_splitnetloc/?#wdelim_checknetlocNFKCnetloc2/?#@:netloc '' contains invalid characters under NFKC normalization_remove_unsafe_bytes_from_urlParse a URL into 5 components: + :///?# + Return a 5-tuple: (scheme, netloc, path, query, fragment). + Note that we don't break the components up in smaller bits + (e.g. netloc is a single string) and we don't expand % escapes.Invalid IPv6 URLPut a parsed URL back together again. This may result in a + slightly different, but equivalent URL, if the URL that was parsed + originally had redundant delimiters, e.g. a ? with an empty query + (the draft states that these are equivalent).%s;%sCombine the elements of a tuple as returned by urlsplit() into a + complete URL as a string. The data argument can be any five-item iterable. + This may result in a slightly different, but equivalent URL, if the URL that + was parsed originally had unnecessary delimiters (for example, a ? with an + empty query; the RFC states that these are equivalent).Join a base URL and a possibly relative URL to form an absolute + interpretation of the latter.bschemebnetlocbpathbparamsbquerybfragmentbase_partssegmentsresolved_pathsegRemoves any existing fragment from URL. + + Returns a tuple of the defragmented URL and the fragment. If + the URL contained no fragments, the second element is the + empty string. + fragdefrag0123456789ABCDEFabcdef_hexdig_hextobyteunquote_to_bytes('abc%20def') -> b'abc def'.([-]+)_asciireReplace %xx escapes by their single-character equivalent. The optional + encoding and errors parameters specify how to decode percent-encoded + sequences into Unicode characters, as accepted by the bytes.decode() + method. + By default, percent-encoded sequences are decoded with UTF-8, and invalid + sequences are replaced by a placeholder character. + + unquote('abc%20def') -> 'abc def'. + Expected str, got bytesqskeep_blank_valuesstrict_parsingmax_num_fieldsParse a query given as a string argument. + + Arguments: + + qs: percent-encoded query string to be parsed + + keep_blank_values: flag indicating whether blank values in + percent-encoded queries should be treated as blank strings. + A true value indicates that blanks should be retained as + blank strings. The default false value indicates that + blank values are to be ignored and treated as if they were + not included. + + strict_parsing: flag indicating what to do with parsing errors. + If false (the default), errors are silently ignored. + If true, errors raise a ValueError exception. + + encoding and errors: specify how to decode percent-encoded sequences + into Unicode characters, as accepted by the bytes.decode() method. + + max_num_fields: int. If set, then throws a ValueError if there + are more than n fields read by parse_qsl(). + + separator: str. The symbol to use for separating the query arguments. + Defaults to &. + + Returns a dictionary. + parsed_resultpairsParse a query given as a string argument. + + Arguments: + + qs: percent-encoded query string to be parsed + + keep_blank_values: flag indicating whether blank values in + percent-encoded queries should be treated as blank strings. + A true value indicates that blanks should be retained as blank + strings. 
The default false value indicates that blank values + are to be ignored and treated as if they were not included. + + strict_parsing: flag indicating what to do with parsing errors. If + false (the default), errors are silently ignored. If true, + errors raise a ValueError exception. + + encoding and errors: specify how to decode percent-encoded sequences + into Unicode characters, as accepted by the bytes.decode() method. + + max_num_fields: int. If set, then throws a ValueError + if there are more than n fields read by parse_qsl(). + + separator: str. The symbol to use for separating the query arguments. + Defaults to &. + + Returns a list, as G-d intended. + Separator must be of type string or bytes.Max number of fields exceededname_valuebad query field: %rLike unquote(), but also replace plus signs by spaces, as required for + unquoting HTML form values. + + unquote_plus('%7e/abc+def') -> '~/abc def' + ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.-~b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'b'abcdefghijklmnopqrstuvwxyz'b'_.-~'_ALWAYS_SAFE_ALWAYS_SAFE_BYTESQuoterA mapping from bytes (in range(0,256)) to strings. + + String values are percent-encoded byte values, unless the key < 128, and + in the "safe" set (either the specified safe set, or default set). + safe: bytes object.%{:02X}quote('abc def') -> 'abc%20def' + + Each part of a URL, e.g. the path info, the query, etc., has a + different set of reserved characters that must be quoted. The + quote function offers a cautious (not minimal) way to quote a + string for most of these parts. + + RFC 3986 Uniform Resource Identifier (URI): Generic Syntax lists + the following (un)reserved characters. + + unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" + reserved = gen-delims / sub-delims + gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@" + sub-delims = "!" / "$" / "&" / "'" / "(" / ")" + / "*" / "+" / "," / ";" / "=" + + Each of the reserved characters is reserved in some component of a URL, + but not necessarily in all of them. + + The quote function %-escapes all characters that are neither in the + unreserved chars ("always safe") nor the additional chars set via the + safe arg. + + The default for the safe arg is '/'. The character is reserved, but in + typical usage the quote function is being called on a path where the + existing slash characters are to be preserved. + + Python 3.7 updates from using RFC 2396 to RFC 3986 to quote URL strings. + Now, "~" is included in the set of unreserved characters. + + string and safe may be either str or bytes objects. encoding and errors + must not be specified if string is a bytes object. + + The optional encoding and errors parameters specify how to deal with + non-ASCII characters, as accepted by the str.encode method. + By default, encoding='utf-8' (characters are encoded with UTF-8), and + errors='strict' (unsupported characters raise a UnicodeEncodeError). + quote() doesn't support 'encoding' for bytesquote() doesn't support 'errors' for bytesLike quote(), but also replace ' ' with '+', as required for quoting + HTML form values. Plus signs in the original string are escaped unless + they are included in safe. It also does not have safe default to '/'. + spaceLike quote(), but accepts a bytes object rather than a str, and does + not perform string-to-bytes encoding. It always returns an ASCII string. + quote_from_bytes(b'abc def?') -> 'abc%20def%3f' + quote_from_bytes() expected bytesquoterdoseqquote_viaEncode a dict or sequence of two-element tuples into a URL query string. 
+ + If any values in the query arg are sequences and doseq is true, each + sequence element is converted to a separate parameter. + + If the query arg is a sequence of two-element tuples, the order of the + parameters in the output will match the order of parameters in the + input. + + The components of a query arg may each be either a string or a bytes type. + + The safe, encoding, and errors parameters are passed down to the function + specified by quote_via (encoding and errors only if a component is a str). + tyvanot a valid non-string sequence or mapping object"not a valid non-string sequence ""or mapping object"urllib.parse.to_bytes() is deprecated as of 3.8_to_bytesto_bytes(u"URL") --> 'URL'.URL contains non-ASCII charactersTransform a string like '' into 'scheme://host/path'. + + The string is returned unchanged if it's not a wrapped URL. + URL:splittypeurllib.parse.splittype() is deprecated as of 3.8, use urllib.parse.urlparse() instead"urllib.parse.splittype() is deprecated as of 3.8, ""use urllib.parse.urlparse() instead"_typeprogsplittype('type:opaquestring') --> 'type', 'opaquestring'.([^/:]+):(.*)splithosturllib.parse.splithost() is deprecated as of 3.8, use urllib.parse.urlparse() instead"urllib.parse.splithost() is deprecated as of 3.8, "_hostprogsplithost('//host[:port]/path') --> 'host[:port]', '/path'.//([^/#?]*)(.*)host_portsplituserurllib.parse.splituser() is deprecated as of 3.8, use urllib.parse.urlparse() instead"urllib.parse.splituser() is deprecated as of 3.8, "splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.splitpasswdurllib.parse.splitpasswd() is deprecated as of 3.8, use urllib.parse.urlparse() instead"urllib.parse.splitpasswd() is deprecated as of 3.8, "_splitpasswdsplitpasswd('user:passwd') -> 'user', 'passwd'.splitporturllib.parse.splitport() is deprecated as of 3.8, use urllib.parse.urlparse() instead"urllib.parse.splitport() is deprecated as of 3.8, "_splitport_portprogsplitport('host:port') --> 'host', 'port'.(.*):([0-9]*)splitnportdefporturllib.parse.splitnport() is deprecated as of 3.8, use urllib.parse.urlparse() instead"urllib.parse.splitnport() is deprecated as of 3.8, "_splitnportSplit host and port, returning numeric port. + Return given default port if no ':' found; defaults to -1. + Return numerical port if a valid number are found after ':'. + Return None if ':' but not a valid number.nportsplitqueryurllib.parse.splitquery() is deprecated as of 3.8, use urllib.parse.urlparse() instead"urllib.parse.splitquery() is deprecated as of 3.8, "_splitquerysplitquery('/path?query') --> '/path', 'query'.splittagurllib.parse.splittag() is deprecated as of 3.8, use urllib.parse.urlparse() instead"urllib.parse.splittag() is deprecated as of 3.8, "_splittagsplittag('/path#tag') --> '/path', 'tag'.splitattrurllib.parse.splitattr() is deprecated as of 3.8, use urllib.parse.urlparse() instead"urllib.parse.splitattr() is deprecated as of 3.8, "_splitattrsplitattr('/path;attr1=value1;attr2=value2;...') -> + '/path', ['attr1=value1', 'attr2=value2', ...].splitvalueurllib.parse.splitvalue() is deprecated as of 3.8, use urllib.parse.parse_qsl() instead"urllib.parse.splitvalue() is deprecated as of 3.8, ""use urllib.parse.parse_qsl() instead"_splitvaluesplitvalue('attr=value') --> 'attr', 'value'.# A classification of schemes.# The empty string classifies URLs with no scheme specified,# being the default value returned by “urlsplit” and “urlparse”.# These are not actually used anymore, but should stay for backwards# compatibility. 
(They are undocumented, but have a public-looking name.)# Characters valid in scheme names# Unsafe bytes to be removed per WHATWG spec# XXX: Consider replacing with functools.lru_cache# Helpers for bytes handling# For 3.2, we deliberately require applications that# handle improperly quoted URLs to do their own# decoding and encoding. If valid use cases are# presented, we may relax this by using latin-1# decoding internally for 3.3# Invokes decode if necessary to create str args# and returns the coerced inputs along with# an appropriate result coercion function# - noop for str inputs# - encoding function otherwise# We special-case the empty string to support the# "scheme=''" default argument to some functions# Result objects are more helpful than simple tuples# Scoped IPv6 address may have zone info, which must not be lowercased# like http://[fe80::822a:a8ff:fe49:470c%tESt]:1234/keys# For backwards compatibility, alias _NetlocResultMixinStr# ResultBase is no longer part of the documented API, but it is# retained since deprecating it isn't worth the hassle# Structured result objects for string data# Structured result objects for bytes data# Set up the encode/decode result pairs# position of end of domain part of url, default is end# look for delimiters; the order is NOT important# find first of this delim# if found# use earliest delim position# return (domain, rest)# looking for characters like \u2100 that expand to 'a/c'# IDNA uses NFKC equivalence, so normalize for this check# ignore characters already included# but not the surrounding text# avoid runaway growth# optimize the common case# make sure "url" is not actually a port number (in which case# "scheme" is really part of the path)# not a port number# the last item is not a directory, so will not be taken into account# in resolving the relative path# for rfc3986, ignore all base path should the first character be root.# filter out elements that would cause redundant slashes on re-joining# the resolved_path# ignore any .. segments that would otherwise cause an IndexError# when popped from resolved_path if resolving for rfc3986# do some post-processing here. if the last segment was a relative dir,# then we need to append the trailing '/'# Note: strings are encoded as UTF-8. This is only an issue if it contains# unescaped non-ASCII characters, which URIs should not.# Is it a string-like object?# If max_num_fields is defined then check that the number of fields# is less than max_num_fields. This prevents a memory exhaustion DOS# attack via post bodies with many fields.# Handle case of a control-name with no equal sign# Keeps a cache internally, using defaultdict, for efficiency (lookups# of cached keys don't call Python code at all).# Without this, will just display as a defaultdict# Handle a cache miss. Store quoted string in cache and return.# Check if ' ' in string, where string may either be a str or bytes. If# there are no spaces, the regular quote will produce the right answer.# Normalize 'safe' by converting to bytes and removing non-ASCII chars# It's a bother at times that strings and string-like objects are# sequences.# non-sequence items should not work with len()# non-empty strings will fail this# Zero-length sequences of all types will get here and succeed,# but that's a minor nit. Since the original implementation# allowed empty dicts that type of behavior probably should be# preserved for consistency# Is this a sufficient test for sequence-ness?# not a sequence# loop over the sequence# Most URL schemes require ASCII. 
If that changes, the conversion# can be relaxed.# XXX get rid of to_bytes()# splittag('/path#tag') --> '/path', 'tag'b'Parse (absolute and relative) URLs. + +urlparse module is based upon the following RFC specifications. + +RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding +and L. Masinter, January 2005. + +RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter +and L.Masinter, December 1999. + +RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T. +Berners-Lee, R. Fielding, and L. Masinter, August 1998. + +RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998. + +RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June +1995. + +RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M. +McCahill, December 1994 + +RFC 3986 is considered the current standard and any future changes to +urlparse module should conform with it. The urlparse module is +currently not entirely compliant with this RFC due to defacto +scenarios for parsing, and for backward compatibility purposes, some +parsing quirks from older RFCs are retained. The testcases in +test_urlparse.py provides a good indicator of parsing behavior. +'u'Parse (absolute and relative) URLs. + +urlparse module is based upon the following RFC specifications. + +RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding +and L. Masinter, January 2005. + +RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter +and L.Masinter, December 1999. + +RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T. +Berners-Lee, R. Fielding, and L. Masinter, August 1998. + +RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zawinski, July 1998. + +RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June +1995. + +RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M. +McCahill, December 1994 + +RFC 3986 is considered the current standard and any future changes to +urlparse module should conform with it. The urlparse module is +currently not entirely compliant with this RFC due to defacto +scenarios for parsing, and for backward compatibility purposes, some +parsing quirks from older RFCs are retained. The testcases in +test_urlparse.py provides a good indicator of parsing behavior. 
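Taken together, the docstrings above describe a small round-trip between query strings, dicts or pair lists, and percent-encoding, plus relative-URL resolution. A short illustrative sketch of those documented helpers:

    from urllib.parse import urlencode, parse_qs, parse_qsl, quote, unquote, urljoin

    query = urlencode({"q": "a b", "page": 2})   # 'q=a+b&page=2'
    parse_qs(query)                              # {'q': ['a b'], 'page': ['2']}
    parse_qsl(query)                             # [('q', 'a b'), ('page', '2')]

    quote("abc def")                             # 'abc%20def'  ('/' is safe by default)
    unquote("abc%20def")                         # 'abc def'

    # urljoin resolves a possibly relative URL against a base, per RFC 3986.
    urljoin("https://example.com/docs/index.html", "../img/logo.png")
    # 'https://example.com/img/logo.png'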
+'b'urlunparse'u'urlunparse'b'urljoin'u'urljoin'b'urldefrag'u'urldefrag'b'urlsplit'u'urlsplit'b'urlunsplit'u'urlunsplit'b'parse_qs'u'parse_qs'b'parse_qsl'u'parse_qsl'b'quote_from_bytes'u'quote_from_bytes'b'unquote_to_bytes'u'unquote_to_bytes'b'DefragResult'u'DefragResult'b'ParseResult'u'ParseResult'b'SplitResult'u'SplitResult'b'DefragResultBytes'u'DefragResultBytes'b'ParseResultBytes'u'ParseResultBytes'b'SplitResultBytes'u'SplitResultBytes'b'gopher'u'gopher'b'nntp'u'nntp'b'wais'u'wais'b'shttp'u'shttp'b'mms'u'mms'b'prospero'u'prospero'b'rtsp'u'rtsp'b'rtspu'u'rtspu'b'sftp'u'sftp'b'svn'u'svn'b'svn+ssh'u'svn+ssh'b'ws'u'ws'b'wss'u'wss'b'telnet'u'telnet'b'snews'u'snews'b'rsync'u'rsync'b'nfs'u'nfs'b'git'u'git'b'git+ssh'u'git+ssh'b'hdl'u'hdl'b'sip'u'sip'b'sips'u'sips'b'tel'u'tel'b'mailto'u'mailto'b'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+-.'u'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+-.'b'Clear the parse cache and the quoters cache.'u'Clear the parse cache and the quoters cache.'b'Cannot mix str and non-str arguments'u'Cannot mix str and non-str arguments'b'Standard approach to encoding parsed results from str to bytes'u'Standard approach to encoding parsed results from str to bytes'b'Standard approach to decoding parsed results from bytes to str'u'Standard approach to decoding parsed results from bytes to str'b'Shared methods for the parsed result objects containing a netloc element'u'Shared methods for the parsed result objects containing a netloc element'b'Port could not be cast to integer value as 'u'Port could not be cast to integer value as 'b'Port out of range 0-65535'u'Port out of range 0-65535'b'url fragment'u'url fragment'b'scheme netloc path query fragment'u'scheme netloc path query fragment'b'scheme netloc path params query fragment'u'scheme netloc path params query fragment'b' +DefragResult(url, fragment) + +A 2-tuple that contains the url without fragment identifier and the fragment +identifier as a separate argument. +'u' +DefragResult(url, fragment) + +A 2-tuple that contains the url without fragment identifier and the fragment +identifier as a separate argument. +'b'The URL with no fragment identifier.'u'The URL with no fragment identifier.'b' +Fragment identifier separated from URL, that allows indirect identification of a +secondary resource by reference to a primary resource and additional identifying +information. +'u' +Fragment identifier separated from URL, that allows indirect identification of a +secondary resource by reference to a primary resource and additional identifying +information. +'b' +SplitResult(scheme, netloc, path, query, fragment) + +A 5-tuple that contains the different components of a URL. Similar to +ParseResult, but does not split params. +'u' +SplitResult(scheme, netloc, path, query, fragment) + +A 5-tuple that contains the different components of a URL. Similar to +ParseResult, but does not split params. +'b'Specifies URL scheme for the request.'u'Specifies URL scheme for the request.'b' +Network location where the request is made to. +'u' +Network location where the request is made to. +'b' +The hierarchical path, such as the path to a file to download. +'u' +The hierarchical path, such as the path to a file to download. +'b' +The query component, that contains non-hierarchical data, that along with data +in path component, identifies a resource in the scope of URI's scheme and +network location. 
+'u' +The query component, that contains non-hierarchical data, that along with data +in path component, identifies a resource in the scope of URI's scheme and +network location. +'b' +Fragment identifier, that allows indirect identification of a secondary resource +by reference to a primary resource and additional identifying information. +'u' +Fragment identifier, that allows indirect identification of a secondary resource +by reference to a primary resource and additional identifying information. +'b' +ParseResult(scheme, netloc, path, params, query, fragment) + +A 6-tuple that contains components of a parsed URL. +'u' +ParseResult(scheme, netloc, path, params, query, fragment) + +A 6-tuple that contains components of a parsed URL. +'b' +Parameters for last path element used to dereference the URI in order to provide +access to perform some operation on the resource. +'u' +Parameters for last path element used to dereference the URI in order to provide +access to perform some operation on the resource. +'b'Parse a URL into 6 components: + :///;?# + Return a 6-tuple: (scheme, netloc, path, params, query, fragment). + Note that we don't break the components up in smaller bits + (e.g. netloc is a single string) and we don't expand % escapes.'u'Parse a URL into 6 components: + :///;?# + Return a 6-tuple: (scheme, netloc, path, params, query, fragment). + Note that we don't break the components up in smaller bits + (e.g. netloc is a single string) and we don't expand % escapes.'b'/?#'u'/?#'b'NFKC'u'NFKC'b'/?#@:'u'/?#@:'b'netloc ''u'netloc ''b'' contains invalid 'u'' contains invalid 'b'characters under NFKC normalization'u'characters under NFKC normalization'b'Parse a URL into 5 components: + :///?# + Return a 5-tuple: (scheme, netloc, path, query, fragment). + Note that we don't break the components up in smaller bits + (e.g. netloc is a single string) and we don't expand % escapes.'u'Parse a URL into 5 components: + :///?# + Return a 5-tuple: (scheme, netloc, path, query, fragment). + Note that we don't break the components up in smaller bits + (e.g. netloc is a single string) and we don't expand % escapes.'b'Invalid IPv6 URL'u'Invalid IPv6 URL'b'Put a parsed URL back together again. This may result in a + slightly different, but equivalent URL, if the URL that was parsed + originally had redundant delimiters, e.g. a ? with an empty query + (the draft states that these are equivalent).'u'Put a parsed URL back together again. This may result in a + slightly different, but equivalent URL, if the URL that was parsed + originally had redundant delimiters, e.g. a ? with an empty query + (the draft states that these are equivalent).'b'%s;%s'u'%s;%s'b'Combine the elements of a tuple as returned by urlsplit() into a + complete URL as a string. The data argument can be any five-item iterable. + This may result in a slightly different, but equivalent URL, if the URL that + was parsed originally had unnecessary delimiters (for example, a ? with an + empty query; the RFC states that these are equivalent).'u'Combine the elements of a tuple as returned by urlsplit() into a + complete URL as a string. The data argument can be any five-item iterable. + This may result in a slightly different, but equivalent URL, if the URL that + was parsed originally had unnecessary delimiters (for example, a ? 
with an + empty query; the RFC states that these are equivalent).'b'Join a base URL and a possibly relative URL to form an absolute + interpretation of the latter.'u'Join a base URL and a possibly relative URL to form an absolute + interpretation of the latter.'b'Removes any existing fragment from URL. + + Returns a tuple of the defragmented URL and the fragment. If + the URL contained no fragments, the second element is the + empty string. + 'u'Removes any existing fragment from URL. + + Returns a tuple of the defragmented URL and the fragment. If + the URL contained no fragments, the second element is the + empty string. + 'b'0123456789ABCDEFabcdef'u'0123456789ABCDEFabcdef'b'unquote_to_bytes('abc%20def') -> b'abc def'.'u'unquote_to_bytes('abc%20def') -> b'abc def'.'b'([-]+)'u'([-]+)'b'Replace %xx escapes by their single-character equivalent. The optional + encoding and errors parameters specify how to decode percent-encoded + sequences into Unicode characters, as accepted by the bytes.decode() + method. + By default, percent-encoded sequences are decoded with UTF-8, and invalid + sequences are replaced by a placeholder character. + + unquote('abc%20def') -> 'abc def'. + 'u'Replace %xx escapes by their single-character equivalent. The optional + encoding and errors parameters specify how to decode percent-encoded + sequences into Unicode characters, as accepted by the bytes.decode() + method. + By default, percent-encoded sequences are decoded with UTF-8, and invalid + sequences are replaced by a placeholder character. + + unquote('abc%20def') -> 'abc def'. + 'b'Expected str, got bytes'u'Expected str, got bytes'b'Parse a query given as a string argument. + + Arguments: + + qs: percent-encoded query string to be parsed + + keep_blank_values: flag indicating whether blank values in + percent-encoded queries should be treated as blank strings. + A true value indicates that blanks should be retained as + blank strings. The default false value indicates that + blank values are to be ignored and treated as if they were + not included. + + strict_parsing: flag indicating what to do with parsing errors. + If false (the default), errors are silently ignored. + If true, errors raise a ValueError exception. + + encoding and errors: specify how to decode percent-encoded sequences + into Unicode characters, as accepted by the bytes.decode() method. + + max_num_fields: int. If set, then throws a ValueError if there + are more than n fields read by parse_qsl(). + + separator: str. The symbol to use for separating the query arguments. + Defaults to &. + + Returns a dictionary. + 'u'Parse a query given as a string argument. + + Arguments: + + qs: percent-encoded query string to be parsed + + keep_blank_values: flag indicating whether blank values in + percent-encoded queries should be treated as blank strings. + A true value indicates that blanks should be retained as + blank strings. The default false value indicates that + blank values are to be ignored and treated as if they were + not included. + + strict_parsing: flag indicating what to do with parsing errors. + If false (the default), errors are silently ignored. + If true, errors raise a ValueError exception. + + encoding and errors: specify how to decode percent-encoded sequences + into Unicode characters, as accepted by the bytes.decode() method. + + max_num_fields: int. If set, then throws a ValueError if there + are more than n fields read by parse_qsl(). + + separator: str. The symbol to use for separating the query arguments. + Defaults to &. 
+ + Returns a dictionary. + 'b'Parse a query given as a string argument. + + Arguments: + + qs: percent-encoded query string to be parsed + + keep_blank_values: flag indicating whether blank values in + percent-encoded queries should be treated as blank strings. + A true value indicates that blanks should be retained as blank + strings. The default false value indicates that blank values + are to be ignored and treated as if they were not included. + + strict_parsing: flag indicating what to do with parsing errors. If + false (the default), errors are silently ignored. If true, + errors raise a ValueError exception. + + encoding and errors: specify how to decode percent-encoded sequences + into Unicode characters, as accepted by the bytes.decode() method. + + max_num_fields: int. If set, then throws a ValueError + if there are more than n fields read by parse_qsl(). + + separator: str. The symbol to use for separating the query arguments. + Defaults to &. + + Returns a list, as G-d intended. + 'u'Parse a query given as a string argument. + + Arguments: + + qs: percent-encoded query string to be parsed + + keep_blank_values: flag indicating whether blank values in + percent-encoded queries should be treated as blank strings. + A true value indicates that blanks should be retained as blank + strings. The default false value indicates that blank values + are to be ignored and treated as if they were not included. + + strict_parsing: flag indicating what to do with parsing errors. If + false (the default), errors are silently ignored. If true, + errors raise a ValueError exception. + + encoding and errors: specify how to decode percent-encoded sequences + into Unicode characters, as accepted by the bytes.decode() method. + + max_num_fields: int. If set, then throws a ValueError + if there are more than n fields read by parse_qsl(). + + separator: str. The symbol to use for separating the query arguments. + Defaults to &. + + Returns a list, as G-d intended. + 'b'Separator must be of type string or bytes.'u'Separator must be of type string or bytes.'b'Max number of fields exceeded'u'Max number of fields exceeded'b'bad query field: %r'u'bad query field: %r'b'Like unquote(), but also replace plus signs by spaces, as required for + unquoting HTML form values. + + unquote_plus('%7e/abc+def') -> '~/abc def' + 'u'Like unquote(), but also replace plus signs by spaces, as required for + unquoting HTML form values. + + unquote_plus('%7e/abc+def') -> '~/abc def' + 'b'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.-~'b'A mapping from bytes (in range(0,256)) to strings. + + String values are percent-encoded byte values, unless the key < 128, and + in the "safe" set (either the specified safe set, or default set). + 'u'A mapping from bytes (in range(0,256)) to strings. + + String values are percent-encoded byte values, unless the key < 128, and + in the "safe" set (either the specified safe set, or default set). + 'b'safe: bytes object.'u'safe: bytes object.'b'%{:02X}'u'%{:02X}'b'quote('abc def') -> 'abc%20def' + + Each part of a URL, e.g. the path info, the query, etc., has a + different set of reserved characters that must be quoted. The + quote function offers a cautious (not minimal) way to quote a + string for most of these parts. + + RFC 3986 Uniform Resource Identifier (URI): Generic Syntax lists + the following (un)reserved characters. + + unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" + reserved = gen-delims / sub-delims + gen-delims = ":" / "/" / "?" 
/ "#" / "[" / "]" / "@" + sub-delims = "!" / "$" / "&" / "'" / "(" / ")" + / "*" / "+" / "," / ";" / "=" + + Each of the reserved characters is reserved in some component of a URL, + but not necessarily in all of them. + + The quote function %-escapes all characters that are neither in the + unreserved chars ("always safe") nor the additional chars set via the + safe arg. + + The default for the safe arg is '/'. The character is reserved, but in + typical usage the quote function is being called on a path where the + existing slash characters are to be preserved. + + Python 3.7 updates from using RFC 2396 to RFC 3986 to quote URL strings. + Now, "~" is included in the set of unreserved characters. + + string and safe may be either str or bytes objects. encoding and errors + must not be specified if string is a bytes object. + + The optional encoding and errors parameters specify how to deal with + non-ASCII characters, as accepted by the str.encode method. + By default, encoding='utf-8' (characters are encoded with UTF-8), and + errors='strict' (unsupported characters raise a UnicodeEncodeError). + 'u'quote('abc def') -> 'abc%20def' + + Each part of a URL, e.g. the path info, the query, etc., has a + different set of reserved characters that must be quoted. The + quote function offers a cautious (not minimal) way to quote a + string for most of these parts. + + RFC 3986 Uniform Resource Identifier (URI): Generic Syntax lists + the following (un)reserved characters. + + unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" + reserved = gen-delims / sub-delims + gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@" + sub-delims = "!" / "$" / "&" / "'" / "(" / ")" + / "*" / "+" / "," / ";" / "=" + + Each of the reserved characters is reserved in some component of a URL, + but not necessarily in all of them. + + The quote function %-escapes all characters that are neither in the + unreserved chars ("always safe") nor the additional chars set via the + safe arg. + + The default for the safe arg is '/'. The character is reserved, but in + typical usage the quote function is being called on a path where the + existing slash characters are to be preserved. + + Python 3.7 updates from using RFC 2396 to RFC 3986 to quote URL strings. + Now, "~" is included in the set of unreserved characters. + + string and safe may be either str or bytes objects. encoding and errors + must not be specified if string is a bytes object. + + The optional encoding and errors parameters specify how to deal with + non-ASCII characters, as accepted by the str.encode method. + By default, encoding='utf-8' (characters are encoded with UTF-8), and + errors='strict' (unsupported characters raise a UnicodeEncodeError). + 'b'quote() doesn't support 'encoding' for bytes'u'quote() doesn't support 'encoding' for bytes'b'quote() doesn't support 'errors' for bytes'u'quote() doesn't support 'errors' for bytes'b'Like quote(), but also replace ' ' with '+', as required for quoting + HTML form values. Plus signs in the original string are escaped unless + they are included in safe. It also does not have safe default to '/'. + 'u'Like quote(), but also replace ' ' with '+', as required for quoting + HTML form values. Plus signs in the original string are escaped unless + they are included in safe. It also does not have safe default to '/'. + 'b'Like quote(), but accepts a bytes object rather than a str, and does + not perform string-to-bytes encoding. It always returns an ASCII string. 
+ quote_from_bytes(b'abc def?') -> 'abc%20def%3f' + 'u'Like quote(), but accepts a bytes object rather than a str, and does + not perform string-to-bytes encoding. It always returns an ASCII string. + quote_from_bytes(b'abc def?') -> 'abc%20def%3f' + 'b'quote_from_bytes() expected bytes'u'quote_from_bytes() expected bytes'b'Encode a dict or sequence of two-element tuples into a URL query string. + + If any values in the query arg are sequences and doseq is true, each + sequence element is converted to a separate parameter. + + If the query arg is a sequence of two-element tuples, the order of the + parameters in the output will match the order of parameters in the + input. + + The components of a query arg may each be either a string or a bytes type. + + The safe, encoding, and errors parameters are passed down to the function + specified by quote_via (encoding and errors only if a component is a str). + 'u'Encode a dict or sequence of two-element tuples into a URL query string. + + If any values in the query arg are sequences and doseq is true, each + sequence element is converted to a separate parameter. + + If the query arg is a sequence of two-element tuples, the order of the + parameters in the output will match the order of parameters in the + input. + + The components of a query arg may each be either a string or a bytes type. + + The safe, encoding, and errors parameters are passed down to the function + specified by quote_via (encoding and errors only if a component is a str). + 'b'items'b'not a valid non-string sequence or mapping object'u'not a valid non-string sequence or mapping object'b'urllib.parse.to_bytes() is deprecated as of 3.8'u'urllib.parse.to_bytes() is deprecated as of 3.8'b'to_bytes(u"URL") --> 'URL'.'u'to_bytes(u"URL") --> 'URL'.'b'URL 'u'URL 'b' contains non-ASCII characters'u' contains non-ASCII characters'b'Transform a string like '' into 'scheme://host/path'. + + The string is returned unchanged if it's not a wrapped URL. + 'u'Transform a string like '' into 'scheme://host/path'. + + The string is returned unchanged if it's not a wrapped URL. 
+ 'b'URL:'u'URL:'b'urllib.parse.splittype() is deprecated as of 3.8, use urllib.parse.urlparse() instead'u'urllib.parse.splittype() is deprecated as of 3.8, use urllib.parse.urlparse() instead'b'splittype('type:opaquestring') --> 'type', 'opaquestring'.'u'splittype('type:opaquestring') --> 'type', 'opaquestring'.'b'([^/:]+):(.*)'u'([^/:]+):(.*)'b'urllib.parse.splithost() is deprecated as of 3.8, use urllib.parse.urlparse() instead'u'urllib.parse.splithost() is deprecated as of 3.8, use urllib.parse.urlparse() instead'b'splithost('//host[:port]/path') --> 'host[:port]', '/path'.'u'splithost('//host[:port]/path') --> 'host[:port]', '/path'.'b'//([^/#?]*)(.*)'u'//([^/#?]*)(.*)'b'urllib.parse.splituser() is deprecated as of 3.8, use urllib.parse.urlparse() instead'u'urllib.parse.splituser() is deprecated as of 3.8, use urllib.parse.urlparse() instead'b'splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.'u'splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.'b'urllib.parse.splitpasswd() is deprecated as of 3.8, use urllib.parse.urlparse() instead'u'urllib.parse.splitpasswd() is deprecated as of 3.8, use urllib.parse.urlparse() instead'b'splitpasswd('user:passwd') -> 'user', 'passwd'.'u'splitpasswd('user:passwd') -> 'user', 'passwd'.'b'urllib.parse.splitport() is deprecated as of 3.8, use urllib.parse.urlparse() instead'u'urllib.parse.splitport() is deprecated as of 3.8, use urllib.parse.urlparse() instead'b'splitport('host:port') --> 'host', 'port'.'u'splitport('host:port') --> 'host', 'port'.'b'(.*):([0-9]*)'u'(.*):([0-9]*)'b'urllib.parse.splitnport() is deprecated as of 3.8, use urllib.parse.urlparse() instead'u'urllib.parse.splitnport() is deprecated as of 3.8, use urllib.parse.urlparse() instead'b'Split host and port, returning numeric port. + Return given default port if no ':' found; defaults to -1. + Return numerical port if a valid number are found after ':'. + Return None if ':' but not a valid number.'u'Split host and port, returning numeric port. + Return given default port if no ':' found; defaults to -1. + Return numerical port if a valid number are found after ':'. + Return None if ':' but not a valid number.'b'urllib.parse.splitquery() is deprecated as of 3.8, use urllib.parse.urlparse() instead'u'urllib.parse.splitquery() is deprecated as of 3.8, use urllib.parse.urlparse() instead'b'splitquery('/path?query') --> '/path', 'query'.'u'splitquery('/path?query') --> '/path', 'query'.'b'urllib.parse.splittag() is deprecated as of 3.8, use urllib.parse.urlparse() instead'u'urllib.parse.splittag() is deprecated as of 3.8, use urllib.parse.urlparse() instead'b'splittag('/path#tag') --> '/path', 'tag'.'u'splittag('/path#tag') --> '/path', 'tag'.'b'urllib.parse.splitattr() is deprecated as of 3.8, use urllib.parse.urlparse() instead'u'urllib.parse.splitattr() is deprecated as of 3.8, use urllib.parse.urlparse() instead'b'splitattr('/path;attr1=value1;attr2=value2;...') -> + '/path', ['attr1=value1', 'attr2=value2', ...].'u'splitattr('/path;attr1=value1;attr2=value2;...') -> + '/path', ['attr1=value1', 'attr2=value2', ...].'b'urllib.parse.splitvalue() is deprecated as of 3.8, use urllib.parse.parse_qsl() instead'u'urllib.parse.splitvalue() is deprecated as of 3.8, use urllib.parse.parse_qsl() instead'b'splitvalue('attr=value') --> 'attr', 'value'.'u'splitvalue('attr=value') --> 'attr', 'value'.'A parser of RFC 2822 and MIME email messages.HeaderParserBytesHeaderParseremail.feedparserParser of RFC 2822 and MIME email messages. 
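All of the deprecation messages above for splittype(), splithost(), splituser(), splitpasswd(), splitport(), splitquery() and splittag() point at urllib.parse.urlparse() as the replacement. A short sketch with a made-up URL shows where the same pieces appear on the parse result.

```python
from urllib.parse import urlparse

url = 'http://user:secret@example.com:8080/path;attr=1?key=value#frag'
parts = urlparse(url)

# The pieces the deprecated split* helpers used to return one at a time
# are all attributes of the result object.
print(parts.scheme)    # http
print(parts.hostname)  # example.com
print(parts.port)      # 8080
print(parts.username)  # user
print(parts.password)  # secret
print(parts.path)      # /path
print(parts.params)    # attr=1
print(parts.query)     # key=value
print(parts.fragment)  # frag
```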
+ + Creates an in-memory object tree representing the email message, which + can then be manipulated and turned over to a Generator to return the + textual representation of the message. + + The string must be formatted as a block of RFC 2822 headers and header + continuation lines, optionally preceded by a `Unix-from' header. The + header block is terminated either by the end of the string or by a + blank line. + + _class is the class to instantiate for new message objects when they + must be created. This class must have a constructor that can take + zero arguments. Default is Message.Message. + + The policy keyword specifies a policy object that controls a number of + aspects of the parser's operation. The default policy maintains + backward compatibility. + + headersonlyCreate a message structure from the data in a file. + + Reads all the data from the file and returns the root of the message + structure. Optional headersonly is a flag specifying whether to stop + parsing after reading the headers or not. The default is False, + meaning it parses the entire contents of the file. + Create a message structure from a string. + + Returns the root of the message structure. Optional headersonly is a + flag specifying whether to stop parsing after reading the headers or + not. The default is False, meaning it parses the entire contents of + the file. + Parser of binary RFC 2822 and MIME email messages. + + Creates an in-memory object tree representing the email message, which + can then be manipulated and turned over to a Generator to return the + textual representation of the message. + + The input must be formatted as a block of RFC 2822 headers and header + continuation lines, optionally preceded by a `Unix-from' header. The + header block is terminated either by the end of the input or by a + blank line. + + _class is the class to instantiate for new message objects when they + must be created. This class must have a constructor that can take + zero arguments. Default is Message.Message. + Create a message structure from the data in a binary file. + + Reads all the data from the file and returns the root of the message + structure. Optional headersonly is a flag specifying whether to stop + parsing after reading the headers or not. The default is False, + meaning it parses the entire contents of the file. + Create a message structure from a byte string. + + Returns the root of the message structure. Optional headersonly is a + flag specifying whether to stop parsing after reading the headers or + not. The default is False, meaning it parses the entire contents of + the file. + # Author: Barry Warsaw, Thomas Wouters, Anthony Baxterb'A parser of RFC 2822 and MIME email messages.'u'A parser of RFC 2822 and MIME email messages.'b'Parser'u'Parser'b'HeaderParser'u'HeaderParser'b'BytesParser'u'BytesParser'b'BytesHeaderParser'u'BytesHeaderParser'b'Parser of RFC 2822 and MIME email messages. + + Creates an in-memory object tree representing the email message, which + can then be manipulated and turned over to a Generator to return the + textual representation of the message. + + The string must be formatted as a block of RFC 2822 headers and header + continuation lines, optionally preceded by a `Unix-from' header. The + header block is terminated either by the end of the string or by a + blank line. + + _class is the class to instantiate for new message objects when they + must be created. This class must have a constructor that can take + zero arguments. Default is Message.Message. 
+ + The policy keyword specifies a policy object that controls a number of + aspects of the parser's operation. The default policy maintains + backward compatibility. + + 'u'Parser of RFC 2822 and MIME email messages. + + Creates an in-memory object tree representing the email message, which + can then be manipulated and turned over to a Generator to return the + textual representation of the message. + + The string must be formatted as a block of RFC 2822 headers and header + continuation lines, optionally preceded by a `Unix-from' header. The + header block is terminated either by the end of the string or by a + blank line. + + _class is the class to instantiate for new message objects when they + must be created. This class must have a constructor that can take + zero arguments. Default is Message.Message. + + The policy keyword specifies a policy object that controls a number of + aspects of the parser's operation. The default policy maintains + backward compatibility. + + 'b'Create a message structure from the data in a file. + + Reads all the data from the file and returns the root of the message + structure. Optional headersonly is a flag specifying whether to stop + parsing after reading the headers or not. The default is False, + meaning it parses the entire contents of the file. + 'u'Create a message structure from the data in a file. + + Reads all the data from the file and returns the root of the message + structure. Optional headersonly is a flag specifying whether to stop + parsing after reading the headers or not. The default is False, + meaning it parses the entire contents of the file. + 'b'Create a message structure from a string. + + Returns the root of the message structure. Optional headersonly is a + flag specifying whether to stop parsing after reading the headers or + not. The default is False, meaning it parses the entire contents of + the file. + 'u'Create a message structure from a string. + + Returns the root of the message structure. Optional headersonly is a + flag specifying whether to stop parsing after reading the headers or + not. The default is False, meaning it parses the entire contents of + the file. + 'b'Parser of binary RFC 2822 and MIME email messages. + + Creates an in-memory object tree representing the email message, which + can then be manipulated and turned over to a Generator to return the + textual representation of the message. + + The input must be formatted as a block of RFC 2822 headers and header + continuation lines, optionally preceded by a `Unix-from' header. The + header block is terminated either by the end of the input or by a + blank line. + + _class is the class to instantiate for new message objects when they + must be created. This class must have a constructor that can take + zero arguments. Default is Message.Message. + 'u'Parser of binary RFC 2822 and MIME email messages. + + Creates an in-memory object tree representing the email message, which + can then be manipulated and turned over to a Generator to return the + textual representation of the message. + + The input must be formatted as a block of RFC 2822 headers and header + continuation lines, optionally preceded by a `Unix-from' header. The + header block is terminated either by the end of the input or by a + blank line. + + _class is the class to instantiate for new message objects when they + must be created. This class must have a constructor that can take + zero arguments. Default is Message.Message. + 'b'Create a message structure from the data in a binary file. 
+ + Reads all the data from the file and returns the root of the message + structure. Optional headersonly is a flag specifying whether to stop + parsing after reading the headers or not. The default is False, + meaning it parses the entire contents of the file. + 'u'Create a message structure from the data in a binary file. + + Reads all the data from the file and returns the root of the message + structure. Optional headersonly is a flag specifying whether to stop + parsing after reading the headers or not. The default is False, + meaning it parses the entire contents of the file. + 'b'Create a message structure from a byte string. + + Returns the root of the message structure. Optional headersonly is a + flag specifying whether to stop parsing after reading the headers or + not. The default is False, meaning it parses the entire contents of + the file. + 'u'Create a message structure from a byte string. + + Returns the root of the message structure. Optional headersonly is a + flag specifying whether to stop parsing after reading the headers or + not. The default is False, meaning it parses the entire contents of + the file. + 'u'email.parser'Pattern compiler. + +The grammar is taken from PatternGrammar.txt. + +The compiler compiles a pattern to a pytree.*Pattern instance. +driverliteralsPatternSyntaxErrortokenize_wrapperTokenizes a string suppressing significant whitespace.PatternCompilergrammar_fileInitializer. + + Takes an optional alternative filename for the pattern grammar. + pattern_grammarSymbolspython_grammarpygrammarpattern_convertwith_treeCompiles a pattern string to a nested pytree.*Pattern object.compile_nodeCompiles a node, recursively. + + This is one big switch on the node type. + altsWildcardPatternNegatedUnitcompile_basicNegatedPatternHUGEPLUSget_intLeafPattern_type_of_literalTOKEN_MAPInvalid token: %rCan't have details for tokenInvalid symbol: %rNodePatternTOKENraw_node_infoConverts raw node information to a Node or Leaf instance.# Fairly local imports# Really local imports# XXX Optimize certain Wildcard-containing-Wildcard patterns# that can be merged# Avoid unneeded recursion# Skip the odd children since they are just '|' tokens# Now we've reduced it to: STRING | NAME [Details] | (...) | [...]# Compile STRING | NAME [Details] | (...) | [...]# Details present# Map named tokens to the type value for a LeafPatternb'Pattern compiler. + +The grammar is taken from PatternGrammar.txt. + +The compiler compiles a pattern to a pytree.*Pattern instance. +'u'Pattern compiler. + +The grammar is taken from PatternGrammar.txt. + +The compiler compiles a pattern to a pytree.*Pattern instance. +'b'Tokenizes a string suppressing significant whitespace.'u'Tokenizes a string suppressing significant whitespace.'b'Initializer. + + Takes an optional alternative filename for the pattern grammar. + 'u'Initializer. + + Takes an optional alternative filename for the pattern grammar. + 'b'Compiles a pattern string to a nested pytree.*Pattern object.'u'Compiles a pattern string to a nested pytree.*Pattern object.'b'Compiles a node, recursively. + + This is one big switch on the node type. + 'u'Compiles a node, recursively. + + This is one big switch on the node type. 
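The email.parser docstrings above cover Parser.parsestr(), BytesParser.parsebytes() and the headersonly flag. A minimal sketch, using an invented message and the modern email.policy.default policy (the docstrings note that the default policy only maintains backward compatibility):

```python
from email.parser import Parser, BytesParser
from email.policy import default

raw = (
    "From: Alice <alice@example.com>\n"
    "To: Bob <bob@example.com>\n"
    "Subject: Hello\n"
    "\n"
    "Message body goes here.\n"
)

# Parse a str; headersonly=True stops after the header block.
msg = Parser(policy=default).parsestr(raw, headersonly=True)
print(msg['Subject'])              # Hello

# BytesParser does the same for bytes input (e.g. data read from a socket).
bmsg = BytesParser(policy=default).parsebytes(raw.encode('ascii'))
print(bmsg.get_content_type())     # text/plain
print(bmsg.get_content().strip())  # Message body goes here.
```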
+ 'b'Invalid token: %r'u'Invalid token: %r'b'Can't have details for token'u'Can't have details for token'b'Invalid symbol: %r'u'Invalid symbol: %r'b'NAME'u'NAME'b'STRING'u'STRING'b'NUMBER'u'NUMBER'b'TOKEN'u'TOKEN'b'Converts raw node information to a Node or Leaf instance.'u'Converts raw node information to a Node or Leaf instance.'u'lib2to3.patcomp'u'patcomp' +The Python Debugger Pdb +======================= + +To use the debugger in its simplest form: + + >>> import pdb + >>> pdb.run('') + +The debugger's prompt is '(Pdb) '. This will stop in the first +function call in . + +Alternatively, if a statement terminated with an unhandled exception, +you can use pdb's post-mortem facility to inspect the contents of the +traceback: + + >>> + + >>> import pdb + >>> pdb.pm() + +The commands recognized by the debugger are listed in the next +section. Most can be abbreviated as indicated; e.g., h(elp) means +that 'help' can be typed as 'h' or 'help' (but not as 'he' or 'hel', +nor as 'H' or 'Help' or 'HELP'). Optional arguments are enclosed in +square brackets. Alternatives in the command syntax are separated +by a vertical bar (|). + +A blank line repeats the previous command literally, except for +'list', where it lists the next 11 lines. + +Commands that the debugger doesn't recognize are assumed to be Python +statements and are executed in the context of the program being +debugged. Python statements can also be prefixed with an exclamation +point ('!'). This is a powerful way to inspect the program being +debugged; it is even possible to change variables or call functions. +When an exception occurs in such a statement, the exception name is +printed but the debugger's state is not changed. + +The debugger supports aliases, which can save typing. And aliases can +have parameters (see the alias help entry) which allows one a certain +level of adaptability to the context under examination. + +Multiple commands may be entered on a single line, separated by the +pair ';;'. No intelligence is applied to separating the commands; the +input is split at the first ';;', even if it is in the middle of a +quoted string. + +If a file ".pdbrc" exists in your home directory or in the current +directory, it is read in and executed as if it had been typed at the +debugger prompt. This is particularly useful for aliases. If both +files exist, the one in the home directory is read first and aliases +defined there can be overridden by the local file. This behavior can be +disabled by passing the "readrc=False" argument to the Pdb constructor. + +Aside from aliases, the debugger is not directly programmable; but it +is implemented as a class from which you can derive your own debugger +class, which you can make as fancy as you like. + + +Debugger commands +================= + +bdbRestartCauses a debugger to be restarted for the debugged python program.post_mortemfind_functiondef\s+%s\s*[(]crelasti2lineno_rstrString that doesn't quote its repr. +-> line_prefix_previous_sigint_handlerreadrcpdb.Pdb(Pdb) displayingmainpyfile_wait_for_mainpyfileset_completer_delims +`@#$%^&*()=+[{]}\|;:'",<>?allow_kbdintrcLines~/.pdbrcrcFile.pdbrccommands_dopromptcommands_silentcommands_definingcommands_bnumsigint_handler +Program interrupted. 
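The lib2to3 pattern-compiler docstrings above describe compiling a pattern string into a nested pytree.*Pattern and matching it against parsed nodes. The following is a hedged sketch only: the pattern string and source snippet are assumptions in the style of the stock fixers, and lib2to3 itself is deprecated (removed in Python 3.13).

```python
from lib2to3 import patcomp, pygram, pytree
from lib2to3.pgen2 import driver

# Compile a fixer-style pattern: a NAME followed by a "(...)" call trailer.
pc = patcomp.PatternCompiler()
pattern = pc.compile_pattern("power< name=NAME trailer< '(' any* ')' > >")

# Parse a tiny module into a pytree and look for matches.
drv = driver.Driver(pygram.python_grammar_no_print_statement,
                    convert=pytree.convert)
tree = drv.parse_string("foo(1, 2)\n")

for node in tree.pre_order():
    results = {}
    if pattern.match(node, results):
        print("call to", results["name"].value)   # call to foo
```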
(Use 'cont' to resume).curindexcurframecurframe_localsexecRcLinesThis method is called when there is the remote possibility + that we ever need to stop in this function.--Call--This function is called when we stop or break at this line.bp_commandsCall every command that was set for the current active breakpoint + (if there is one). + + Returns True if the normal interaction function must be called, + False otherwise.lastcmd_backprint_stack_entry_cmdloopThis function is called when a return trap is set here.--Return--This function is called if an exception occurs, + but only if we are to stop at or just below this level.__exception__Internal --KeyboardInterrupt--_getval_exceptdisplay %s: %r [old: %r]Custom displayhook for the exec in default(), which prevents + assignment of the _ variable in the builtins. + save_stdinHandle alias expansion and ';;' separator.tmpArg%*;;markerInterpret the argument as though it had been typed in response + to the prompt. + + Checks whether this line is typed at the normal prompt or in + a breakpoint command list definition. + handle_command_defHandles one command line during command list definition.silentcmdlistcommands_resuming***_complete_location_complete_expression_complete_bpnumberdotteddo_commandscommands [bpnumber] + (com) ... + (com) end + (Pdb) + + Specify a list of commands for breakpoint number bpnumber. + The commands themselves are entered on the following lines. + Type a line containing just 'end' to terminate the commands. + The commands are executed when the breakpoint is hit. + + To remove all commands from a breakpoint, type commands and + follow it immediately with end; that is, give no commands. + + With no bpnumber argument, commands refers to the last + breakpoint set. + + You can use breakpoint commands to start your program up + again. Simply use the continue command, or step, or any other + command that resumes execution. + + Specifying any command resuming execution (currently continue, + step, next, return, jump, quit and their abbreviations) + terminates the command list (as if that command was + immediately followed by end). This is because any time you + resume execution (even with a simple next or step), you may + encounter another breakpoint -- which could have its own + command list, leading to ambiguities about which list to + execute. + + If you use the 'silent' command in the command list, the usual + message about stopping at a breakpoint is not printed. This + may be desirable for breakpoints that are to print a specific + message and then continue. If none of the other commands + print anything, you will see no sign that the breakpoint was + reached. + bnumUsage: commands [bnum] + ... + endold_command_defsprompt_back(com) command definition aborted, old commands restoredcomplete_commandsdo_breakb(reak) [ ([filename:]lineno | function) [, condition] ] + Without argument, list all breaks. + + With a line number argument, set a break at this line in the + current file. With a function name, set a break at the first + executable line of that function. If a second argument is + present, it is a string specifying an expression which must + evaluate to true before the breakpoint is honored. + + The line number may be prefixed with a filename and a colon, + to specify a breakpoint in another file (probably one that + hasn't been loaded yet). The file is searched for on + sys.path; the .py suffix may be omitted. 
+ Num Type Disp Enb Wherelookupmodule%r not found from sys.pathBad lineno: %slineinfoThe specified object %r is not a function or was not found along sys.path.'The specified object %r is not a function ''or was not found along sys.path.'defaultFilechecklineBreakpoint %d at %s:%dProduce a reasonable default.do_bcomplete_breakcomplete_bdo_tbreaktbreak [ ([filename:]lineno | function) [, condition] ] + Same arguments as break, but sets a temporary breakpoint: it + is automatically deleted when first hit. + complete_tbreakidentifieridstringCheck whether specified line seems to be executable. + + Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank + line or EOF). Warning: testing is not comprehensive. + End of fileBlank or commentdo_enableenable bpnumber [bpnumber ...] + Enables the breakpoints given as a space separated list of + breakpoint numbers. + Enabled %scomplete_enabledo_disabledisable bpnumber [bpnumber ...] + Disables the breakpoints given as a space separated list of + breakpoint numbers. Disabling a breakpoint means it cannot + cause the program to stop execution, but unlike clearing a + breakpoint, it remains in the list of breakpoints and can be + (re-)enabled. + Disabled %scomplete_disabledo_conditioncondition bpnumber [condition] + Set a new condition for the breakpoint, an expression which + must evaluate to true before the breakpoint is honored. If + condition is absent, any existing condition is removed; i.e., + the breakpoint is made unconditional. + Breakpoint %d is now unconditional.New condition set for breakpoint %d.complete_conditiondo_ignoreignore bpnumber [count] + Set the ignore count for the given breakpoint number. If + count is omitted, the ignore count is set to 0. A breakpoint + becomes active when the ignore count is zero. When non-zero, + the count is decremented each time the breakpoint is reached + and the breakpoint is not disabled and any associated + condition evaluates to true. + %d crossingscountstr1 crossingWill ignore next %s of breakpoint %d.Will stop next time breakpoint %d is reached.complete_ignorecl(ear) filename:lineno +cl(ear) [bpnumber [bpnumber...]] + With a space separated list of breakpoint numbers, clear + those breakpoints. Without argument, clear all breaks (but + first ask confirmation). With a filename:lineno argument, + clear all breaks at that line in that file. + Clear all breaks? replyyesDeleted %sInvalid line number (%s)numberlistdo_clcomplete_clearcomplete_cldo_wherew(here) + Print a stack trace, with the most recent frame at the bottom. + An arrow indicates the "current frame", which determines the + context of most commands. 'bt' is an alias for this command. + print_stack_tracedo_wdo_bt_select_framedo_upu(p) [count] + Move the current frame count (default one) levels up in the + stack trace (to an older frame). + Oldest frameInvalid frame count (%s)newframedo_udo_downd(own) [count] + Move the current frame count (default one) levels down in the + stack trace (to a newer frame). + Newest framedo_ddo_untilunt(il) [lineno] + Without argument, continue execution until the line with a + number greater than the current one is reached. With a line + number, continue execution until a line with a number greater + or equal to that is reached. In both cases, also stop when + the current frame returns. 
+ Error in argument: %r"until" line number is smaller than current line number'"until" line number is smaller than current ''line number'do_untdo_steps(tep) + Execute the current line, stop at the first possible occasion + (either in a function that is called or in the current + function). + do_sdo_nextn(ext) + Continue execution until the next line in the current function + is reached or it returns. + do_ndo_runrun [args...] + Restart the debugged python program. If a string is supplied + it is split with "shlex", and the result is used as the new + sys.argv. History, breakpoints, actions and debugger options + are preserved. "restart" is an alias for "run". + argv0do_restartdo_returnr(eturn) + Continue execution until the current function returns. + do_rdo_continuec(ont(inue)) + Continue execution, only stop when a breakpoint is encountered. + do_cdo_contdo_jumpj(ump) lineno + Set the next line that will be executed. Only available in + the bottom-most frame. This lets you jump back and execute + code again, or jump forward to skip code that you don't want + to run. + + It should be noted that not all jumps are allowed -- for + instance it is not possible to jump into the middle of a + for loop or out of a finally clause. + You can only jump within the bottom frameJump failed: %sThe 'jump' command requires a line numberdo_jdo_debugdebug code + Enter a recursive debugger that steps through the code + argument (which is an arbitrary expression or statement to be + executed in the current environment). + ENTERING RECURSIVE DEBUGGERLEAVING RECURSIVE DEBUGGERcomplete_debugdo_quitq(uit) +exit + Quit from the debugger. The program being executed is aborted. + _user_requested_quitdo_qdo_exitdo_EOFEOF + Handles the receipt of EOF as a command. + do_argsa(rgs) + Print the argument list of the current function. + %s = %r%s = *** undefined ***do_ado_retvalretval + Print the return value for the last return of a function. + Not yet returned!do_rv_getval** raised %s **do_pp expression + Print the value of the expression. + do_pppp expression + Pretty-print the value of the expression. + complete_printcomplete_pcomplete_ppdo_listl(ist) [first [,last] | .] + + List source code for the current file. Without arguments, + list 11 lines around the current line or continue the previous + listing. With . as argument, list 11 lines around the current + line. With one argument, list 11 lines starting at that line. + With two arguments, list the given range; if the second + argument is less than the first, it is a count. + + The current line in the current frame is indicated by "->". + If an exception is being debugged, the line where the + exception was originally raised or propagated is indicated by + ">>", if it differs from the current line. + breaklist_print_lines[EOF]do_ldo_longlistlonglist | ll + List the whole source code for the current function or frame. + do_lldo_sourcesource expression + Try to get source code for the given object and display it. + complete_sourcePrint a range of lines.current_linenoexc_linenodo_whatiswhatis arg + Print the type of the argument. + Method %sFunction %sClass %s.%scomplete_whatisdo_displaydisplay [expression] + + Display the value of the expression if it changed, each time execution + stops in the current frame. + + Without expression, list all display expressions for the current frame. + Currently displaying:display %s: %rcomplete_displaydo_undisplayundisplay [expression] + + Do not display the expression any more in the current frame. 
+ + Without expression, clear all display expressions for the current frame. + not displaying %scomplete_undisplaydo_interactinteract + + Start an interactive interpreter whose global namespace + contains all the (global and local) names found in the current scope. + *interactive*do_aliasalias [name [command [parameter parameter ...] ]] + Create an alias called 'name' that executes 'command'. The + command must *not* be enclosed in quotes. Replaceable + parameters can be indicated by %1, %2, and so on, while %* is + replaced by all the parameters. If no command is given, the + current alias for name is shown. If no name is given, all + aliases are listed. + + Aliases may be nested and can contain anything that can be + legally typed at the pdb prompt. Note! You *can* override + internal pdb commands with aliases! Those internal commands + are then hidden until the alias is removed. Aliasing is + recursively applied to the first word of the command line; all + other words in the line are left alone. + + As an example, here are two useful aliases (especially when + placed in the .pdbrc file): + + # Print instance variables (usage "pi classInst") + alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k]) + # Print instance variables in self + alias ps pi self + %s = %sdo_unaliasunalias name + Delete the specified alias. + complete_unaliasprompt_prefix> h(elp) + Without argument, print the list of available commands. + With a command name as argument, print help about that command. + "help pdb" shows the full pdb documentation. + "help exec" gives help on the ! command. + topicNo help for %r; please do not run Python with -OO if you need command help'No help for %r; please do not run Python with -OO ''if you need command help'No help for %rdo_hhelp_exec(!) statement + Execute the (one-line) statement in the context of the current + stack frame. The exclamation point can be omitted unless the + first word of the statement resembles a debugger command. To + assign to a global variable you must always prefix the command + with a 'global' command, e.g.: + (Pdb) global list_options; list_options = ['-l'] + (Pdb) + help_pdbHelper function for break/clear parsing -- may be overridden. + + lookupmodule() translates (possibly incomplete) file or module name + into an absolute file name. + _runmodulerunpy_get_module_detailsmod_spec_runscriptexec(compile(%r, %r, 'exec'))downuptbreakuntiljumplonglistwhatisdisplayundisplayunalias_help_order_commandexpressionA valid traceback must be passed if no exception is being handled"A valid traceback must be passed if no ""exception is being handled"import x; x.main()TESTCMDpydocpagerusage: pdb.py [-c command] ... [-m module | pyfile] [arg] ... + +Debug the Python program given by pyfile. Alternatively, +an executable module or package to debug can be specified using +the -m switch. + +Initial commands are read from .pdbrc files in your home directory +and in the current directory, if they exist. Commands supplied with +-c are executed after commands from .pdbrc files. + +To let the script run until an exception occurs, use "-c continue". +To let the script run up to a given line X in the debugged file, use +"-c 'until X'"._usagemhc:command=run_as_module--commandError:does not existThe program finished and will be restartedRestartingwith arguments:The program exited via sys.exit(). Exit status:print_excUncaught exception. Entering post mortem debuggingRunning 'cont' or 'step' will restart the programPost mortem debugger finished. 
The will be restarted# NOTE: the actual command documentation is collected from docstrings of the# commands and is appended to __doc__ after the class has been defined.# consumer of this info expects the first line to be 1# must be a module frame: do not try to cut a block out of it# Interaction prompt line will separate file and call info from code# text using value of line_prefix string. A newline and arrow may# be to your liking. You can set it once pdb is imported using the# command "pdb.line_prefix = '\n% '".# line_prefix = ': ' # Use this to get the old situation back# Probably a better default# Try to load readline if it exists# remove some common file name delimiters# Read ~/.pdbrc and ./.pdbrc# associates a command list to breakpoint numbers# for each bp num, tells if the prompt# must be disp. after execing the cmd list# for each bp num, tells if the stack trace# True while in the process of defining# a command list# The breakpoint number for which we are# defining a list# when setting up post-mortem debugging with a traceback, save all# the original line numbers to be displayed along the current line# numbers (which can be different, e.g. due to finally clauses)# The f_locals dictionary is updated from the actual frame# locals whenever the .f_locals accessor is called, so we# cache it here to ensure that modifications are not overwritten.# Can be executed earlier than 'setup' if desired# local copy because of recursion# execute every line only once# if onecmd returns True, the command wants to exit# from the interaction, save leftover rc lines# to execute before next interaction# Override Bdb methods# self.currentbp is set in bdb in Bdb.break_here if a breakpoint was hit# An 'Internal StopIteration' exception is an exception debug event# issued by the interpreter when handling a subgenerator run with# 'yield from' or a generator controlled by a for loop. No exception has# actually occurred in this case. The debugger uses this debug event to# stop when the debuggee is returning from such generators.# General interaction function# keyboard interrupts allow for an easy way to cancel# the current command, so allow them during interactive input# Called before loop, handles display expressions# check for identity first; this prevents custom __eq__ to# be called at every loop, and also prevents instances whose# fields are changed to be displayed# Restore the previous signal handler at the Pdb prompt.# ValueError: signal only works in main thread# no interaction desired at this time (happens if .pdbrc contains# a command like "continue")# reproduce the behavior of the standard displayhook, not printing None# split into ';;' separated commands# unless it's an alias command# queue up everything after marker# continue to handle other cmd def in the cmd list# end of cmd list# Determine if we must stop# one of the resuming commands# interface abstraction functions# Generic completion functions. Individual complete_foo methods can be# assigned below to one of these functions.# Complete a file/module/function location for break/tbreak/clear.# Here comes a line number or a condition which we can't complete.# First, try to find matching functions (i.e. expressions).# Then, try to complete file names as well.# Complete a breakpoint number. (This would be more helpful if we could# display additional info along with the completions, such as file/line# of the breakpoint.)# Complete an arbitrary expression.# Collect globals and locals. 
It is usually not really sensible to also# complete builtins, and they clutter the namespace quite heavily, so we# leave them out.# Walk an attribute chain up to the last part, similar to what# rlcompleter does. This will bail if any of the parts are not# simple attribute access, which is what we want.# Complete a simple name.# Command definitions, called by cmdloop()# The argument is the remaining string on the command line# Return true to exit from the command loop# Save old definitions for the case of a keyboard interrupt.# Restore old definitions.# There's at least one# parse arguments; comma has lowest precedence# and cannot occur in filename# parse stuff after comma: "condition"# parse stuff before comma: [filename:]lineno | function# no colon; can be lineno or function#use co_name to identify the bkpt (function names#could be aliased, but co_name is invariant)# last thing to try# ok contains a function name# Check for reasonable breakpoint# now set the break point# To be overridden in derived debuggers# Input is identifier, may be in single quotes# not in single quotes# quoted# Protection for derived debuggers# Best first guess at file to look at# More than one part.# First is module, second is method/class# this method should be callable before starting debugging, so default# to "no globals" if there is no current frame# Don't allow setting breakpoint at a blank line# Make sure it works for "clear C:\foo\bar.py:12"# 'c' is already an abbreviation for 'continue'# this is caught in the main debugger loop# ValueError happens when do_continue() is invoked from# a non-main thread in which case we just continue without# SIGINT set. Would printing a message here (once) make# sense?# Do the jump, fix up our copy of the stack, and display the# new position# assume it's a count# _getval() already printed the error# Is it an instance method?# Is it a function?# Is it a class?# None of the above...# List of all the commands making the program resume execution.# Print a traceback starting at the top stack frame.# The most recently entered frame is printed last;# this is different from dbx and gdb, but consistent with# the Python interpreter's stack trace.# It is also consistent with the up/down commands (which are# compatible with dbx and gdb: up moves towards 'main()'# and down moves towards the most recent stack frame).# Provide help# other helper functions# The script has to run in __main__ namespace (or imports from# __main__ will break).# So we clear up the __main__ and set several special variables# (this gets rid of pdb's globals and cleans old variables on restarts).# When bdb sets tracing, a number of call and line events happens# BEFORE debugger even reaches user's code (and the exact sequence of# events depends on python version). So we take special measures to# avoid stopping before we reach the main script (see user_line and# user_call for details).# Collect all command help into docstring, if not run with -OO# unfortunately we can't guess this order from the class definition# Simplified interface# Post-Mortem interface# handling the default# sys.exc_info() returns (type, value, traceback) if an exception is# being handled, otherwise it returns None# Main program for testing# print help# Get script filename# Hide "pdb.py" and pdb options from argument list# Replace pdb's dir with script's dir in front of module search path.# Note on saving/restoring sys.argv: it's a good idea when sys.argv was# modified by the script being debugged. 
It's a bad idea when it was# changed by the user from the command line. There is a "restart" command# which allows explicit specification of command line arguments.# In most cases SystemExit does not warrant a post-mortem session.# When invoked as main program, invoke the debugger on a scriptb' +The Python Debugger Pdb +======================= + +To use the debugger in its simplest form: + + >>> import pdb + >>> pdb.run('') + +The debugger's prompt is '(Pdb) '. This will stop in the first +function call in . + +Alternatively, if a statement terminated with an unhandled exception, +you can use pdb's post-mortem facility to inspect the contents of the +traceback: + + >>> + + >>> import pdb + >>> pdb.pm() + +The commands recognized by the debugger are listed in the next +section. Most can be abbreviated as indicated; e.g., h(elp) means +that 'help' can be typed as 'h' or 'help' (but not as 'he' or 'hel', +nor as 'H' or 'Help' or 'HELP'). Optional arguments are enclosed in +square brackets. Alternatives in the command syntax are separated +by a vertical bar (|). + +A blank line repeats the previous command literally, except for +'list', where it lists the next 11 lines. + +Commands that the debugger doesn't recognize are assumed to be Python +statements and are executed in the context of the program being +debugged. Python statements can also be prefixed with an exclamation +point ('!'). This is a powerful way to inspect the program being +debugged; it is even possible to change variables or call functions. +When an exception occurs in such a statement, the exception name is +printed but the debugger's state is not changed. + +The debugger supports aliases, which can save typing. And aliases can +have parameters (see the alias help entry) which allows one a certain +level of adaptability to the context under examination. + +Multiple commands may be entered on a single line, separated by the +pair ';;'. No intelligence is applied to separating the commands; the +input is split at the first ';;', even if it is in the middle of a +quoted string. + +If a file ".pdbrc" exists in your home directory or in the current +directory, it is read in and executed as if it had been typed at the +debugger prompt. This is particularly useful for aliases. If both +files exist, the one in the home directory is read first and aliases +defined there can be overridden by the local file. This behavior can be +disabled by passing the "readrc=False" argument to the Pdb constructor. + +Aside from aliases, the debugger is not directly programmable; but it +is implemented as a class from which you can derive your own debugger +class, which you can make as fancy as you like. + + +Debugger commands +================= + +'u' +The Python Debugger Pdb +======================= + +To use the debugger in its simplest form: + + >>> import pdb + >>> pdb.run('') + +The debugger's prompt is '(Pdb) '. This will stop in the first +function call in . + +Alternatively, if a statement terminated with an unhandled exception, +you can use pdb's post-mortem facility to inspect the contents of the +traceback: + + >>> + + >>> import pdb + >>> pdb.pm() + +The commands recognized by the debugger are listed in the next +section. Most can be abbreviated as indicated; e.g., h(elp) means +that 'help' can be typed as 'h' or 'help' (but not as 'he' or 'hel', +nor as 'H' or 'Help' or 'HELP'). Optional arguments are enclosed in +square brackets. Alternatives in the command syntax are separated +by a vertical bar (|). 
+ +A blank line repeats the previous command literally, except for +'list', where it lists the next 11 lines. + +Commands that the debugger doesn't recognize are assumed to be Python +statements and are executed in the context of the program being +debugged. Python statements can also be prefixed with an exclamation +point ('!'). This is a powerful way to inspect the program being +debugged; it is even possible to change variables or call functions. +When an exception occurs in such a statement, the exception name is +printed but the debugger's state is not changed. + +The debugger supports aliases, which can save typing. And aliases can +have parameters (see the alias help entry) which allows one a certain +level of adaptability to the context under examination. + +Multiple commands may be entered on a single line, separated by the +pair ';;'. No intelligence is applied to separating the commands; the +input is split at the first ';;', even if it is in the middle of a +quoted string. + +If a file ".pdbrc" exists in your home directory or in the current +directory, it is read in and executed as if it had been typed at the +debugger prompt. This is particularly useful for aliases. If both +files exist, the one in the home directory is read first and aliases +defined there can be overridden by the local file. This behavior can be +disabled by passing the "readrc=False" argument to the Pdb constructor. + +Aside from aliases, the debugger is not directly programmable; but it +is implemented as a class from which you can derive your own debugger +class, which you can make as fancy as you like. + + +Debugger commands +================= + +'b'Causes a debugger to be restarted for the debugged python program.'u'Causes a debugger to be restarted for the debugged python program.'b'run'u'run'b'pm'u'pm'b'Pdb'u'Pdb'b'runeval'u'runeval'b'runctx'u'runctx'b'runcall'u'runcall'b'set_trace'u'set_trace'b'post_mortem'u'post_mortem'b'def\s+%s\s*[(]'u'def\s+%s\s*[(]'b'String that doesn't quote its repr.'u'String that doesn't quote its repr.'b' +-> 'u' +-> 'b'pdb.Pdb'u'pdb.Pdb'b'(Pdb) 'u'(Pdb) 'b' +`@#$%^&*()=+[{]}\|;:'",<>?'u' +`@#$%^&*()=+[{]}\|;:'",<>?'b'~/.pdbrc'u'~/.pdbrc'b'.pdbrc'u'.pdbrc'b' +Program interrupted. (Use 'cont' to resume).'u' +Program interrupted. (Use 'cont' to resume).'b'This method is called when there is the remote possibility + that we ever need to stop in this function.'u'This method is called when there is the remote possibility + that we ever need to stop in this function.'b'--Call--'u'--Call--'b'This function is called when we stop or break at this line.'u'This function is called when we stop or break at this line.'b'Call every command that was set for the current active breakpoint + (if there is one). + + Returns True if the normal interaction function must be called, + False otherwise.'u'Call every command that was set for the current active breakpoint + (if there is one). 
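The pdb module docstring above demonstrates pdb.run() and post-mortem inspection. A minimal sketch of both entry points follows; the divide() function is invented, and running the script drops you into the interactive "(Pdb) " prompt described above.

```python
import pdb

def divide(a, b):
    result = a / b
    return result

# Run a statement under the debugger; execution stops before the first line
# at the "(Pdb) " prompt (type "s" to step into divide(), "c" to continue).
pdb.run('divide(6, 3)')

# breakpoint() (or pdb.set_trace()) can instead be placed inside code to
# stop at that exact point.

# Post-mortem inspection of an exception that is currently being handled.
try:
    divide(1, 0)
except ZeroDivisionError:
    pdb.post_mortem()
```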
+ + Returns True if the normal interaction function must be called, + False otherwise.'b'currentbp'u'currentbp'b'This function is called when a return trap is set here.'u'This function is called when a return trap is set here.'b'--Return--'u'--Return--'b'This function is called if an exception occurs, + but only if we are to stop at or just below this level.'u'This function is called if an exception occurs, + but only if we are to stop at or just below this level.'b'__exception__'u'__exception__'b'Internal 'u'Internal 'b'--KeyboardInterrupt--'u'--KeyboardInterrupt--'b'display %s: %r [old: %r]'u'display %s: %r [old: %r]'b'Custom displayhook for the exec in default(), which prevents + assignment of the _ variable in the builtins. + 'u'Custom displayhook for the exec in default(), which prevents + assignment of the _ variable in the builtins. + 'b''u''b'Handle alias expansion and ';;' separator.'u'Handle alias expansion and ';;' separator.'b'%*'u'%*'b';;'u';;'b'Interpret the argument as though it had been typed in response + to the prompt. + + Checks whether this line is typed at the normal prompt or in + a breakpoint command list definition. + 'u'Interpret the argument as though it had been typed in response + to the prompt. + + Checks whether this line is typed at the normal prompt or in + a breakpoint command list definition. + 'b'Handles one command line during command list definition.'u'Handles one command line during command list definition.'b'silent'u'silent'b'***'u'***'b'commands [bpnumber] + (com) ... + (com) end + (Pdb) + + Specify a list of commands for breakpoint number bpnumber. + The commands themselves are entered on the following lines. + Type a line containing just 'end' to terminate the commands. + The commands are executed when the breakpoint is hit. + + To remove all commands from a breakpoint, type commands and + follow it immediately with end; that is, give no commands. + + With no bpnumber argument, commands refers to the last + breakpoint set. + + You can use breakpoint commands to start your program up + again. Simply use the continue command, or step, or any other + command that resumes execution. + + Specifying any command resuming execution (currently continue, + step, next, return, jump, quit and their abbreviations) + terminates the command list (as if that command was + immediately followed by end). This is because any time you + resume execution (even with a simple next or step), you may + encounter another breakpoint -- which could have its own + command list, leading to ambiguities about which list to + execute. + + If you use the 'silent' command in the command list, the usual + message about stopping at a breakpoint is not printed. This + may be desirable for breakpoints that are to print a specific + message and then continue. If none of the other commands + print anything, you will see no sign that the breakpoint was + reached. + 'u'commands [bpnumber] + (com) ... + (com) end + (Pdb) + + Specify a list of commands for breakpoint number bpnumber. + The commands themselves are entered on the following lines. + Type a line containing just 'end' to terminate the commands. + The commands are executed when the breakpoint is hit. + + To remove all commands from a breakpoint, type commands and + follow it immediately with end; that is, give no commands. + + With no bpnumber argument, commands refers to the last + breakpoint set. + + You can use breakpoint commands to start your program up + again. 
Simply use the continue command, or step, or any other + command that resumes execution. + + Specifying any command resuming execution (currently continue, + step, next, return, jump, quit and their abbreviations) + terminates the command list (as if that command was + immediately followed by end). This is because any time you + resume execution (even with a simple next or step), you may + encounter another breakpoint -- which could have its own + command list, leading to ambiguities about which list to + execute. + + If you use the 'silent' command in the command list, the usual + message about stopping at a breakpoint is not printed. This + may be desirable for breakpoints that are to print a specific + message and then continue. If none of the other commands + print anything, you will see no sign that the breakpoint was + reached. + 'b'Usage: commands [bnum] + ... + end'u'Usage: commands [bnum] + ... + end'b'(com) 'u'(com) 'b'command definition aborted, old commands restored'u'command definition aborted, old commands restored'b'b(reak) [ ([filename:]lineno | function) [, condition] ] + Without argument, list all breaks. + + With a line number argument, set a break at this line in the + current file. With a function name, set a break at the first + executable line of that function. If a second argument is + present, it is a string specifying an expression which must + evaluate to true before the breakpoint is honored. + + The line number may be prefixed with a filename and a colon, + to specify a breakpoint in another file (probably one that + hasn't been loaded yet). The file is searched for on + sys.path; the .py suffix may be omitted. + 'u'b(reak) [ ([filename:]lineno | function) [, condition] ] + Without argument, list all breaks. + + With a line number argument, set a break at this line in the + current file. With a function name, set a break at the first + executable line of that function. If a second argument is + present, it is a string specifying an expression which must + evaluate to true before the breakpoint is honored. + + The line number may be prefixed with a filename and a colon, + to specify a breakpoint in another file (probably one that + hasn't been loaded yet). The file is searched for on + sys.path; the .py suffix may be omitted. + 'b'Num Type Disp Enb Where'u'Num Type Disp Enb Where'b'%r not found from sys.path'u'%r not found from sys.path'b'Bad lineno: %s'u'Bad lineno: %s'b'The specified object %r is not a function or was not found along sys.path.'u'The specified object %r is not a function or was not found along sys.path.'b'Breakpoint %d at %s:%d'u'Breakpoint %d at %s:%d'b'Produce a reasonable default.'u'Produce a reasonable default.'b'tbreak [ ([filename:]lineno | function) [, condition] ] + Same arguments as break, but sets a temporary breakpoint: it + is automatically deleted when first hit. + 'u'tbreak [ ([filename:]lineno | function) [, condition] ] + Same arguments as break, but sets a temporary breakpoint: it + is automatically deleted when first hit. + 'b'Check whether specified line seems to be executable. + + Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank + line or EOF). Warning: testing is not comprehensive. + 'u'Check whether specified line seems to be executable. + + Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank + line or EOF). Warning: testing is not comprehensive. 
[Binary CodeQL database content omitted: the remaining pool/relation files under example/codeql-db/db-python/default/ contain interned string constants extracted from Python standard library sources (pdb, lib2to3.pgen2.pgen, pickle, pkgutil) and are not meaningful as text; treat these files as "Binary files /dev/null and b/... differ" entries, consistent with the rest of this diff.]
The resource argument should be in the form of a relative + filename, using '/' as the path separator. The parent directory name '..' + is not allowed, and nor is a rooted name (starting with a '/'). + + The function returns a binary string, which is the contents of the + specified resource. + + For packages located in the filesystem, which have already been imported, + this is the rough equivalent of + + d = os.path.dirname(sys.modules[package].__file__) + data = open(os.path.join(d, resource), 'rb').read() + + If the package cannot be located or loaded, or it uses a PEP 302 loader + which does not support get_data(), then None is returned. + 'u'Get a resource from a package. + + This is a wrapper round the PEP 302 loader get_data API. The package + argument should be the name of a package, in standard module format + (foo.bar). The resource argument should be in the form of a relative + filename, using '/' as the path separator. The parent directory name '..' + is not allowed, and nor is a rooted name (starting with a '/'). + + The function returns a binary string, which is the contents of the + specified resource. + + For packages located in the filesystem, which have already been imported, + this is the rough equivalent of + + d = os.path.dirname(sys.modules[package].__file__) + data = open(os.path.join(d, resource), 'rb').read() + + If the package cannot be located or loaded, or it uses a PEP 302 loader + which does not support get_data(), then None is returned. + 'u'pkgutil' This module tries to retrieve as much platform-identifying data as + possible. It makes this information available via function APIs. + + If called from the command line, it prints the platform + information concatenated as single string to stdout. The output + format is useable as part of a filename. + + + Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com + Copyright (c) 2000-2010, eGenix.com Software GmbH; mailto:info@egenix.com + + Permission to use, copy, modify, and distribute this software and its + documentation for any purpose and without fee or royalty is hereby granted, + provided that the above copyright notice appear in all copies and that + both that copyright notice and this permission notice appear in + supporting documentation or portions thereof, including modifications, + that you make. + + EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO + THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND + FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, + INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING + FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + WITH THE USE OR PERFORMANCE OF THIS SOFTWARE ! + +1.0.8RCrc_ver_stages([0-9]+|[._+-])_component_re_comparable_version._+-(__libc_init)|(GLIBC_([0-9.]+))|(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)b'(__libc_init)'b'(GLIBC_([0-9.]+))'br'(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)'_libc_searchlibc_ver Tries to determine the libc version that the file executable + (which defaults to the Python interpreter) is linked against. + + Returns a tuple of strings (lib,version) which default to the + given parameters in case the lookup fails. + + Note that the function has intimate knowledge of how different + libc versions add symbols to the executable and thus is probably + only useable for executables compiled using gcc. + + The file is read and scanned in chunks of chunksize bytes. 
+ + confstrCS_GNU_LIBC_VERSIONverlibcGLIBClibcinitglibcglibcversionsosoversion_norm_version Normalize the version and build strings and return a single + version string using the format major.minor.build (or patchlevel). + (?:([\w ]+) ([\w.]+) .*\[.* ([\d.]+)\])r'(?:([\w ]+) ([\w.]+) 'r'.*'r'\[.* ([\d.]+)\])'_ver_output_syscmd_verwin16dossupported_platforms Tries to figure out the OS version used and returns + a tuple (system, release, version). + + It uses the "ver" shell command for this which is known + to exists on Windows, DOS. XXX Others too ? + + In case this fails, the given parameters are used as + defaults. + + command /c vercmd /c verCalledProcessErrorXP2003Serverpost2003Vista8.1post8.1post10_WIN32_CLIENT_RELEASES2008Server2008ServerR22012Server2012ServerR2post2012ServerR2_WIN32_SERVER_RELEASESwin32_is_iotwin32_editionIoTUAPNanoServerWindowsCoreHeadlessIoTEdgeOSSOFTWARE\Microsoft\Windows NT\CurrentVersioncvkeyOpenKeyExEditionIdwin32_vercsdptypewinverplatform_version{0}.{1}.{2}SP{}service_pack_majorService Pack SPproduct_typeCurrentType_mac_ver_xmlplistlibProductVersionversioninfoPower Macintosh Get macOS version information and return it as tuple (release, + versioninfo, machine) with versioninfo being a tuple (version, + dev_stage, non_release_version). + + Entries which cannot be determined are set to the parameter values + which default to ''. All tuple entries are strings. + _java_getpropjava.langSystemgetPropertyjava_vervendorvminfoosinfo Version interface for Jython. + + Returns a tuple (release, vendor, vminfo, osinfo) with vminfo being + a tuple (vm_name, vm_release, vm_vendor) and osinfo being a + tuple (os_name, os_version, os_arch). + + Values which cannot be determined are set to the defaults + given as parameters (which all default to ''). + + java.vendorjava.versionvm_namevm_releasevm_vendorjava.vm.namejava.vm.vendorjava.vm.versionos_nameos_versionos_archjava.os.archjava.os.namejava.os.versionsystem_alias Returns (system, release, version) aliased to common + marketing names used for some systems. + + It also does some reordering of the information in some cases + where it would otherwise cause confusion. + + SunOSSolarisIRIX64IRIX (64bit)64bitWindows_platform Helper to format the platform string in a filename + compatible format e.g. "system-version-machine". + cleaned_node Helper to determine the node name of this machine. + _follow_symlinks In case filepath is a symlink, follow it until a + real file is reached. + _syscmd_uname Interface to the system's uname command. + _syscmd_file Interface to the system's file command. + + The function uses the -b option of the file command to have it + omit the filename in its output. Follow the symlinks. It returns + default in case the command should fail. + + WindowsPEMSDOS_default_architecturearchitecturelinkage Queries the given executable (defaults to the Python interpreter + binary) for various architecture information. + + Returns a tuple (bits, linkage) which contains information about + the bit architecture and the linkage format used for the + executable. Both values are returned as strings. + + Values that cannot be determined are returned as given by the + parameter presets. If bits is given as '', the sizeof(pointer) + (or sizeof(long) on Python version < 1.5.2) is used as + indicator for the supported pointer size. + + The function relies on the system's "file" command to do the + actual work. This is available on most if not all Unix + platforms. 
On some non-Unix platforms where the "file" command + does not exist and the executable is set to the Python interpreter + binary defaults from _default_architecture are used. + + fileoutshared object32-bit32bitN32n32bit64-bitELFPECOFFMS-DOSuname_resultsystem node release version machine processor_uname_cache Fairly portable uname interface. Returns a tuple + of strings (system, node, release, version, machine, processor) + identifying the underlying platform. + + Note that unlike the os.uname function this also returns + possible processor information as an additional tuple entry. + + Entries which cannot be determined are set to ''. + + no_os_unameprocessoruse_syscmd_verPROCESSOR_ARCHITEW6432PROCESSOR_ARCHITECTUREPROCESSOR_IDENTIFIERMicrosoft WindowsMicrosoft6.016bitJavaOpenVMSvms_libgetsyiSYI$_CPUcsidcpu_numberVAX Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'. + + An empty string is returned if the value cannot be determined. + + Returns the computer's network name (which may not be fully + qualified) + + An empty string is returned if the value cannot be determined. + + Returns the system's release, e.g. '2.2.0' or 'NT' + + An empty string is returned if the value cannot be determined. + + Returns the system's release version, e.g. '#3 on degas' + + An empty string is returned if the value cannot be determined. + + Returns the machine type, e.g. 'i386' + + An empty string is returned if the value cannot be determined. + + Returns the (true) processor name, e.g. 'amdk6' + + An empty string is returned if the value cannot be + determined. Note that many platforms do not provide this + information or simply return the same value as for machine(), + e.g. NetBSD does this. + + ([\w.+]+)\s*\(#?([^,]+)(?:,\s*([\w ]*)(?:,\s*([\w :]*))?)?\)\s*\[([^\]]+)\]?r'([\w.+]+)\s*'r'\(#?([^,]+)'r'(?:,\s*([\w ]*)'r'(?:,\s*([\w :]*))?)?\)\s*'r'\[([^\]]+)\]?'_sys_version_parserIronPython\s*([\d\.]+)(?: \(([\d\.]+)\))? on (.NET [\d\.]+)r'IronPython\s*'r'([\d\.]+)'r'(?: \(([\d\.]+)\))?'r' on (.NET [\d\.]+)'_ironpython_sys_version_parser([\d.]+)\s*\(IronPython\s*[\d.]+\s*\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)r'([\d.]+)\s*'r'\(IronPython\s*'r'[\d.]+\s*'r'\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)'_ironpython26_sys_version_parser([\w.+]+)\s*\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*\[PyPy [^\]]+\]?r'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'r'\[PyPy [^\]]+\]?'_pypy_sys_version_parser_sys_version_cache_sys_version Returns a parsed version of Python's sys.version as tuple + (name, version, branch, revision, buildno, builddate, compiler) + referring to the Python implementation name, version, branch, + revision, build number, build date/time as string and the compiler + identification string. + + Note that unlike the Python sys.version, the returned value + for the Python version will always include the patchlevel (it + defaults to '.0'). + + The function returns empty strings for tuple entries that + cannot be determined. + + sys_version may be given to parse an alternative version + string, e.g. if the version was read from a different Python + interpreter. + + IronPythonfailed to parse IronPython sys.version: %salt_versionbuildnobuilddateJythonfailed to parse Jython sys.version: %sbuildtimePyPyfailed to parse PyPy sys.version: %sfailed to parse CPython sys.version: %sCPythonbranchrevision_mercurial Returns a string identifying the Python implementation. 
+ + Currently, the following implementations are identified: + 'CPython' (C implementation of Python), + 'IronPython' (.NET implementation of Python), + 'Jython' (Java implementation of Python), + 'PyPy' (Python implementation of Python). + + python_version Returns the Python version as string 'major.minor.patchlevel' + + Note that unlike the Python sys.version, the returned value + will always include the patchlevel (it defaults to 0). + + python_version_tuple Returns the Python version as tuple (major, minor, patchlevel) + of strings. + + Note that unlike the Python sys.version, the returned value + will always include the patchlevel (it defaults to 0). + + python_branch Returns a string identifying the Python implementation + branch. + + For CPython this is the SCM branch from which the + Python binary was built. + + If not available, an empty string is returned. + + python_revision Returns a string identifying the Python implementation + revision. + + For CPython this is the SCM revision from which the + Python binary was built. + + If not available, an empty string is returned. + + python_build Returns a tuple (buildno, builddate) stating the Python + build number and date as strings. + + python_compiler Returns a string identifying the compiler used for compiling + Python. + + _platform_cachealiasedterse Returns a single string identifying the underlying platform + with as much useful information as possible (but no more :). + + The output is intended to be human readable rather than + machine parseable. It may look different on different + platforms and this is intended. + + If "aliased" is true, the function will use aliases for + various platforms that report system names which differ from + their common names, e.g. SunOS will be reported as + Solaris. The system_alias() function is used to implement + this. + + Setting terse to true causes the function to return only the + absolute minimum information needed to identify the platform. + + Darwinmacos_releasemacOSrelverslibcnamelibcversion--tersenonaliased--nonaliased#!/usr/bin/env python3# This module is maintained by Marc-Andre Lemburg .# If you find problems, please submit bug reports/patches via the# Python bug tracker (http://bugs.python.org) and assign them to "lemburg".# Still needed:# * support for MS-DOS (PythonDX ?)# * support for Amiga and other still unsupported platforms running Python# * support for additional Linux distributions# Many thanks to all those who helped adding platform-specific# checks (in no particular order):# Charles G Waldman, David Arnold, Gordon McMillan, Ben Darnell,# Jeff Bauer, Cliff Crawford, Ivan Van Laningham, Josef# Betancourt, Randall Hopper, Karl Putland, John Farrell, Greg# Andruk, Just van Rossum, Thomas Heller, Mark R. 
Levinson, Mark# Hammond, Bill Tutt, Hans Nowak, Uwe Zessin (OpenVMS support),# Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter, Steve# Dower# History:# # 1.0.8 - changed Windows support to read version from kernel32.dll# 1.0.7 - added DEV_NULL# 1.0.6 - added linux_distribution()# 1.0.5 - fixed Java support to allow running the module on Jython# 1.0.4 - added IronPython support# 1.0.3 - added normalization of Windows system name# 1.0.2 - added more Windows support# 1.0.1 - reformatted to make doc.py happy# 1.0.0 - reformatted a bit and checked into Python CVS# 0.8.0 - added sys.version parser and various new access# APIs (python_version(), python_compiler(), etc.)# 0.7.2 - fixed architecture() to use sizeof(pointer) where available# 0.7.1 - added support for Caldera OpenLinux# 0.7.0 - some fixes for WinCE; untabified the source file# 0.6.2 - support for OpenVMS - requires version 1.5.2-V006 or higher and# vms_lib.getsyi() configured# 0.6.1 - added code to prevent 'uname -p' on platforms which are# known not to support it# 0.6.0 - fixed win32_ver() to hopefully work on Win95,98,NT and Win2k;# did some cleanup of the interfaces - some APIs have changed# 0.5.5 - fixed another type in the MacOS code... should have# used more coffee today ;-)# 0.5.4 - fixed a few typos in the MacOS code# 0.5.3 - added experimental MacOS support; added better popen()# workarounds in _syscmd_ver() -- still not 100% elegant# though# 0.5.2 - fixed uname() to return '' instead of 'unknown' in all# return values (the system uname command tends to return# 'unknown' instead of just leaving the field empty)# 0.5.1 - included code for slackware dist; added exception handlers# to cover up situations where platforms don't have os.popen# (e.g. Mac) or fail on socket.gethostname(); fixed libc# detection RE# 0.5.0 - changed the API names referring to system commands to *syscmd*;# added java_ver(); made syscmd_ver() a private# API (was system_ver() in previous versions) -- use uname()# instead; extended the win32_ver() to also return processor# type information# 0.4.0 - added win32_ver() and modified the platform() output for WinXX# 0.3.4 - fixed a bug in _follow_symlinks()# 0.3.3 - fixed popen() and "file" command invocation bugs# 0.3.2 - added architecture() API and support for it in platform()# 0.3.1 - fixed syscmd_ver() RE to support Windows NT# 0.3.0 - added system alias support# 0.2.3 - removed 'wince' again... 
oh well.# 0.2.2 - added 'wince' to syscmd_ver() supported platforms# 0.2.1 - added cache logic and changed the platform string format# 0.2.0 - changed the API to use functions instead of module globals# since some action take too long to be run on module import# 0.1.0 - first release# You can always get the latest version of this module at:# http://www.egenix.com/files/python/platform.py# If that URL should fail, try contacting the author.### Globals & Constants# Helper for comparing two version number strings.# Based on the description of the PHP's version_compare():# http://php.net/manual/en/function.version-compare.php# any string not found in this dict, will get 0 assigned# number, will get 100 assigned### Platform specific APIs# parse 'glibc 2.28' as ('glibc', '2.28')# os.confstr() or CS_GNU_LIBC_VERSION value not available# Python 2.2 introduced os.path.realpath(); it is used# here to work around problems with Cygwin not being# able to open symlinks for reading# Examples of VER command output:# Windows 2000: Microsoft Windows 2000 [Version 5.00.2195]# Windows XP: Microsoft Windows XP [Version 5.1.2600]# Windows Vista: Microsoft Windows [Version 6.0.6002]# Note that the "Version" string gets localized on different# Windows versions.# Try some common cmd strings#print('Command %s failed: %s' % (cmd, why))# Parse the output# Strip trailing dots from version and release# Normalize the version and build strings (eliminating additional# zeros)# Strictly, 5.2 client is XP 64-bit, but platform.py historically# has always called it 2003 Server# Server release name lookup will default to client names if necessary# getwindowsversion() reflect the compatibility mode Python is# running under, and so the service pack value is only going to be# valid if the versions match.# VER_NT_SERVER = 3# Canonical name# First try reading the information from an XML file which should# always be present# If that also doesn't work return the default values# Import the needed APIs### System name aliasing# Sun's OS# These releases use the old name SunOS# Modify release (marketing release = SunOS release - 3)# XXX Whatever the new SunOS marketing name is...# IRIX reports IRIX64 on platforms with 64-bit support; yet it# is really a version and not a different platform, since 32-bit# apps are also supported..# In case one of the other tricks# bpo-35516: Don't replace Darwin with macOS since input release and# version arguments can be different than the currently running version.### Various internal helpers# Format the platform string# Cleanup some possible filename obstacles...# No need to report 'unknown' information...# Fold '--'s and remove trailing '-'# No sockets...# Still not working...# XXX Others too ?# "file" output is locale dependent: force the usage of the C locale# to get deterministic behavior.# -b: do not prepend filenames to output lines (brief mode)# With the C locale, the output should be mostly ASCII-compatible.# Decode from Latin-1 to prevent Unicode decode error.### Information about the used architecture# Default values for architecture; non-empty strings override the# defaults given as parameters# Use the sizeof(pointer) as default number of bits if nothing# else is given as default.# Get data from the 'file' system command# "file" command did not return anything; we'll try to provide# some sensible defaults then...# Format not supported# Bits# On Irix only# Linkage# E.g. 
Windows uses this format# XXX the A.OUT format also falls under this class...### Portable uname() interface# Get some infos from the builtin os.uname API...# Hmm, no there is either no uname or uname has returned#'unknowns'... we'll have to poke around the system then.# Try win32_ver() on win32 platforms# Try to use the PROCESSOR_* environment variables# available on Win XP and later; see# http://support.microsoft.com/kb/888731 and# http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM# WOW64 processes mask the native architecture# Try the 'ver' system command available on some# platforms# Normalize system to what win32_ver() normally returns# (_syscmd_ver() tends to return the vendor name as well)# Under Windows Vista and Windows Server 2008,# Microsoft changed the output of the ver command. The# release is no longer printed. This causes the# system and release to be misidentified.# In case we still don't know anything useful, we'll try to# help ourselves# System specific extensions# OpenVMS seems to have release and version mixed up# Get processor information# Get processor information from the uname system command#If any unknowns still exist, replace them with ''s, which are more portable# normalize name### Direct interfaces to some of the uname() return values### Various APIs for extracting information from sys.version# "version"# "(#buildno"# ", builddate"# ", buildtime)"# "[compiler]"# IronPython covering 2.6 and 2.7# Get the Python version# Try the cache first# Parse it# IronPython# Jython# PyPy# CPython# Add the patchlevel version if missing# Build and cache the result### The Opus Magnum of platform strings :-)# Get uname information and then apply platform specific cosmetics# to it...# macOS (darwin kernel)# MS platforms# check for libc vs. glibc# Java platforms# Generic handler### Command line interface# Default is to print the aliased verbose platform stringb' This module tries to retrieve as much platform-identifying data as + possible. It makes this information available via function APIs. + + If called from the command line, it prints the platform + information concatenated as single string to stdout. The output + format is useable as part of a filename. + +'u' This module tries to retrieve as much platform-identifying data as + possible. It makes this information available via function APIs. + + If called from the command line, it prints the platform + information concatenated as single string to stdout. The output + format is useable as part of a filename. + +'b' + Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com + Copyright (c) 2000-2010, eGenix.com Software GmbH; mailto:info@egenix.com + + Permission to use, copy, modify, and distribute this software and its + documentation for any purpose and without fee or royalty is hereby granted, + provided that the above copyright notice appear in all copies and that + both that copyright notice and this permission notice appear in + supporting documentation or portions thereof, including modifications, + that you make. + + EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO + THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND + FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, + INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING + FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + WITH THE USE OR PERFORMANCE OF THIS SOFTWARE ! 
+ +'u' + Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com + Copyright (c) 2000-2010, eGenix.com Software GmbH; mailto:info@egenix.com + + Permission to use, copy, modify, and distribute this software and its + documentation for any purpose and without fee or royalty is hereby granted, + provided that the above copyright notice appear in all copies and that + both that copyright notice and this permission notice appear in + supporting documentation or portions thereof, including modifications, + that you make. + + EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO + THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND + FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, + INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING + FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + WITH THE USE OR PERFORMANCE OF THIS SOFTWARE ! + +'b'1.0.8'u'1.0.8'b'dev'u'dev'b'RC'u'RC'b'rc'u'rc'b'([0-9]+|[._+-])'u'([0-9]+|[._+-])'b'._+-'u'._+-'b'(__libc_init)|(GLIBC_([0-9.]+))|(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)'b' Tries to determine the libc version that the file executable + (which defaults to the Python interpreter) is linked against. + + Returns a tuple of strings (lib,version) which default to the + given parameters in case the lookup fails. + + Note that the function has intimate knowledge of how different + libc versions add symbols to the executable and thus is probably + only useable for executables compiled using gcc. + + The file is read and scanned in chunks of chunksize bytes. + + 'u' Tries to determine the libc version that the file executable + (which defaults to the Python interpreter) is linked against. + + Returns a tuple of strings (lib,version) which default to the + given parameters in case the lookup fails. + + Note that the function has intimate knowledge of how different + libc versions add symbols to the executable and thus is probably + only useable for executables compiled using gcc. + + The file is read and scanned in chunks of chunksize bytes. + + 'b'CS_GNU_LIBC_VERSION'u'CS_GNU_LIBC_VERSION'b'libc'b'GLIBC'u'libc'b'glibc'u'glibc'b' Normalize the version and build strings and return a single + version string using the format major.minor.build (or patchlevel). + 'u' Normalize the version and build strings and return a single + version string using the format major.minor.build (or patchlevel). + 'b'(?:([\w ]+) ([\w.]+) .*\[.* ([\d.]+)\])'u'(?:([\w ]+) ([\w.]+) .*\[.* ([\d.]+)\])'b'win16'u'win16'b'dos'u'dos'b' Tries to figure out the OS version used and returns + a tuple (system, release, version). + + It uses the "ver" shell command for this which is known + to exists on Windows, DOS. XXX Others too ? + + In case this fails, the given parameters are used as + defaults. + + 'u' Tries to figure out the OS version used and returns + a tuple (system, release, version). + + It uses the "ver" shell command for this which is known + to exists on Windows, DOS. XXX Others too ? + + In case this fails, the given parameters are used as + defaults. 
+ + 'b'ver'u'ver'b'command /c ver'u'command /c ver'b'cmd /c ver'u'cmd /c ver'b'2000'u'2000'b'XP'u'XP'b'2003Server'u'2003Server'b'post2003'u'post2003'b'Vista'u'Vista'b'8.1'u'8.1'b'post8.1'u'post8.1'b'post10'u'post10'b'2008Server'u'2008Server'b'2008ServerR2'u'2008ServerR2'b'2012Server'u'2012Server'b'2012ServerR2'u'2012ServerR2'b'post2012ServerR2'u'post2012ServerR2'b'IoTUAP'u'IoTUAP'b'NanoServer'u'NanoServer'b'WindowsCoreHeadless'u'WindowsCoreHeadless'b'IoTEdgeOS'u'IoTEdgeOS'b'SOFTWARE\Microsoft\Windows NT\CurrentVersion'u'SOFTWARE\Microsoft\Windows NT\CurrentVersion'b'EditionId'u'EditionId'b'{0}.{1}.{2}'u'{0}.{1}.{2}'b'SP{}'u'SP{}'b'Service Pack 'u'Service Pack 'b'SP'u'SP'b'product_type'u'product_type'b'CurrentType'u'CurrentType'b'ProductVersion'u'ProductVersion'b'Power Macintosh'u'Power Macintosh'b' Get macOS version information and return it as tuple (release, + versioninfo, machine) with versioninfo being a tuple (version, + dev_stage, non_release_version). + + Entries which cannot be determined are set to the parameter values + which default to ''. All tuple entries are strings. + 'u' Get macOS version information and return it as tuple (release, + versioninfo, machine) with versioninfo being a tuple (version, + dev_stage, non_release_version). + + Entries which cannot be determined are set to the parameter values + which default to ''. All tuple entries are strings. + 'b' Version interface for Jython. + + Returns a tuple (release, vendor, vminfo, osinfo) with vminfo being + a tuple (vm_name, vm_release, vm_vendor) and osinfo being a + tuple (os_name, os_version, os_arch). + + Values which cannot be determined are set to the defaults + given as parameters (which all default to ''). + + 'u' Version interface for Jython. + + Returns a tuple (release, vendor, vminfo, osinfo) with vminfo being + a tuple (vm_name, vm_release, vm_vendor) and osinfo being a + tuple (os_name, os_version, os_arch). + + Values which cannot be determined are set to the defaults + given as parameters (which all default to ''). + + 'b'java.vendor'u'java.vendor'b'java.version'u'java.version'b'java.vm.name'u'java.vm.name'b'java.vm.vendor'u'java.vm.vendor'b'java.vm.version'u'java.vm.version'b'java.os.arch'u'java.os.arch'b'java.os.name'u'java.os.name'b'java.os.version'u'java.os.version'b' Returns (system, release, version) aliased to common + marketing names used for some systems. + + It also does some reordering of the information in some cases + where it would otherwise cause confusion. + + 'u' Returns (system, release, version) aliased to common + marketing names used for some systems. + + It also does some reordering of the information in some cases + where it would otherwise cause confusion. + + 'b'SunOS'u'SunOS'b'Solaris'u'Solaris'b'IRIX64'u'IRIX64'b'IRIX'u'IRIX'b' (64bit)'u' (64bit)'b'64bit'u'64bit'b'Windows'u'Windows'b' Helper to format the platform string in a filename + compatible format e.g. "system-version-machine". + 'u' Helper to format the platform string in a filename + compatible format e.g. "system-version-machine". + 'b' Helper to determine the node name of this machine. + 'u' Helper to determine the node name of this machine. + 'b' In case filepath is a symlink, follow it until a + real file is reached. + 'u' In case filepath is a symlink, follow it until a + real file is reached. + 'b' Interface to the system's uname command. + 'u' Interface to the system's uname command. + 'b'uname'u'uname'b' Interface to the system's file command. 
+ + The function uses the -b option of the file command to have it + omit the filename in its output. Follow the symlinks. It returns + default in case the command should fail. + + 'u' Interface to the system's file command. + + The function uses the -b option of the file command to have it + omit the filename in its output. Follow the symlinks. It returns + default in case the command should fail. + + 'b'WindowsPE'u'WindowsPE'b'MSDOS'u'MSDOS'b' Queries the given executable (defaults to the Python interpreter + binary) for various architecture information. + + Returns a tuple (bits, linkage) which contains information about + the bit architecture and the linkage format used for the + executable. Both values are returned as strings. + + Values that cannot be determined are returned as given by the + parameter presets. If bits is given as '', the sizeof(pointer) + (or sizeof(long) on Python version < 1.5.2) is used as + indicator for the supported pointer size. + + The function relies on the system's "file" command to do the + actual work. This is available on most if not all Unix + platforms. On some non-Unix platforms where the "file" command + does not exist and the executable is set to the Python interpreter + binary defaults from _default_architecture are used. + + 'u' Queries the given executable (defaults to the Python interpreter + binary) for various architecture information. + + Returns a tuple (bits, linkage) which contains information about + the bit architecture and the linkage format used for the + executable. Both values are returned as strings. + + Values that cannot be determined are returned as given by the + parameter presets. If bits is given as '', the sizeof(pointer) + (or sizeof(long) on Python version < 1.5.2) is used as + indicator for the supported pointer size. + + The function relies on the system's "file" command to do the + actual work. This is available on most if not all Unix + platforms. On some non-Unix platforms where the "file" command + does not exist and the executable is set to the Python interpreter + binary defaults from _default_architecture are used. + + 'b'bit'u'bit'b'shared object'u'shared object'b'32-bit'u'32-bit'b'32bit'u'32bit'b'N32'u'N32'b'n32bit'u'n32bit'b'64-bit'u'64-bit'b'ELF'u'ELF'b'PE'u'PE'b'COFF'u'COFF'b'MS-DOS'u'MS-DOS'b'uname_result'u'uname_result'b'system node release version machine processor'u'system node release version machine processor'b' Fairly portable uname interface. Returns a tuple + of strings (system, node, release, version, machine, processor) + identifying the underlying platform. + + Note that unlike the os.uname function this also returns + possible processor information as an additional tuple entry. + + Entries which cannot be determined are set to ''. + + 'u' Fairly portable uname interface. Returns a tuple + of strings (system, node, release, version, machine, processor) + identifying the underlying platform. + + Note that unlike the os.uname function this also returns + possible processor information as an additional tuple entry. + + Entries which cannot be determined are set to ''. + + 'b'PROCESSOR_ARCHITEW6432'u'PROCESSOR_ARCHITEW6432'b'PROCESSOR_ARCHITECTURE'u'PROCESSOR_ARCHITECTURE'b'PROCESSOR_IDENTIFIER'u'PROCESSOR_IDENTIFIER'b'Microsoft Windows'u'Microsoft Windows'b'Microsoft'u'Microsoft'b'6.0'u'6.0'b'16bit'u'16bit'b'Java'u'Java'b'OpenVMS'u'OpenVMS'b'SYI$_CPU'u'SYI$_CPU'b'VAX'u'VAX'b' Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'. 
+ + An empty string is returned if the value cannot be determined. + + 'u' Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'. + + An empty string is returned if the value cannot be determined. + + 'b' Returns the computer's network name (which may not be fully + qualified) + + An empty string is returned if the value cannot be determined. + + 'u' Returns the computer's network name (which may not be fully + qualified) + + An empty string is returned if the value cannot be determined. + + 'b' Returns the system's release, e.g. '2.2.0' or 'NT' + + An empty string is returned if the value cannot be determined. + + 'u' Returns the system's release, e.g. '2.2.0' or 'NT' + + An empty string is returned if the value cannot be determined. + + 'b' Returns the system's release version, e.g. '#3 on degas' + + An empty string is returned if the value cannot be determined. + + 'u' Returns the system's release version, e.g. '#3 on degas' + + An empty string is returned if the value cannot be determined. + + 'b' Returns the machine type, e.g. 'i386' + + An empty string is returned if the value cannot be determined. + + 'u' Returns the machine type, e.g. 'i386' + + An empty string is returned if the value cannot be determined. + + 'b' Returns the (true) processor name, e.g. 'amdk6' + + An empty string is returned if the value cannot be + determined. Note that many platforms do not provide this + information or simply return the same value as for machine(), + e.g. NetBSD does this. + + 'u' Returns the (true) processor name, e.g. 'amdk6' + + An empty string is returned if the value cannot be + determined. Note that many platforms do not provide this + information or simply return the same value as for machine(), + e.g. NetBSD does this. + + 'b'([\w.+]+)\s*\(#?([^,]+)(?:,\s*([\w ]*)(?:,\s*([\w :]*))?)?\)\s*\[([^\]]+)\]?'u'([\w.+]+)\s*\(#?([^,]+)(?:,\s*([\w ]*)(?:,\s*([\w :]*))?)?\)\s*\[([^\]]+)\]?'b'IronPython\s*([\d\.]+)(?: \(([\d\.]+)\))? on (.NET [\d\.]+)'u'IronPython\s*([\d\.]+)(?: \(([\d\.]+)\))? on (.NET [\d\.]+)'b'([\d.]+)\s*\(IronPython\s*[\d.]+\s*\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)'u'([\d.]+)\s*\(IronPython\s*[\d.]+\s*\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)'b'([\w.+]+)\s*\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*\[PyPy [^\]]+\]?'u'([\w.+]+)\s*\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*\[PyPy [^\]]+\]?'b' Returns a parsed version of Python's sys.version as tuple + (name, version, branch, revision, buildno, builddate, compiler) + referring to the Python implementation name, version, branch, + revision, build number, build date/time as string and the compiler + identification string. + + Note that unlike the Python sys.version, the returned value + for the Python version will always include the patchlevel (it + defaults to '.0'). + + The function returns empty strings for tuple entries that + cannot be determined. + + sys_version may be given to parse an alternative version + string, e.g. if the version was read from a different Python + interpreter. + + 'u' Returns a parsed version of Python's sys.version as tuple + (name, version, branch, revision, buildno, builddate, compiler) + referring to the Python implementation name, version, branch, + revision, build number, build date/time as string and the compiler + identification string. + + Note that unlike the Python sys.version, the returned value + for the Python version will always include the patchlevel (it + defaults to '.0'). + + The function returns empty strings for tuple entries that + cannot be determined. 
+ + sys_version may be given to parse an alternative version + string, e.g. if the version was read from a different Python + interpreter. + + 'b'IronPython'u'IronPython'b'failed to parse IronPython sys.version: %s'u'failed to parse IronPython sys.version: %s'b'Jython'u'Jython'b'failed to parse Jython sys.version: %s'u'failed to parse Jython sys.version: %s'b'PyPy'u'PyPy'b'failed to parse PyPy sys.version: %s'u'failed to parse PyPy sys.version: %s'b'failed to parse CPython sys.version: %s'u'failed to parse CPython sys.version: %s'b'CPython'b'_git'u'_git'b'_mercurial'u'_mercurial'b' Returns a string identifying the Python implementation. + + Currently, the following implementations are identified: + 'CPython' (C implementation of Python), + 'IronPython' (.NET implementation of Python), + 'Jython' (Java implementation of Python), + 'PyPy' (Python implementation of Python). + + 'u' Returns a string identifying the Python implementation. + + Currently, the following implementations are identified: + 'CPython' (C implementation of Python), + 'IronPython' (.NET implementation of Python), + 'Jython' (Java implementation of Python), + 'PyPy' (Python implementation of Python). + + 'b' Returns the Python version as string 'major.minor.patchlevel' + + Note that unlike the Python sys.version, the returned value + will always include the patchlevel (it defaults to 0). + + 'u' Returns the Python version as string 'major.minor.patchlevel' + + Note that unlike the Python sys.version, the returned value + will always include the patchlevel (it defaults to 0). + + 'b' Returns the Python version as tuple (major, minor, patchlevel) + of strings. + + Note that unlike the Python sys.version, the returned value + will always include the patchlevel (it defaults to 0). + + 'u' Returns the Python version as tuple (major, minor, patchlevel) + of strings. + + Note that unlike the Python sys.version, the returned value + will always include the patchlevel (it defaults to 0). + + 'b' Returns a string identifying the Python implementation + branch. + + For CPython this is the SCM branch from which the + Python binary was built. + + If not available, an empty string is returned. + + 'u' Returns a string identifying the Python implementation + branch. + + For CPython this is the SCM branch from which the + Python binary was built. + + If not available, an empty string is returned. + + 'b' Returns a string identifying the Python implementation + revision. + + For CPython this is the SCM revision from which the + Python binary was built. + + If not available, an empty string is returned. + + 'u' Returns a string identifying the Python implementation + revision. + + For CPython this is the SCM revision from which the + Python binary was built. + + If not available, an empty string is returned. + + 'b' Returns a tuple (buildno, builddate) stating the Python + build number and date as strings. + + 'u' Returns a tuple (buildno, builddate) stating the Python + build number and date as strings. + + 'b' Returns a string identifying the compiler used for compiling + Python. + + 'u' Returns a string identifying the compiler used for compiling + Python. + + 'b' Returns a single string identifying the underlying platform + with as much useful information as possible (but no more :). + + The output is intended to be human readable rather than + machine parseable. It may look different on different + platforms and this is intended. 
+ + If "aliased" is true, the function will use aliases for + various platforms that report system names which differ from + their common names, e.g. SunOS will be reported as + Solaris. The system_alias() function is used to implement + this. + + Setting terse to true causes the function to return only the + absolute minimum information needed to identify the platform. + + 'u' Returns a single string identifying the underlying platform + with as much useful information as possible (but no more :). + + The output is intended to be human readable rather than + machine parseable. It may look different on different + platforms and this is intended. + + If "aliased" is true, the function will use aliases for + various platforms that report system names which differ from + their common names, e.g. SunOS will be reported as + Solaris. The system_alias() function is used to implement + this. + + Setting terse to true causes the function to return only the + absolute minimum information needed to identify the platform. + + 'b'Darwin'u'Darwin'b'macOS'u'macOS'b'on'u'on'b'terse'u'terse'b'--terse'u'--terse'b'nonaliased'u'nonaliased'b'--nonaliased'u'--nonaliased'u'platform'plistlib.py -- a tool to generate and parse MacOSX .plist files. + +The property list (.plist) file format is a simple XML pickle supporting +basic object types, like dictionaries, lists, numbers and strings. +Usually the top level object is a dictionary. + +To write out a plist file, use the dump(value, file) +function. 'value' is the top level object, 'file' is +a (writable) file object. + +To parse a plist from a file, use the load(file) function, +with a (readable) file object as the only argument. It +returns the top level object (again, usually a dictionary). + +To work with plist data in bytes objects, you can use loads() +and dumps(). + +Values can be strings, integers, floats, booleans, tuples, lists, +dictionaries (but only with string keys), Data, bytes, bytearray, or +datetime.datetime objects. + +Generate Plist example: + + pl = dict( + aString = "Doodah", + aList = ["A", "B", 12, 32.1, [1, 2, 3]], + aFloat = 0.1, + anInt = 728, + aDict = dict( + anotherString = "", + aUnicodeValue = "M\xe4ssig, Ma\xdf", + aTrueValue = True, + aFalseValue = False, + ), + someData = b"", + someMoreData = b"" * 10, + aDate = datetime.datetime.fromtimestamp(time.mktime(time.gmtime())), + ) + with open(fileName, 'wb') as fp: + dump(pl, fp) + +Parse Plist example: + + with open(fileName, 'rb') as fp: + pl = load(fp) + print(pl["aKey"]) +readPlistwritePlistreadPlistFromByteswritePlistToBytesDataInvalidFileExceptionFMT_XMLFMT_BINARYUIDxml.parsers.expatPlistFormatFMT_XML FMT_BINARY_maybe_openpathOrFile + Read a .plist from a path or file. pathOrFile should either + be a file name, or a readable binary file object. + + This function is deprecated, use load instead. + The readPlist function is deprecated, use load() instead + Write 'value' to a .plist file. 'pathOrFile' may either be a + file name or a (writable) file object. + + This function is deprecated, use dump instead. + The writePlist function is deprecated, use dump() insteadsort_keysskipkeys + Read a plist data from a bytes object. Return the root object. + + This function is deprecated, use loads instead. + The readPlistFromBytes function is deprecated, use loads() instead + Return 'value' as a plist-formatted bytes object. + + This function is deprecated, use dumps instead. + The writePlistToBytes function is deprecated, use dumps() instead + Wrapper for binary data. 
+ + This class is deprecated, use a bytes object instead. + data must be as bytesfromBase64_decode_base64asBase64maxlinelength_encode_base64data must be an intUIDs cannot be >= 2**64UIDs must be positive + +b"""PLISTHEADER[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]r"[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f"r"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]"_controlCharPatmaxbinsize(?P\d\d\d\d)(?:-(?P\d\d)(?:-(?P\d\d)(?:T(?P\d\d)(?::(?P\d\d)(?::(?P\d\d))?)?)?)?)?Z_dateParser_date_from_string_date_to_string%04d-%02d-%02dT%02d:%02d:%02dZ_escapestrings can't contains control characters; use bytes instead"strings can't contains control characters; ""use bytes instead"_PlistParserdict_typecurrent_key_dict_typehandle_begin_elementhandle_end_elementhandle_datahandle_entity_declEntityDeclHandlerParseFileentity_nameis_parameter_entitysystem_idpublic_idnotation_nameXML entity declarations are not supported in plist filesbegin_add_objectunexpected element at line %dCurrentLineNumberbegin_dictend_dictmissing value for key '%s' at line %dend_keyunexpected key at line %dbegin_arrayend_trueend_falseend_integer0Xend_realend_dataend_date_DumbXMLWriterindent_level_indent_levelbegin_elementwriteln<%s>end_elementsimple_element<%s>%s<%s/>_PlistWriterwriteHeader_sort_keys_skipkeyswrite_valuetruefalsewrite_dictwrite_datawrite_byteswrite_arrayunsupported type: %skeys must be strings_is_fmt_xml6xBBQQQoffset_size_ref_sizenum_objectstop_objectoffset_table_offset_read_ints_object_offsets_read_object_get_sizetokenL return the size of the next object.0xF_read_refs + read the object by reference. + + May recursively read sub-objects (content of an array/dict/set) + 0xF00x0FtokenH0x080x090x0f0x22>f0x230x330x400x500x60utf-16be0xA0obj_refs0xD0key_refs_count_to_size_scalars_BinaryPlistWriter_objlist_objtable_objidtable_ref_formatbplist00_write_object_getrefnumoffset_formatsort_version>5xBBBQQQrefnum_write_size>B>BBB>BBH>BBL>BBQ>Bq>BB>BH>BL>BQ>BdrefskeyRefsvalRefsrootItems_is_fmt_binarydetect_FORMATSRead a .plist file. 'fp' should be a readable and binary file object. + Return the unpacked root object (which usually is a dictionary). + Read a .plist file from a bytes object. + Return the unpacked root object (which usually is a dictionary). + Write 'value' to a .plist file. 'fp' should be a writable, + binary file object. + Unsupported format: %rReturn a bytes object with the contents for a .plist file. + # Deprecated functionality# base64.decodebytes just calls binascii.a2b_base64;# it seems overkill to use both base64 and binascii.# End of deprecated functionality# XML support# XML 'header'# Regex to find any control chars, except for \t \n and \r# copied from base64.encodebytes(), with added maxlinelength argument# Contents should conform to a subset of ISO 8601# (in particular, YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'. 
[Unreadable binary content omitted: this span is raw string-pool/cache data from the bundled CodeQL Python database, consisting of interned byte/unicode string literal pairs, docstrings, and comments that the extractor cached from Python standard-library modules (plistlib, posixpath, pprint, asyncio.proactor_events, concurrent.futures.process, multiprocessing.process, and asyncio.protocols). None of it is human-authored prose; it is preserved here only as a placeholder for the binary file content.]
+ + When the user wants to requests a transport, they pass a protocol + factory to a utility function (e.g., EventLoop.create_connection()). + + When the connection is made successfully, connection_made() is + called with a suitable transport object. Then data_received() + will be called 0 or more times with data (bytes) received from the + transport; finally, connection_lost() will be called exactly once + with either an exception object or None as an argument. + + State machine of calls: + + start -> CM [-> DR*] [-> ER?] -> CL -> end + + * CM: connection_made() + * DR: data_received() + * ER: eof_received() + * CL: connection_lost() + Called when some data is received. + + The argument is a bytes object. + Called when the other end calls write_eof() or equivalent. + + If this returns a false value (including None), the transport + will close itself. If it returns a true value, closing the + transport is up to the protocol. + Interface for stream protocol with manual buffer control. + + Important: this has been added to asyncio in Python 3.7 + *on a provisional basis*! Consider it as an experimental API that + might be changed or removed in Python 3.8. + + Event methods, such as `create_server` and `create_connection`, + accept factories that return protocols that implement this interface. + + The idea of BufferedProtocol is that it allows to manually allocate + and control the receive buffer. Event loops can then use the buffer + provided by the protocol to avoid unnecessary data copies. This + can result in noticeable performance improvement for protocols that + receive big amounts of data. Sophisticated protocols can allocate + the buffer only once at creation time. + + State machine of calls: + + start -> CM [-> GB [-> BU?]]* [-> ER?] -> CL -> end + + * CM: connection_made() + * GB: get_buffer() + * BU: buffer_updated() + * ER: eof_received() + * CL: connection_lost() + get_bufferCalled to allocate a new receive buffer. + + *sizehint* is a recommended minimal size for the returned + buffer. When set to -1, the buffer size can be arbitrary. + + Must return an object that implements the + :ref:`buffer protocol `. + It is an error to return a zero-sized buffer. + buffer_updatedCalled when the buffer was updated with the received data. + + *nbytes* is the total number of bytes that were written to + the buffer. + Interface for datagram protocol.Called when some datagram is received.Called when a send or receive operation raises an OSError. + + (Other than BlockingIOError or InterruptedError.) + Interface for protocol for subprocess calls.Called when the subprocess writes data into stdout/stderr pipe. + + fd is int file descriptor. + data is bytes object. + Called when a file descriptor associated with the child process is + closed. + + fd is the int file descriptor that was closed. + Called when subprocess has exited.data_lenbuf_lenget_buffer() returned an empty bufferb'Abstract Protocol base classes.'u'Abstract Protocol base classes.'b'BaseProtocol'u'BaseProtocol'b'Protocol'u'Protocol'b'DatagramProtocol'u'DatagramProtocol'b'SubprocessProtocol'u'SubprocessProtocol'b'BufferedProtocol'u'BufferedProtocol'b'Common base class for protocol interfaces. + + Usually user implements protocols that derived from BaseProtocol + like Protocol or ProcessProtocol. + + The only case when BaseProtocol should be implemented directly is + write-only transport like write pipe + 'u'Common base class for protocol interfaces. 
+ + Usually user implements protocols that derived from BaseProtocol + like Protocol or ProcessProtocol. + + The only case when BaseProtocol should be implemented directly is + write-only transport like write pipe + 'b'Called when a connection is made. + + The argument is the transport representing the pipe connection. + To receive data, wait for data_received() calls. + When the connection is closed, connection_lost() is called. + 'u'Called when a connection is made. + + The argument is the transport representing the pipe connection. + To receive data, wait for data_received() calls. + When the connection is closed, connection_lost() is called. + 'b'Called when the connection is lost or closed. + + The argument is an exception object or None (the latter + meaning a regular EOF is received or the connection was + aborted or closed). + 'u'Called when the connection is lost or closed. + + The argument is an exception object or None (the latter + meaning a regular EOF is received or the connection was + aborted or closed). + 'b'Called when the transport's buffer goes over the high-water mark. + + Pause and resume calls are paired -- pause_writing() is called + once when the buffer goes strictly over the high-water mark + (even if subsequent writes increases the buffer size even + more), and eventually resume_writing() is called once when the + buffer size reaches the low-water mark. + + Note that if the buffer size equals the high-water mark, + pause_writing() is not called -- it must go strictly over. + Conversely, resume_writing() is called when the buffer size is + equal or lower than the low-water mark. These end conditions + are important to ensure that things go as expected when either + mark is zero. + + NOTE: This is the only Protocol callback that is not called + through EventLoop.call_soon() -- if it were, it would have no + effect when it's most needed (when the app keeps writing + without yielding until pause_writing() is called). + 'u'Called when the transport's buffer goes over the high-water mark. + + Pause and resume calls are paired -- pause_writing() is called + once when the buffer goes strictly over the high-water mark + (even if subsequent writes increases the buffer size even + more), and eventually resume_writing() is called once when the + buffer size reaches the low-water mark. + + Note that if the buffer size equals the high-water mark, + pause_writing() is not called -- it must go strictly over. + Conversely, resume_writing() is called when the buffer size is + equal or lower than the low-water mark. These end conditions + are important to ensure that things go as expected when either + mark is zero. + + NOTE: This is the only Protocol callback that is not called + through EventLoop.call_soon() -- if it were, it would have no + effect when it's most needed (when the app keeps writing + without yielding until pause_writing() is called). + 'b'Called when the transport's buffer drains below the low-water mark. + + See pause_writing() for details. + 'u'Called when the transport's buffer drains below the low-water mark. + + See pause_writing() for details. + 'b'Interface for stream protocol. + + The user should implement this interface. They can inherit from + this class but don't need to. The implementations here do + nothing (they don't raise exceptions). + + When the user wants to requests a transport, they pass a protocol + factory to a utility function (e.g., EventLoop.create_connection()). 
+ + When the connection is made successfully, connection_made() is + called with a suitable transport object. Then data_received() + will be called 0 or more times with data (bytes) received from the + transport; finally, connection_lost() will be called exactly once + with either an exception object or None as an argument. + + State machine of calls: + + start -> CM [-> DR*] [-> ER?] -> CL -> end + + * CM: connection_made() + * DR: data_received() + * ER: eof_received() + * CL: connection_lost() + 'u'Interface for stream protocol. + + The user should implement this interface. They can inherit from + this class but don't need to. The implementations here do + nothing (they don't raise exceptions). + + When the user wants to requests a transport, they pass a protocol + factory to a utility function (e.g., EventLoop.create_connection()). + + When the connection is made successfully, connection_made() is + called with a suitable transport object. Then data_received() + will be called 0 or more times with data (bytes) received from the + transport; finally, connection_lost() will be called exactly once + with either an exception object or None as an argument. + + State machine of calls: + + start -> CM [-> DR*] [-> ER?] -> CL -> end + + * CM: connection_made() + * DR: data_received() + * ER: eof_received() + * CL: connection_lost() + 'b'Called when some data is received. + + The argument is a bytes object. + 'u'Called when some data is received. + + The argument is a bytes object. + 'b'Called when the other end calls write_eof() or equivalent. + + If this returns a false value (including None), the transport + will close itself. If it returns a true value, closing the + transport is up to the protocol. + 'u'Called when the other end calls write_eof() or equivalent. + + If this returns a false value (including None), the transport + will close itself. If it returns a true value, closing the + transport is up to the protocol. + 'b'Interface for stream protocol with manual buffer control. + + Important: this has been added to asyncio in Python 3.7 + *on a provisional basis*! Consider it as an experimental API that + might be changed or removed in Python 3.8. + + Event methods, such as `create_server` and `create_connection`, + accept factories that return protocols that implement this interface. + + The idea of BufferedProtocol is that it allows to manually allocate + and control the receive buffer. Event loops can then use the buffer + provided by the protocol to avoid unnecessary data copies. This + can result in noticeable performance improvement for protocols that + receive big amounts of data. Sophisticated protocols can allocate + the buffer only once at creation time. + + State machine of calls: + + start -> CM [-> GB [-> BU?]]* [-> ER?] -> CL -> end + + * CM: connection_made() + * GB: get_buffer() + * BU: buffer_updated() + * ER: eof_received() + * CL: connection_lost() + 'u'Interface for stream protocol with manual buffer control. + + Important: this has been added to asyncio in Python 3.7 + *on a provisional basis*! Consider it as an experimental API that + might be changed or removed in Python 3.8. + + Event methods, such as `create_server` and `create_connection`, + accept factories that return protocols that implement this interface. + + The idea of BufferedProtocol is that it allows to manually allocate + and control the receive buffer. Event loops can then use the buffer + provided by the protocol to avoid unnecessary data copies. 
This + can result in noticeable performance improvement for protocols that + receive big amounts of data. Sophisticated protocols can allocate + the buffer only once at creation time. + + State machine of calls: + + start -> CM [-> GB [-> BU?]]* [-> ER?] -> CL -> end + + * CM: connection_made() + * GB: get_buffer() + * BU: buffer_updated() + * ER: eof_received() + * CL: connection_lost() + 'b'Called to allocate a new receive buffer. + + *sizehint* is a recommended minimal size for the returned + buffer. When set to -1, the buffer size can be arbitrary. + + Must return an object that implements the + :ref:`buffer protocol `. + It is an error to return a zero-sized buffer. + 'u'Called to allocate a new receive buffer. + + *sizehint* is a recommended minimal size for the returned + buffer. When set to -1, the buffer size can be arbitrary. + + Must return an object that implements the + :ref:`buffer protocol `. + It is an error to return a zero-sized buffer. + 'b'Called when the buffer was updated with the received data. + + *nbytes* is the total number of bytes that were written to + the buffer. + 'u'Called when the buffer was updated with the received data. + + *nbytes* is the total number of bytes that were written to + the buffer. + 'b'Interface for datagram protocol.'u'Interface for datagram protocol.'b'Called when some datagram is received.'u'Called when some datagram is received.'b'Called when a send or receive operation raises an OSError. + + (Other than BlockingIOError or InterruptedError.) + 'u'Called when a send or receive operation raises an OSError. + + (Other than BlockingIOError or InterruptedError.) + 'b'Interface for protocol for subprocess calls.'u'Interface for protocol for subprocess calls.'b'Called when the subprocess writes data into stdout/stderr pipe. + + fd is int file descriptor. + data is bytes object. + 'u'Called when the subprocess writes data into stdout/stderr pipe. + + fd is int file descriptor. + data is bytes object. + 'b'Called when a file descriptor associated with the child process is + closed. + + fd is the int file descriptor that was closed. + 'u'Called when a file descriptor associated with the child process is + closed. + + fd is the int file descriptor that was closed. + 'b'Called when subprocess has exited.'u'Called when subprocess has exited.'b'get_buffer() returned an empty buffer'u'get_buffer() returned an empty buffer'u'asyncio.protocols'u'protocols'u'This module provides access to the Unix password database. +It is available on all Unix versions. + +Password database entries are reported as 7-tuples containing the following +items from the password database (see `'), in order: +pw_name, pw_passwd, pw_uid, pw_gid, pw_gecos, pw_dir, pw_shell. +The uid and gid items are integers, all others are strings. An +exception is raised if the entry asked for cannot be found.'getpwallu'pwd.struct_passwd: Results from getpw*() routines. + +This object may be accessed either as a tuple of + (pw_name,pw_passwd,pw_uid,pw_gid,pw_gecos,pw_dir,pw_shell) +or via the object attributes as named in the above tuple.'pw_gecospw_gidpw_namepw_passwdpw_shellpw_uidpwd.struct_passwdstruct_passwdRoutine to "compile" a .py file to a .pyc file. + +This module has intimate knowledge of the format of .pyc files. +PyCompileErrorPycInvalidationModeException raised when an error occurs while attempting to + compile the file. 
+ + To raise this exception, use + + raise PyCompileError(exc_type,exc_value,file[,msg]) + + where + + exc_type: exception type to be used in error message + type name can be accesses as class variable + 'exc_type_name' + + exc_value: exception value to be used in error message + can be accesses as class variable 'exc_value' + + file: name of file being compiled to be used in error message + can be accesses as class variable 'file' + + msg: string message to be written as error message + If no value is given, a default exception message will be + given, consistent with 'standard' py_compile output. + message (or default) can be accesses as class variable + 'msg' + + exc_type_nametbtextFile ""File "%s"Sorry: %s: %sTIMESTAMPCHECKED_HASHUNCHECKED_HASH_get_default_invalidation_modeSOURCE_DATE_EPOCHcfiledfiledoraiseinvalidation_modeByte-compile one Python source file to Python bytecode. + + :param file: The source file name. + :param cfile: The target byte compiled file name. When not given, this + defaults to the PEP 3147/PEP 488 location. + :param dfile: Purported file name, i.e. the file name that shows up in + error messages. Defaults to the source file name. + :param doraise: Flag indicating whether or not an exception should be + raised when a compile error is found. If an exception occurs and this + flag is set to False, a string indicating the nature of the exception + will be printed, and the function will return to the caller. If an + exception occurs and this flag is set to True, a PyCompileError + exception will be raised. + :param optimize: The optimization level for the compiler. Valid values + are -1, 0, 1 and 2. A value of -1 means to use the optimization + level of the current interpreter, as given by -O command line options. + :param invalidation_mode: + :param quiet: Return full output with False or 0, errors only with 1, + and no output with 2. + + :return: Path to the resulting byte compiled file. + + Note that it isn't necessary to byte-compile Python modules for + execution efficiency -- Python itself byte-compiles a module when + it is loaded, and if it can, writes out the bytecode to the + corresponding .pyc file. + + However, if a Python installation is shared between users, it is a + good idea to byte-compile all modules upon installation, since + other users may not be able to write in the source directories, + and thus they won't be able to write the .pyc file, and then + they would be byte-compiling every module each time it is loaded. + This can slow down program start-up considerably. + + See compileall.py for a script/module that uses this module to + byte-compile all installed files (or all files in selected + directories). + + Do note that FileExistsError is raised if cfile ends up pointing at a + non-regular file or symlink. Because the compilation uses a file renaming, + the resulting file would be regular and thus not the same type of file as + it was previously. + {} is a symlink and will be changed into a regular file if import writes a byte-compiled file to it'{} is a symlink and will be changed into a regular file if ''import writes a byte-compiled file to it'{} is a non-regular file and will be changed into a regular one if import writes a byte-compiled file to it'{} is a non-regular file and will be changed into a regular ''one if import writes a byte-compiled file to it'py_excsource_statsCompile several source files. 
+ + The files named in 'args' (or on the command line, if 'args' is + not specified) are compiled and the resulting bytecode is cached + in the normal manner. This function does not search a directory + structure to locate source files; it only compiles files named + explicitly. If '-' is the only parameter in args, the list of + files is taken from standard input. + + # return value to indicate at least one failureb'Routine to "compile" a .py file to a .pyc file. + +This module has intimate knowledge of the format of .pyc files. +'u'Routine to "compile" a .py file to a .pyc file. + +This module has intimate knowledge of the format of .pyc files. +'b'compile'u'compile'b'PyCompileError'u'PyCompileError'b'PycInvalidationMode'u'PycInvalidationMode'b'Exception raised when an error occurs while attempting to + compile the file. + + To raise this exception, use + + raise PyCompileError(exc_type,exc_value,file[,msg]) + + where + + exc_type: exception type to be used in error message + type name can be accesses as class variable + 'exc_type_name' + + exc_value: exception value to be used in error message + can be accesses as class variable 'exc_value' + + file: name of file being compiled to be used in error message + can be accesses as class variable 'file' + + msg: string message to be written as error message + If no value is given, a default exception message will be + given, consistent with 'standard' py_compile output. + message (or default) can be accesses as class variable + 'msg' + + 'u'Exception raised when an error occurs while attempting to + compile the file. + + To raise this exception, use + + raise PyCompileError(exc_type,exc_value,file[,msg]) + + where + + exc_type: exception type to be used in error message + type name can be accesses as class variable + 'exc_type_name' + + exc_value: exception value to be used in error message + can be accesses as class variable 'exc_value' + + file: name of file being compiled to be used in error message + can be accesses as class variable 'file' + + msg: string message to be written as error message + If no value is given, a default exception message will be + given, consistent with 'standard' py_compile output. + message (or default) can be accesses as class variable + 'msg' + + 'b'File ""'u'File ""'b'File "%s"'u'File "%s"'b'Sorry: %s: %s'u'Sorry: %s: %s'b'SOURCE_DATE_EPOCH'u'SOURCE_DATE_EPOCH'b'Byte-compile one Python source file to Python bytecode. + + :param file: The source file name. + :param cfile: The target byte compiled file name. When not given, this + defaults to the PEP 3147/PEP 488 location. + :param dfile: Purported file name, i.e. the file name that shows up in + error messages. Defaults to the source file name. + :param doraise: Flag indicating whether or not an exception should be + raised when a compile error is found. If an exception occurs and this + flag is set to False, a string indicating the nature of the exception + will be printed, and the function will return to the caller. If an + exception occurs and this flag is set to True, a PyCompileError + exception will be raised. + :param optimize: The optimization level for the compiler. Valid values + are -1, 0, 1 and 2. A value of -1 means to use the optimization + level of the current interpreter, as given by -O command line options. + :param invalidation_mode: + :param quiet: Return full output with False or 0, errors only with 1, + and no output with 2. + + :return: Path to the resulting byte compiled file. 
+ + Note that it isn't necessary to byte-compile Python modules for + execution efficiency -- Python itself byte-compiles a module when + it is loaded, and if it can, writes out the bytecode to the + corresponding .pyc file. + + However, if a Python installation is shared between users, it is a + good idea to byte-compile all modules upon installation, since + other users may not be able to write in the source directories, + and thus they won't be able to write the .pyc file, and then + they would be byte-compiling every module each time it is loaded. + This can slow down program start-up considerably. + + See compileall.py for a script/module that uses this module to + byte-compile all installed files (or all files in selected + directories). + + Do note that FileExistsError is raised if cfile ends up pointing at a + non-regular file or symlink. Because the compilation uses a file renaming, + the resulting file would be regular and thus not the same type of file as + it was previously. + 'u'Byte-compile one Python source file to Python bytecode. + + :param file: The source file name. + :param cfile: The target byte compiled file name. When not given, this + defaults to the PEP 3147/PEP 488 location. + :param dfile: Purported file name, i.e. the file name that shows up in + error messages. Defaults to the source file name. + :param doraise: Flag indicating whether or not an exception should be + raised when a compile error is found. If an exception occurs and this + flag is set to False, a string indicating the nature of the exception + will be printed, and the function will return to the caller. If an + exception occurs and this flag is set to True, a PyCompileError + exception will be raised. + :param optimize: The optimization level for the compiler. Valid values + are -1, 0, 1 and 2. A value of -1 means to use the optimization + level of the current interpreter, as given by -O command line options. + :param invalidation_mode: + :param quiet: Return full output with False or 0, errors only with 1, + and no output with 2. + + :return: Path to the resulting byte compiled file. + + Note that it isn't necessary to byte-compile Python modules for + execution efficiency -- Python itself byte-compiles a module when + it is loaded, and if it can, writes out the bytecode to the + corresponding .pyc file. + + However, if a Python installation is shared between users, it is a + good idea to byte-compile all modules upon installation, since + other users may not be able to write in the source directories, + and thus they won't be able to write the .pyc file, and then + they would be byte-compiling every module each time it is loaded. + This can slow down program start-up considerably. + + See compileall.py for a script/module that uses this module to + byte-compile all installed files (or all files in selected + directories). + + Do note that FileExistsError is raised if cfile ends up pointing at a + non-regular file or symlink. Because the compilation uses a file renaming, + the resulting file would be regular and thus not the same type of file as + it was previously. 
+ 'b'{} is a symlink and will be changed into a regular file if import writes a byte-compiled file to it'u'{} is a symlink and will be changed into a regular file if import writes a byte-compiled file to it'b'{} is a non-regular file and will be changed into a regular one if import writes a byte-compiled file to it'u'{} is a non-regular file and will be changed into a regular one if import writes a byte-compiled file to it'b''u''b'Compile several source files. + + The files named in 'args' (or on the command line, if 'args' is + not specified) are compiled and the resulting bytecode is cached + in the normal manner. This function does not search a directory + structure to locate source files; it only compiles files named + explicitly. If '-' is the only parameter in args, the list of + files is taken from standard input. + + 'u'Compile several source files. + + The files named in 'args' (or on the command line, if 'args' is + not specified) are compiled and the resulting bytecode is cached + in the normal manner. This function does not search a directory + structure to locate source files; it only compiles files named + explicitly. If '-' is the only parameter in args, the list of + files is taken from standard input. + + 'u'py_compile'Generate Python documentation in HTML or text for interactive use. + +At the Python interactive prompt, calling help(thing) on a Python object +documents the object, and calling help() starts up an interactive +help session. + +Or, at the shell command line outside of Python: + +Run "pydoc " to show documentation on something. may be +the name of a function, module, package, or a dotted reference to a +class or function within a module or module in a package. If the +argument contains a path segment delimiter (e.g. slash on Unix, +backslash on Windows) it is treated as the path to a Python source file. + +Run "pydoc -k " to search for a keyword in the synopsis lines +of all available modules. + +Run "pydoc -n " to start an HTTP server with the given +hostname (default: localhost) on the local machine. + +Run "pydoc -p " to start an HTTP server on the given port on the +local machine. Port number 0 can be used to get an arbitrary unused port. + +Run "pydoc -b" to start an HTTP server on an arbitrary unused port and +open a Web browser to interactively browse documentation. Combine with +the -n and -p options to control the hostname and port used. + +Run "pydoc -w " to write out the HTML documentation for a module +to a file named ".html". + +Module docs for core modules are assumed to be in + + https://docs.python.org/X.Y/library/ + +This can be overridden by setting the PYTHONDOCS environment variable +to a different URL or to a local directory containing the Library +Reference Manual pages. +26 February 2001Guido van Rossum, for an excellent programming language. +Tommy Burnette, the original creator of manpy. +Paul Prescod, for all his work on onlinehelp. +Richard Chamberlain, for the first implementation of textdoc. 
+__credits__ReprpathdirsConvert sys.path into a list of absolute, existing, unique paths.normdirsnormdirGet the doc string or comments for an object.^ * +splitdocSplit a doc string into a synopsis line (if any) and the rest.classnameGet a class name and qualify it with a module name if necessary.isdataCheck if an object is of a type that probably means it's data.Do a series of global replacements on a string.cramOmit part of a string if needed to make it fit in a maximum length.pre at 0x[0-9a-f]{6,16}(>+)$_re_stripidstripidRemove the hexadecimal id from a Python object representation._is_bound_method + Returns True if fn is a bound method, regardless of whether + fn was implemented in Python or in C. + allmethodscl_split_listSplit sequence s via predicate, and return pair ([true], [false]). + + The return value is a 2-tuple of lists, + ([x for x in s if predicate(x)], + [x for x in s if not predicate(x)]) + visiblenameDecide whether to show documentation on a variable.Wrap inspect.classify_class_attrs, with fixup for data descriptors.data descriptorreadonly propertysort_attributesSort the attrs list in-place by _fields and then alphabetically by namefield_orderkeyfuncispackageGuess whether a path refers to a package directory.source_synopsissynopsisGet the one-line summary out of a module file.lastupdateloader_cls__temp__ErrorDuringImportErrors that occurred while trying to import something to document it.problem in %s - %s: %simportfileImport a Python source file or compiled file given its path.is_bytecodesafeimportforceloadImport a module; handle errors; return None if the module isn't found. + + If the module *is* found but an exception occurs, it's wrapped in an + ErrorDuringImport exception and reraised. Unlike __import__, if a + package path is specified, the module at the end of the path is returned, + not the package at the beginning. If the optional 'forceload' argument + is 1, we reload the module from disk (unless it's a dynamic extension).subsDocPYTHONDOCShttps://docs.python.org/%d.%d/librarydocumentGenerate documentation for an object.docmoduledocclassdocroutinedocdatadocotherRaise an exception for unimplemented types.don't know how to document object%s of type %sdocpropertygetdoclocget_pathstdlibReturn the location of module docs or None(built-in)doclocsite-packagesxml.etreetest.pydoc_modhttp://https://%s/%sHTMLReprClass for safely making an HTML representation of a Python object.maxlistmaxtuplemaxdictmaxstringmaxotherrepr1repr_repr_stringtestrepr((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)\1repr_strrepr_instance<%s instance>repr_unicodeHTMLDocFormatter class for HTML documentation._repr_instancepageFormat an HTML page. +Python: %s + + +%s +fgcolbgcolFormat a page heading. + + +
 
+ 
%s
%s
+ preludemarginaliagapFormat a section with a heading.

+ + + + + + + + +
 
+%s
%s%s
%s
%s%s%s
bigsectionFormat a section with a big heading.%spreformatFormat literal preformatted text. + +
+multicolumnFormat a list of items into a multi-column list.rows%s
grey%snamelinkMake a link for an identifier, given name-to-URL mappings.
%sclasslinkMake a link for a class.%smodulelinkMake a link for a module.%smodpkglinkmodpkginfoMake a link for a module or package to display in an index.shadowed%s.%s.html%s.html%s (package)filelinkMake a link to source file.%smarkupMark up some plain text, given a context of symbols to look for. + Each context dictionary maps object names to anchor names.here\b((http|ftp)://\S+[\w/]|RFC[- ]?(\d+)|PEP[- ]?(\d+)|(self\.)?(\w+))r'\b((http|ftp)://\S+[\w/]|'r'RFC[- ]?(\d+)|'r'PEP[- ]?(\d+)|'r'(self\.)?(\w+))'rfcpepselfdothttp://www.rfc-editor.org/rfc/rfc%d.txthttp://www.python.org/dev/peps/pep-%04d/self.self.%sformattreeProduce HTML for a class tree as given by inspect.getclasstree().

+
+%s
+
+%s
+Produce HTML documentation for a module object.links%slinkedname%sRevision: version %s
Module Reference#ffffff#7799eeindex
cdict.html#fdict#-%s

%s

+modpkgsPackage Contents#aa55ccModulesclasslistClasses#ee77aaFunctions#eeaa77#55aa55AuthorCreditsProduce HTML documentation for a class object.realnameHorizontalRuleneedonemaybe
+
Method resolution order:
+
%s
+
+spillmdictspilldescriptorsspilldata
%s
+
%s
%s%s
+thisclassinheriteddefined hereinherited from %s:
+Methods %sClass methods %sStatic methods %sReadonly properties %sData descriptors %sData and other attributes %sclass %s%s = class %sdeclargspec%s
 
#000000#ffc8d8Format an argument default value as text.Produce HTML documentation for a function or method object.noteskipdocsimclass from method of %s instance unbound %s methodasync asyncqualifier%s%sreallink%s = %s%s lambda (...)%s
%s
+
%s
%s
%s
+Produce html documentation for a data descriptor.
%s
+
%s
+Produce HTML documentation for a data object.%s = lhsGenerate an HTML index for a directory of modules.TextReprClass for safely making a text representation of a Python object.TextDocFormatter class for text documentation.boldFormat a string in bold by overstriking.Indent text by prepending a given prefix to each line.Format a section with a given heading.clean_contentsRender in text a class tree as returned by inspect.getclasstree().Produce text documentation for a given module object.synop - MODULE REFERENCE + +The following documentation is automatically generated from the Python +source files. It may be incomplete, incorrect or include features that +are considered implementation detail and may vary between Python +implementations. When in doubt, consult the module reference at the +location listed above. +DESCRIPTIONmodpkgs_names (package)PACKAGE CONTENTSsubmodulesSUBMODULESCLASSESFUNCTIONSDATAAUTHORCREDITSFILEProduce text documentation for a given class object.makenameclass = class Method resolution order:subclassesno_of_subclassesMAX_SUBCLASSES_TO_DISPLAYBuilt-in subclasses:subclassname ... and other subclassesMethods %s: +Class methods %s: +Static methods %s: +Readonly properties %s: +Data descriptors %s: +Data and other attributes %s: + | Produce text documentation for a function or method object. = lambda Produce text documentation for a data descriptor.Produce text documentation for a data object.chop_PlainTextDocSubclass of TextDoc which overrides string stylingThe first time this is called, determine what kind of pager to use.getpagerDecide what method to use for paging through text.plainpagerMANPAGERPAGERuse_pagertempfilepagerTERMdumbemacspipepagermore <(less) 2>/dev/nulllessmore "%s"ttypagerRemove boldface formatting from text..Page through text by feeding it to another program.Page through text by invoking a program on a temporary file.TemporaryDirectorytempdirpydoc.out_escape_stdoutPage through text on a text terminal.ttytcgetattrsetcbreakgetcharLINESinc-- more -- tcsetattrTCSAFLUSHSimply print unformatted text. This is the ultimate fallback.describeProduce a short description of the given thing.built-in module package built-in function getset descriptor %s.%s.%smember descriptor %s.%s.%sfunction method locateLocate an object by name or dotted path, importing as necessary.nextmoduleplaintextGiven an object or a path to an object, get the object and its name.No Python documentation found for %r. +Use help() to get the interactive help utility. +Use help(str) for help on the str class.render_docPython Library Documentation: %srendererRender text documentation, given an object or a path to an object. 
in module Display text documentation, given an object or a path to an object.writedocWrite HTML documentation to a file in the current directory.wrotewritedocsWrite out HTML documentation for all modules in a directory tree.HelperBOOLEANwhile forCLASSES SPECIALMETHODSBASICMETHODSbreak continue whilenonlocal NAMESPACESTRUTHVALUEMODULESSEQUENCEMETHODSCOMPARISONglobal NAMESPACESEXCEPTIONSbreak continue if TRUTHVALUECONTEXTMANAGERS EXCEPTIONS yield_strprefixesSTRINGS<<<>OPERATORSUNARY+=-=*=/=%=&=|=^=<<=>>=**=//=AUGMENTEDASSIGNMENTBITWISECOMPLEX_symbols_inverseOPERATORS FORMATTINGPOWERTUPLES LISTS FUNCTIONSATTRIBUTES FLOAT MODULES OBJECTSSLICINGS DICTIONARYLITERALSdef classPRIVATENAMESPRIVATENAMES SPECIALMETHODSBACKQUOTESTUPLES FUNCTIONS CALLSLISTS SUBSCRIPTS SLICINGSsymbolssymbols_STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS FUNCTIONS CLASSES MODULES FILES inspect'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS ''FUNCTIONS CLASSES MODULES FILES inspect'str UNICODE SEQUENCES STRINGMETHODS FORMATTING TYPES'str UNICODE SEQUENCES STRINGMETHODS ''FORMATTING TYPES'string-methodsSTRINGS FORMATTINGSTRINGMETHODSformatstringsFORMATTINGencodings unicode SEQUENCES STRINGMETHODS FORMATTING TYPES'encodings unicode SEQUENCES STRINGMETHODS 'INTEGER FLOAT COMPLEX TYPESNUMBERSintegersint rangeINTEGERfloatingfloat mathimaginarycomplex cmathtypesseqSTRINGMETHODS FORMATTING range LISTSSEQUENCESDICTIONARIESMAPPINGStypesfunctionsdef TYPEStypesmethodsclass def CLASSES TYPESMETHODSbltin-code-objectscompile FUNCTIONS TYPESCODEOBJECTSbltin-type-objectstypes TYPESTYPEOBJECTSFRAMEOBJECTSTRACEBACKSbltin-null-objectbltin-ellipsis-objectSLICINGSspecialattrsSPECIALATTRIBUTESclass SPECIALMETHODS PRIVATENAMEStypesmodulesPACKAGESoperator-summarylambda or and not in is BOOLEAN COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES LISTS DICTIONARIES'lambda or and not in is BOOLEAN ''COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER ''UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES ''LISTS DICTIONARIES'EXPRESSIONSPRECEDENCEOBJECTSspecialnamesBASICMETHODS ATTRIBUTEMETHODS CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS NUMBERMETHODS CLASSES'BASICMETHODS ATTRIBUTEMETHODS ''CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS ''NUMBERMETHODS CLASSES'SPECIALMETHODScustomizationhash repr str SPECIALMETHODSattribute-accessATTRIBUTES SPECIALMETHODSATTRIBUTEMETHODScallable-typesCALLS SPECIALMETHODSCALLABLEMETHODSsequence-typesSEQUENCES SEQUENCEMETHODS SPECIALMETHODS'SEQUENCES SEQUENCEMETHODS ''SPECIALMETHODS'MAPPINGS SPECIALMETHODSMAPPINGMETHODSnumeric-typesNUMBERS AUGMENTEDASSIGNMENT SPECIALMETHODS'NUMBERS AUGMENTEDASSIGNMENT 'NUMBERMETHODSexecmodelNAMESPACES DYNAMICFEATURES EXCEPTIONSEXECUTIONnamingglobal nonlocal ASSIGNMENT DELETION DYNAMICFEATURESNAMESPACESdynamic-featuresDYNAMICFEATURESSCOPINGFRAMEStry except finally raiseconversionsCONVERSIONSidentifierskeywords SPECIALIDENTIFIERSIDENTIFIERSid-classesSPECIALIDENTIFIERSatom-identifiersatom-literalsSTRINGS NUMBERS TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'STRINGS NUMBERS TUPLELITERALS ''LISTLITERALS DICTIONARYLITERALS'LITERALSTUPLESexprlistsTUPLES LITERALSTUPLELITERALStypesseq-mutableLISTLITERALSLISTSlistsLISTS LITERALStypesmappingDICTIONARYLITERALSDICTIONARIES LITERALSattribute-referencesgetattr hasattr setattr ATTRIBUTEMETHODSATTRIBUTESsubscriptionsSUBSCRIPTSslicingscallsCALLSunaryBINARYshiftingSHIFTINGbitwisecomparisonsEXPRESSIONS BASICMETHODSbooleansEXPRESSIONS 
TRUTHVALUEASSERTIONassignmentASSIGNMENTaugassignDELETIONRETURNINGIMPORTINGCONDITIONALcompoundfor while break continueLOOPINGif while and or not BASICMETHODSDEBUGGINGcontext-managersCONTEXTMANAGERS<%s.%s instance>_GoInteractive +You are now leaving help and returning to the Python interpreter. +If you want to ask for help on a particular object directly from the +interpreter, you can type "help(object)". Executing "help('string')" +has the same effect as typing a particular string at the help> prompt. +help> Read one line, using input() when appropriate.listkeywordslistsymbolslisttopicslistmodulesmodules showsymbolHelp on %s:showtopic +Welcome to Python {0}'s help utility! + +If this is your first time using Python, you should definitely check out +the tutorial on the Internet at https://docs.python.org/{0}/tutorial/. + +Enter the name of any module, keyword, or topic to get help on writing +Python programs and using Python modules. To quit this help utility and +return to the interpreter, just type "quit". + +To get a list of available modules, keywords, symbols, or topics, type +"modules", "keywords", "symbols", or "topics". Each module also comes +with a one-line summary of what it does; to list the modules whose name +or summary contain a given string such as "spam", type "modules spam". +colw +Here is a list of the Python keywords. Enter any keyword to get more help. + + +Here is a list of the punctuation symbols which Python assigns special meaning +to. Enter any symbol to get more help. + + +Here is a list of available topics. Enter any topic name to get more help. + +more_xrefspydoc_data.topicspydoc_data +Sorry, topic and keyword documentation is not available because the +module "pydoc_data.topics" could not be found. +no documentation found for %s +xrefsRelated help topics: wrapped_text +%s +_gettopicReturn unbuffered tuple of (topic, xrefs). + + If an error occurs here, the exception is caught and displayed by + the url handler. + + This function duplicates the showtopic method but returns its + result directly so it can be formatted for display in an html page. + could not find topic +Here is a list of modules whose name or summary contains '{}'. +If there are any, enter a module name to get more help. + +apropos +Please wait a moment while I gather a list of all available modules... + +.__init__ModuleScanner +Enter any module name to get more help. Or, type "modules spam" to search +for modules whose name or summary contain the string "spam". +An interruptible scanner that searches module synopses.completerPrint all the one-line module summaries that contain a substring._start_serverurlhandlerStart an HTTP server thread on a specific port. + + Start an HTML/text server thread, so HTML or text documents can be + browsed dynamically and interactively with a Web browser. Example use: + + >>> import time + >>> import pydoc + + Define a URL handler. To determine what the client is asking + for, check the URL and content_type. + + Then get or generate some text or HTML code and return it. + + >>> def my_url_handler(url, content_type): + ... text = 'the URL sent was: (%s, %s)' % (url, content_type) + ... return text + + Start server thread on port 0. + If you use port 0, the server will pick a random port number. + You can then use serverthread.port to get the port number. + + >>> port = 0 + >>> serverthread = pydoc._start_server(my_url_handler, port) + + Check that the server is really started. If it is, open browser + and get first page. Use serverthread.url as the starting page. 
+ + >>> if serverthread.serving: + ... import webbrowser + + The next two lines are commented out so a browser doesn't open if + doctest is run on this module. + + #... webbrowser.open(serverthread.url) + #True + + Let the server do its thing. We just need to monitor its status. + Use time.sleep so the loop doesn't hog the CPU. + + >>> starttime = time.monotonic() + >>> timeout = 1 #seconds + + This is a short timeout for testing purposes. + + >>> while serverthread.serving: + ... time.sleep(.01) + ... if serverthread.serving and time.monotonic() - starttime > timeout: + ... serverthread.stop() + ... break + + Print any errors that may have occurred. + + >>> print(serverthread.error) + None + DocHandlerBaseHTTPRequestHandlerdo_GETProcess a request from an HTML browser. + + The URL received is in self.path. + Get an HTML page from self.urlhandler and send it. + content_typesend_responsesend_header%s; charset=UTF-8end_headerswfilelog_messageDocServerHTTPServerserve_until_quitrdhandle_requestserver_closeserver_activateServerThreadservingStart the server.MessageClassdocsvrdocserverserver_porthttp://%s:%d/Stop the server and this thread nicely.01_url_handlerThe pydoc url handler for use with the pydoc server. + + If the content_type is 'text/css', the _pydoc.css style + sheet is read and returned if it exits. + + If the content_type is 'text/html', then the result of + get_html_page(url) is returned. + _HTMLDocpydoc_data/_pydoc.csscss_pathcss_link +Pydoc: %s + +%s%s
%s
+html_navbar%s [%s, %s] +
+ Python %s
%s +
+
+ +
+
+ + +
  +
+ + +
+
+
+ html_indexModule Index page.bltinlinkIndex of Modules

Built-in Modules

pydoc by Ka-Ping Yee<ping@lfw.org>'

pydoc by Ka-Ping Yee''<ping@lfw.org>'Index of Moduleshtml_searchSearch results page.search_resultSearch Resultskey = %s
Search Resultshtml_topicsIndex of topic texts available.%sINDEXTopicshtml_keywordsIndex of keywords.Keywordshtml_topicpageTopic or keyword help page.htmlhelpKEYWORDTOPIC

%s
html_getobjcould not find objecthtml_errorError#bb0000Error - %sget_html_pageGenerate an HTML page for url.complete_urlsearch?keytopic?keyget?keybad pydoc urlpath_hereunknown content type %r for url %sopen_browserStart the enhanced pydoc Web server and open a Web browser. + + Use port '0' to start the server on an arbitrary port. + Set open_browser to False to suppress opening a browser. + webbrowserserverthreadServer commands: [b]rowser, [q]uitserver_help_msgServer ready atserver> Server stoppedispath_get_revised_pathgiven_pathEnsures current directory is on returned path, and argv0 directory is not + + Exception: argv0 dir is left alone if it's also pydoc's directory. + + Returns a new path entry list, or None if no adjustment is needed. + stdlib_dirscript_dirrevised_path_adjust_cli_sys_pathEnsures current directory is on sys.path, and __main__ directory is not. + + Exception: __main__ dir is left alone if it's also pydoc's directory. + cliCommand-line interface (looks at sys.argv to decide what to do).BadUsagebk:n:p:wwritingstart_serverfile %r does not existpydoc - the Python documentation tool + +{cmd} ... + Show text documentation on something. may be the name of a + Python keyword, topic, function, module, or package, or a dotted + reference to a class or function within a module or module in a + package. If contains a '{sep}', it is used as the path to a + Python source file to document. If name is 'keywords', 'topics', + or 'modules', a listing of these things is displayed. + +{cmd} -k + Search for a keyword in the synopsis lines of all available modules. + +{cmd} -n + Start an HTTP server with the given hostname (default: localhost). + +{cmd} -p + Start an HTTP server on the given port on the local machine. Port + number 0 can be used to get an arbitrary unused port. + +{cmd} -b + Start an HTTP server on an arbitrary unused port and open a Web browser + to interactively browse documentation. This option can be used in + combination with -n and/or -p. + +{cmd} -w ... + Write out the HTML documentation for a module to a file in the current + directory. If contains a '{sep}', it is treated as a filename; if + it names a directory, documentation is written for all the contents. +# Known bugs that can't be fixed here:# - synopsis() cannot be prevented from clobbering existing# loaded modules.# - If the __file__ attribute on a module is a relative path and# the current directory is changed with os.chdir(), an incorrect# path will be displayed.# --------------------------------------------------------- common routines# The behaviour of %p is implementation-dependent in terms of case.# all your base are belong to us# Certain special names are redundant or internal.# XXX Remove __initializing__?# Private names are hidden, but special names are displayed.# Namedtuples have public fields and methods with a single leading underscore# only document that which the programmer exported in __all__# This allows data descriptors to be ordered according# to a _fields attribute if present.# ----------------------------------------------------- module manipulation# Look for binary suffixes first, falling back to source.# Now handle the choice.# Must be a source file.# module can't be opened, so skip it# text modules can be directly examined# Must be a binary module, which has to be imported.# XXX We probably don't need to pass in the loader here.# Cache the result.# If forceload is 1 and the module has been previously loaded from# disk, we always have to reload the module. 
Binary files /dev/null and b/example/codeql-db/db-python/default/pools/0/buckets/info differ
+ _initmutexnot_emptynot_fullall_tasks_doneunfinished_tasksIndicate that a formerly enqueued task is complete. + + Used by Queue consumer threads. For each get() used to fetch a task, + a subsequent call to task_done() tells the queue that the processing + on the task is complete. + + If a join() is currently blocking, it will resume when all items + have been processed (meaning that a task_done() call was received + for every item that had been put() into the queue). + + Raises a ValueError if called more times than there were items + placed in the queue. + unfinishedtask_done() called too many timesBlocks until all items in the Queue have been gotten and processed. + + The count of unfinished tasks goes up whenever an item is added to the + queue. The count goes down whenever a consumer thread calls task_done() + to indicate the item was retrieved and all work on it is complete. + + When the count of unfinished tasks drops to zero, join() unblocks. + Return the approximate size of the queue (not reliable!)._qsizeReturn True if the queue is empty, False otherwise (not reliable!). + + This method is likely to be removed at some point. Use qsize() == 0 + as a direct substitute, but be aware that either approach risks a race + condition where a queue can grow before the result of empty() or + qsize() can be used. + + To create code that needs to wait for all queued tasks to be + completed, the preferred technique is to use the join() method. + Return True if the queue is full, False otherwise (not reliable!). + + This method is likely to be removed at some point. Use qsize() >= n + as a direct substitute, but be aware that either approach risks a race + condition where a queue can shrink before the result of full() or + qsize() can be used. + Put an item into the queue. + + If optional args 'block' is true and 'timeout' is None (the default), + block if necessary until a free slot is available. If 'timeout' is + a non-negative number, it blocks at most 'timeout' seconds and raises + the Full exception if no free slot was available within that time. + Otherwise ('block' is false), put an item on the queue if a free slot + is immediately available, else raise the Full exception ('timeout' + is ignored in that case). + 'timeout' must be a non-negative number_putRemove and return an item from the queue. + + If optional args 'block' is true and 'timeout' is None (the default), + block if necessary until an item is available. If 'timeout' is + a non-negative number, it blocks at most 'timeout' seconds and raises + the Empty exception if no item was available within that time. + Otherwise ('block' is false), return an item if one is immediately + available, else raise the Empty exception ('timeout' is ignored + in that case). + _getPut an item into the queue without blocking. + + Only enqueue the item if a free slot is immediately available. + Otherwise raise the Full exception. + Remove and return an item from the queue without blocking. + + Only get an item if one is immediately available. Otherwise + raise the Empty exception. + Variant of Queue that retrieves open entries in priority order (lowest first). + + Entries are typically tuples of the form: (priority number, data). + Variant of Queue that retrieves most recently added entries first._PySimpleQueueSimple, unbounded FIFO queue. + + This pure Python implementation is not reentrant. + Put the item on the queue. + + The optional 'block' and 'timeout' arguments are ignored, as this method + never blocks. 
They are provided for compatibility with the Queue class. + Put an item into the queue without blocking. + + This is exactly equivalent to `put(item)` and is only provided + for compatibility with the Queue class. + Return True if the queue is empty, False otherwise (not reliable!).# mutex must be held whenever the queue is mutating. All methods# that acquire mutex must release it before returning. mutex# is shared between the three conditions, so acquiring and# releasing the conditions also acquires and releases mutex.# Notify not_empty whenever an item is added to the queue; a# thread waiting to get is notified then.# Notify not_full whenever an item is removed from the queue;# a thread waiting to put is notified then.# Notify all_tasks_done whenever the number of unfinished tasks# drops to zero; thread waiting to join() is notified to resume# Override these methods to implement other queue organizations# (e.g. stack or priority queue).# These will only be called with appropriate locks held# Initialize the queue representation# Put a new item in the queue# Get an item from the queue# Note: while this pure Python version provides fairness# (by using a threading.Semaphore which is itself fair, being based# on threading.Condition), fairness is not part of the API contract.# This allows the C version to use a different implementation.b'A multi-producer, multi-consumer queue.'u'A multi-producer, multi-consumer queue.'b'Empty'u'Empty'b'Full'u'Full'b'PriorityQueue'u'PriorityQueue'b'LifoQueue'u'LifoQueue'b'SimpleQueue'u'SimpleQueue'b'Exception raised by Queue.get(block=0)/get_nowait().'b'Exception raised by Queue.put(block=0)/put_nowait().'u'Exception raised by Queue.put(block=0)/put_nowait().'b'Create a queue object with a given maximum size. + + If maxsize is <= 0, the queue size is infinite. + 'u'Create a queue object with a given maximum size. + + If maxsize is <= 0, the queue size is infinite. + 'b'Indicate that a formerly enqueued task is complete. + + Used by Queue consumer threads. For each get() used to fetch a task, + a subsequent call to task_done() tells the queue that the processing + on the task is complete. + + If a join() is currently blocking, it will resume when all items + have been processed (meaning that a task_done() call was received + for every item that had been put() into the queue). + + Raises a ValueError if called more times than there were items + placed in the queue. + 'u'Indicate that a formerly enqueued task is complete. + + Used by Queue consumer threads. For each get() used to fetch a task, + a subsequent call to task_done() tells the queue that the processing + on the task is complete. + + If a join() is currently blocking, it will resume when all items + have been processed (meaning that a task_done() call was received + for every item that had been put() into the queue). + + Raises a ValueError if called more times than there were items + placed in the queue. + 'b'task_done() called too many times'u'task_done() called too many times'b'Blocks until all items in the Queue have been gotten and processed. + + The count of unfinished tasks goes up whenever an item is added to the + queue. The count goes down whenever a consumer thread calls task_done() + to indicate the item was retrieved and all work on it is complete. + + When the count of unfinished tasks drops to zero, join() unblocks. + 'u'Blocks until all items in the Queue have been gotten and processed. + + The count of unfinished tasks goes up whenever an item is added to the + queue. 
The count goes down whenever a consumer thread calls task_done() + to indicate the item was retrieved and all work on it is complete. + + When the count of unfinished tasks drops to zero, join() unblocks. + 'b'Return the approximate size of the queue (not reliable!).'u'Return the approximate size of the queue (not reliable!).'b'Return True if the queue is empty, False otherwise (not reliable!). + + This method is likely to be removed at some point. Use qsize() == 0 + as a direct substitute, but be aware that either approach risks a race + condition where a queue can grow before the result of empty() or + qsize() can be used. + + To create code that needs to wait for all queued tasks to be + completed, the preferred technique is to use the join() method. + 'u'Return True if the queue is empty, False otherwise (not reliable!). + + This method is likely to be removed at some point. Use qsize() == 0 + as a direct substitute, but be aware that either approach risks a race + condition where a queue can grow before the result of empty() or + qsize() can be used. + + To create code that needs to wait for all queued tasks to be + completed, the preferred technique is to use the join() method. + 'b'Return True if the queue is full, False otherwise (not reliable!). + + This method is likely to be removed at some point. Use qsize() >= n + as a direct substitute, but be aware that either approach risks a race + condition where a queue can shrink before the result of full() or + qsize() can be used. + 'u'Return True if the queue is full, False otherwise (not reliable!). + + This method is likely to be removed at some point. Use qsize() >= n + as a direct substitute, but be aware that either approach risks a race + condition where a queue can shrink before the result of full() or + qsize() can be used. + 'b'Put an item into the queue. + + If optional args 'block' is true and 'timeout' is None (the default), + block if necessary until a free slot is available. If 'timeout' is + a non-negative number, it blocks at most 'timeout' seconds and raises + the Full exception if no free slot was available within that time. + Otherwise ('block' is false), put an item on the queue if a free slot + is immediately available, else raise the Full exception ('timeout' + is ignored in that case). + 'u'Put an item into the queue. + + If optional args 'block' is true and 'timeout' is None (the default), + block if necessary until a free slot is available. If 'timeout' is + a non-negative number, it blocks at most 'timeout' seconds and raises + the Full exception if no free slot was available within that time. + Otherwise ('block' is false), put an item on the queue if a free slot + is immediately available, else raise the Full exception ('timeout' + is ignored in that case). + 'b''timeout' must be a non-negative number'u''timeout' must be a non-negative number'b'Remove and return an item from the queue. + + If optional args 'block' is true and 'timeout' is None (the default), + block if necessary until an item is available. If 'timeout' is + a non-negative number, it blocks at most 'timeout' seconds and raises + the Empty exception if no item was available within that time. + Otherwise ('block' is false), return an item if one is immediately + available, else raise the Empty exception ('timeout' is ignored + in that case). + 'u'Remove and return an item from the queue. + + If optional args 'block' is true and 'timeout' is None (the default), + block if necessary until an item is available. 
If 'timeout' is + a non-negative number, it blocks at most 'timeout' seconds and raises + the Empty exception if no item was available within that time. + Otherwise ('block' is false), return an item if one is immediately + available, else raise the Empty exception ('timeout' is ignored + in that case). + 'b'Put an item into the queue without blocking. + + Only enqueue the item if a free slot is immediately available. + Otherwise raise the Full exception. + 'u'Put an item into the queue without blocking. + + Only enqueue the item if a free slot is immediately available. + Otherwise raise the Full exception. + 'b'Remove and return an item from the queue without blocking. + + Only get an item if one is immediately available. Otherwise + raise the Empty exception. + 'u'Remove and return an item from the queue without blocking. + + Only get an item if one is immediately available. Otherwise + raise the Empty exception. + 'b'Variant of Queue that retrieves open entries in priority order (lowest first). + + Entries are typically tuples of the form: (priority number, data). + 'u'Variant of Queue that retrieves open entries in priority order (lowest first). + + Entries are typically tuples of the form: (priority number, data). + 'b'Variant of Queue that retrieves most recently added entries first.'u'Variant of Queue that retrieves most recently added entries first.'b'Simple, unbounded FIFO queue. + + This pure Python implementation is not reentrant. + 'u'Simple, unbounded FIFO queue. + + This pure Python implementation is not reentrant. + 'b'Put the item on the queue. + + The optional 'block' and 'timeout' arguments are ignored, as this method + never blocks. They are provided for compatibility with the Queue class. + 'u'Put the item on the queue. + + The optional 'block' and 'timeout' arguments are ignored, as this method + never blocks. They are provided for compatibility with the Queue class. + 'b'Put an item into the queue without blocking. + + This is exactly equivalent to `put(item)` and is only provided + for compatibility with the Queue class. + 'u'Put an item into the queue without blocking. + + This is exactly equivalent to `put(item)` and is only provided + for compatibility with the Queue class. + 'b'Return True if the queue is empty, False otherwise (not reliable!).'u'Return True if the queue is empty, False otherwise (not reliable!).'register_after_forkis_exiting_maxsize_rlock_opid_wlock_semQueue._after_fork()_notempty_jointhread_joincancelledQueue _start_thread_semlockQueue.join_thread()Queue {0!r} not closedcancel_join_threadQueue.cancel_join_thread()Queue._start_thread()_feedQueueFeederThreaddoing self._thread.start()... done self._thread.start()_finalize_join_finalize_closetwrjoining queue thread... queue thread joined... queue thread already deadnotemptytelling queue thread to quitwritelockignore_epipequeue_semstarting thread to feed data to pipenacquirenreleasenwaitbpopleftwacquirewreleasefeeder thread got sentinel -- exitingerror in queue thread: %s + Private API hook called when feeding data in the background thread + raises an exception. For overriding by concurrent.futures. 
+ _unfinished_tasks_cond# Module implementing queues# multiprocessing/queues.py# Queue type using a pipe, buffer and thread# Can raise ImportError (see issues #3770 and #23400)# For use by concurrent.futures# unserialize the data after having released the lock# Raises NotImplementedError on Mac OSX because of broken sem_getvalue()# Start thread which transfers data from buffer to pipe# Send sentinel to the thread queue object when garbage collected# serialize the data before acquiring the lock# Since this runs in a daemon thread the resources it uses# may be become unusable while the process is cleaning up.# We ignore errors which happen after the process has# started to cleanup.# Since the object has not been sent in the queue, we need# to decrease the size of the queue. The error acts as# if the object had been silently removed from the queue# and this step is necessary to have a properly working# queue.# A queue type which also supports join() and task_done() methods# Note that if you do not call task_done() for each finished task then# eventually the counter's semaphore may overflow causing Bad Things# to happen.# Simplified Queue type -- really just a locked pipe# writes to a message oriented win32 pipe are atomicb'JoinableQueue'u'JoinableQueue'b'Queue._after_fork()'u'Queue._after_fork()'b'Queue 'u'Queue 'b'Queue.join_thread()'u'Queue.join_thread()'b'Queue {0!r} not closed'u'Queue {0!r} not closed'b'Queue.cancel_join_thread()'u'Queue.cancel_join_thread()'b'Queue._start_thread()'u'Queue._start_thread()'b'QueueFeederThread'u'QueueFeederThread'b'doing self._thread.start()'u'doing self._thread.start()'b'... done self._thread.start()'u'... done self._thread.start()'b'joining queue thread'u'joining queue thread'b'... queue thread joined'u'... queue thread joined'b'... queue thread already dead'u'... queue thread already dead'b'telling queue thread to quit'u'telling queue thread to quit'b'starting thread to feed data to pipe'u'starting thread to feed data to pipe'b'feeder thread got sentinel -- exiting'u'feeder thread got sentinel -- exiting'b'error in queue thread: %s'u'error in queue thread: %s'b' + Private API hook called when feeding data in the background thread + raises an exception. For overriding by concurrent.futures. + 'u' + Private API hook called when feeding data in the background thread + raises an exception. For overriding by concurrent.futures. + 'u'multiprocessing.queues'u'queues'QueueFullQueueEmptyRaised when Queue.get_nowait() is called on an empty Queue.Raised when the Queue.put_nowait() method is called on a full Queue.A queue, useful for coordinating producer and consumer coroutines. + + If maxsize is less than or equal to zero, the queue size is infinite. If it + is an integer greater than 0, then "await put()" will block when the + queue reaches maxsize, until an item is removed by get(). + + Unlike the standard library Queue, you can reliably know this Queue's size + with qsize(), since your single-threaded asyncio application won't be + interrupted between calling qsize() and doing an operation on the Queue. + _getters_putters_wakeup_nextmaxsize= _queue= _getters[ _putters[ tasks=Number of items in the queue.Number of items allowed in the queue.Return True if the queue is empty, False otherwise.Return True if there are maxsize items in the queue. + + Note: if the Queue was initialized with maxsize=0 (the default), + then full() is never True. + Put an item into the queue. + + Put an item into the queue. 
If the queue is full, wait until a free + slot is available before adding item. + putterPut an item into the queue without blocking. + + If no free slot is immediately available, raise QueueFull. + Remove and return an item from the queue. + + If queue is empty, wait until an item is available. + Remove and return an item from the queue. + + Return an item if one is immediately available, else raise QueueEmpty. + Indicate that a formerly enqueued task is complete. + + Used by queue consumers. For each get() used to fetch a task, + a subsequent call to task_done() tells the queue that the processing + on the task is complete. + + If a join() is currently blocking, it will resume when all items have + been processed (meaning that a task_done() call was received for every + item that had been put() into the queue). + + Raises ValueError if called more times than there were items placed in + the queue. + Block until all items in the queue have been gotten and processed. + + The count of unfinished tasks goes up whenever an item is added to the + queue. The count goes down whenever a consumer calls task_done() to + indicate that the item was retrieved and all work on it is complete. + When the count of unfinished tasks drops to zero, join() unblocks. + A subclass of Queue; retrieves entries in priority order (lowest first). + + Entries are typically tuples of the form: (priority number, data). + A subclass of Queue that retrieves most recently added entries first.# Futures.# These three are overridable in subclasses.# End of the overridable methods.# Wake up the next waiter (if any) that isn't cancelled.# Just in case putter is not done yet.# Clean self._putters from canceled putters.# The putter could be removed from self._putters by a# previous get_nowait call.# We were woken up by get_nowait(), but can't take# the call. Wake up the next in line.# Just in case getter is not done yet.# Clean self._getters from canceled getters.# The getter could be removed from self._getters by a# previous put_nowait call.# We were woken up by put_nowait(), but can't takeb'QueueFull'u'QueueFull'b'QueueEmpty'u'QueueEmpty'b'Raised when Queue.get_nowait() is called on an empty Queue.'u'Raised when Queue.get_nowait() is called on an empty Queue.'b'Raised when the Queue.put_nowait() method is called on a full Queue.'u'Raised when the Queue.put_nowait() method is called on a full Queue.'b'A queue, useful for coordinating producer and consumer coroutines. + + If maxsize is less than or equal to zero, the queue size is infinite. If it + is an integer greater than 0, then "await put()" will block when the + queue reaches maxsize, until an item is removed by get(). + + Unlike the standard library Queue, you can reliably know this Queue's size + with qsize(), since your single-threaded asyncio application won't be + interrupted between calling qsize() and doing an operation on the Queue. + 'u'A queue, useful for coordinating producer and consumer coroutines. + + If maxsize is less than or equal to zero, the queue size is infinite. If it + is an integer greater than 0, then "await put()" will block when the + queue reaches maxsize, until an item is removed by get(). + + Unlike the standard library Queue, you can reliably know this Queue's size + with qsize(), since your single-threaded asyncio application won't be + interrupted between calling qsize() and doing an operation on the Queue. 
+ 'b'maxsize='u'maxsize='b'_queue'b' _queue='u' _queue='b' _getters['u' _getters['b' _putters['u' _putters['b' tasks='u' tasks='b'Number of items in the queue.'u'Number of items in the queue.'b'Number of items allowed in the queue.'u'Number of items allowed in the queue.'b'Return True if the queue is empty, False otherwise.'u'Return True if the queue is empty, False otherwise.'b'Return True if there are maxsize items in the queue. + + Note: if the Queue was initialized with maxsize=0 (the default), + then full() is never True. + 'u'Return True if there are maxsize items in the queue. + + Note: if the Queue was initialized with maxsize=0 (the default), + then full() is never True. + 'b'Put an item into the queue. + + Put an item into the queue. If the queue is full, wait until a free + slot is available before adding item. + 'u'Put an item into the queue. + + Put an item into the queue. If the queue is full, wait until a free + slot is available before adding item. + 'b'Put an item into the queue without blocking. + + If no free slot is immediately available, raise QueueFull. + 'u'Put an item into the queue without blocking. + + If no free slot is immediately available, raise QueueFull. + 'b'Remove and return an item from the queue. + + If queue is empty, wait until an item is available. + 'u'Remove and return an item from the queue. + + If queue is empty, wait until an item is available. + 'b'Remove and return an item from the queue. + + Return an item if one is immediately available, else raise QueueEmpty. + 'u'Remove and return an item from the queue. + + Return an item if one is immediately available, else raise QueueEmpty. + 'b'Indicate that a formerly enqueued task is complete. + + Used by queue consumers. For each get() used to fetch a task, + a subsequent call to task_done() tells the queue that the processing + on the task is complete. + + If a join() is currently blocking, it will resume when all items have + been processed (meaning that a task_done() call was received for every + item that had been put() into the queue). + + Raises ValueError if called more times than there were items placed in + the queue. + 'u'Indicate that a formerly enqueued task is complete. + + Used by queue consumers. For each get() used to fetch a task, + a subsequent call to task_done() tells the queue that the processing + on the task is complete. + + If a join() is currently blocking, it will resume when all items have + been processed (meaning that a task_done() call was received for every + item that had been put() into the queue). + + Raises ValueError if called more times than there were items placed in + the queue. + 'b'Block until all items in the queue have been gotten and processed. + + The count of unfinished tasks goes up whenever an item is added to the + queue. The count goes down whenever a consumer calls task_done() to + indicate that the item was retrieved and all work on it is complete. + When the count of unfinished tasks drops to zero, join() unblocks. + 'u'Block until all items in the queue have been gotten and processed. + + The count of unfinished tasks goes up whenever an item is added to the + queue. The count goes down whenever a consumer calls task_done() to + indicate that the item was retrieved and all work on it is complete. + When the count of unfinished tasks drops to zero, join() unblocks. + 'b'A subclass of Queue; retrieves entries in priority order (lowest first). + + Entries are typically tuples of the form: (priority number, data). 
+ 'u'A subclass of Queue; retrieves entries in priority order (lowest first). + + Entries are typically tuples of the form: (priority number, data). + 'b'A subclass of Queue that retrieves most recently added entries first.'u'A subclass of Queue that retrieves most recently added entries first.'u'asyncio.queues'Conversions to/from quoted-printable transport encoding as per RFC 1521.ESCAPE0123456789ABCDEFHEXneedsquotingDecide whether a particular byte ordinal needs to be quoted. + + The 'quotetabs' flag indicates whether embedded tabs and spaces should be + quoted. Note that line-ending tabs and spaces are always encoded, as per + RFC 1521. + Quote a single character.Read 'input', apply quoted-printable encoding, and write to 'output'. + + 'input' and 'output' are binary file objects. The 'quotetabs' flag + indicates whether embedded tabs and spaces should be quoted. Note that + line-ending tabs and spaces are always encoded, as per RFC 1521. + The 'header' flag indicates whether we are encoding spaces as _ as per RFC + 1522.odatalineEndprevlineoutlinethisline= +infpoutfpRead 'input', apply quoted-printable decoding, and write to 'output'. + 'input' and 'output' are binary file objects. + If 'header' is true, decode underscore as space (per RFC 1522). ishexunhexReturn true if the byte ordinal 'c' is a hexadecimal digit in ASCII.Get the integer value of a hexadecimal number.non-hex digit usage: quopri [-t | -d] [file] ...-t: quote tabs-d: decode; default encodedecotabs-t and -d are mutually exclusive%s: can't open (%s) +# (Dec 1991 version).# if header, we have to escape _ because _ is used to escape space# RFC 1521 requires that the line ending in a space or tab must have# that trailing character encoded.# Strip off any readline induced trailing newline# Calculate the un-length-limited encoded line# First, write out the previous line# Now see if we need any soft line breaks because of RFC-imposed# length limitations. Then do the thisline->prevline dance.# Don't forget to include the soft line break `=' sign in the# length calculation!# Write out the current line# Write out the last line, without a trailing newline# Strip trailing whitespace# Bad escape sequence -- leave it in# Other helper functionsb'Conversions to/from quoted-printable transport encoding as per RFC 1521.'u'Conversions to/from quoted-printable transport encoding as per RFC 1521.'b'encodestring'u'encodestring'b'0123456789ABCDEF'b'Decide whether a particular byte ordinal needs to be quoted. + + The 'quotetabs' flag indicates whether embedded tabs and spaces should be + quoted. Note that line-ending tabs and spaces are always encoded, as per + RFC 1521. + 'u'Decide whether a particular byte ordinal needs to be quoted. + + The 'quotetabs' flag indicates whether embedded tabs and spaces should be + quoted. Note that line-ending tabs and spaces are always encoded, as per + RFC 1521. + 'b'Quote a single character.'u'Quote a single character.'b'Read 'input', apply quoted-printable encoding, and write to 'output'. + + 'input' and 'output' are binary file objects. The 'quotetabs' flag + indicates whether embedded tabs and spaces should be quoted. Note that + line-ending tabs and spaces are always encoded, as per RFC 1521. + The 'header' flag indicates whether we are encoding spaces as _ as per RFC + 1522.'u'Read 'input', apply quoted-printable encoding, and write to 'output'. + + 'input' and 'output' are binary file objects. The 'quotetabs' flag + indicates whether embedded tabs and spaces should be quoted. 
Note that + line-ending tabs and spaces are always encoded, as per RFC 1521. + The 'header' flag indicates whether we are encoding spaces as _ as per RFC + 1522.'b'= +'b'Read 'input', apply quoted-printable decoding, and write to 'output'. + 'input' and 'output' are binary file objects. + If 'header' is true, decode underscore as space (per RFC 1522).'u'Read 'input', apply quoted-printable decoding, and write to 'output'. + 'input' and 'output' are binary file objects. + If 'header' is true, decode underscore as space (per RFC 1522).'b' 'b'Return true if the byte ordinal 'c' is a hexadecimal digit in ASCII.'u'Return true if the byte ordinal 'c' is a hexadecimal digit in ASCII.'b'Get the integer value of a hexadecimal number.'u'Get the integer value of a hexadecimal number.'b'non-hex digit 'u'non-hex digit 'b'td'u'td'b'usage: quopri [-t | -d] [file] ...'u'usage: quopri [-t | -d] [file] ...'b'-t: quote tabs'u'-t: quote tabs'b'-d: decode; default encode'u'-d: decode; default encode'b'-t and -d are mutually exclusive'u'-t and -d are mutually exclusive'b'%s: can't open (%s) +'u'%s: can't open (%s) +'Quoted-printable content transfer encoding per RFCs 2045-2047. + +This module handles the content transfer encoding method defined in RFC 2045 +to encode US ASCII-like 8-bit data called `quoted-printable'. It is used to +safely encode text that is in a character set similar to the 7-bit US ASCII +character set, but that includes some 8-bit characters that are normally not +allowed in email bodies or headers. + +Quoted-printable is very space-inefficient for encoding binary files; use the +email.base64mime module for that instead. + +This module provides an interface to encode and decode both headers and bodies +with quoted-printable encoding. + +RFC 2045 defines a method for including character set information in an +`encoded-word' in a header. This method is commonly used for 8-bit real names +in To:/From:/Cc: etc. fields, as well as Subject: lines. + +This module does not do the line wrapping or end-of-line character +conversion necessary for proper internationalized headers; it only +does dumb encoding and decoding. To deal with the various line +wrapping issues, use the email.header module. +body_lengthhexdigits=%02X_QUOPRI_MAP_QUOPRI_HEADER_MAP_QUOPRI_BODY_MAP !"#$%&'()*+,-./0123456789:;<>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~ b' !"#$%&\'()*+,-./0123456789:;<>'b'?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`'b'abcdefghijklmnopqrstuvwxyz{|}~\t'header_checkoctetReturn True if the octet should be escaped with header quopri.body_checkReturn True if the octet should be escaped with body quopri.Return a header quoted-printable encoding length. + + Note that this does not include any RFC 2047 chrome added by + `header_encode()`. + + :param bytearray: An array of bytes (a.k.a. octets). + :return: The length in bytes of the byte array when it is encoded with + quoted-printable for headers. + Return a body quoted-printable encoding length. + + :param bytearray: An array of bytes (a.k.a. octets). + :return: The length in bytes of the byte array when it is encoded with + quoted-printable for bodies. + Turn a string in the form =AB to the ASCII character with value 0xabEncode a single header line with quoted-printable (like) encoding. + + Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but + used specifically for email header fields to allow charsets with mostly 7 + bit characters (and some 8 bit) to remain more or less readable in non-RFC + 2045 aware mail clients. 
+ + charset names the character set to use in the RFC 2046 header. It + defaults to iso-8859-1. + =?%s?q?%s?=_QUOPRI_BODY_ENCODE_MAPEncode with quoted-printable, wrapping at maxlinelen characters. + + Each line of encoded text will end with eol, which defaults to "\n". Set + this to "\r\n" if you will be using the result of this function directly + in an email. + + Each line will be wrapped at, at most, maxlinelen characters before the + eol string (maxlinelen defaults to 76 characters, the maximum value + permitted by RFC 2045). Long lines will have the 'soft line break' + quoted-printable character "=" appended to them, so the decoded text will + be identical to the original text. + + The minimum maxlinelen is 4 to have room for a quoted character ("=XX") + followed by a soft line break. Smaller values will generate a + ValueError. + + maxlinelen must be at least 4soft_breakmaxlinelen1encoded_bodylaststartroomDecode a quoted-printable string. + + Lines are separated with eol, which defaults to \n. + _unquote_matchTurn a match in the form =AB to the ASCII character with value 0xabDecode a string encoded with RFC 2045 MIME header `Q' encoding. + + This function does not parse a full MIME header value encoded with + quoted-printable (like =?iso-8859-1?q?Hello_World?=) -- please use + the high level email.header class for that functionality. + =[a-fA-F0-9]{2}# Build a mapping of octets to the expansion of that octet. Since we're only# going to have 256 of these things, this isn't terribly inefficient# space-wise. Remember that headers and bodies have different sets of safe# characters. Initialize both maps with the full expansion, and then override# the safe bytes with the more compact form.# Safe header bytes which need no encoding.# Headers have one other special encoding; spaces become underscores.# Safe body bytes which need no encoding.# Return empty headers as an empty string.# Iterate over every byte, encoding if necessary.# Now add the RFC chrome to each encoded chunk and glue the chunks# together.# quote special characters# leave space for the '=' at the end of a line# break up the line into pieces no longer than maxlinelen - 1# make sure we don't break up an escape sequence# handle rest of line, special case if line ends in whitespace# It's a whitespace character at end-of-line, and we have room# for the three-character quoted encoding.# There's room for the whitespace character and a soft break.# There's room only for a soft break. The quoted whitespace# will be the only content on the subsequent line.# add back final newline if present# BAW: I'm not sure if the intent was for the signature of this function to be# the same as base64MIME.decode() or not...# BAW: see comment in encode() above. Again, we're building up the# decoded string with string concatenation, which could be done much more# efficiently.# Otherwise, c == "=". Are we at the end of the line? If so, add# a soft line break.# Decode if in form =AB# Otherwise, not in form =AB, pass literally# Special case if original string did not end with eol# Header decoding is done a bit differentlyb'Quoted-printable content transfer encoding per RFCs 2045-2047. + +This module handles the content transfer encoding method defined in RFC 2045 +to encode US ASCII-like 8-bit data called `quoted-printable'. It is used to +safely encode text that is in a character set similar to the 7-bit US ASCII +character set, but that includes some 8-bit characters that are normally not +allowed in email bodies or headers. 
+ +Quoted-printable is very space-inefficient for encoding binary files; use the +email.base64mime module for that instead. + +This module provides an interface to encode and decode both headers and bodies +with quoted-printable encoding. + +RFC 2045 defines a method for including character set information in an +`encoded-word' in a header. This method is commonly used for 8-bit real names +in To:/From:/Cc: etc. fields, as well as Subject: lines. + +This module does not do the line wrapping or end-of-line character +conversion necessary for proper internationalized headers; it only +does dumb encoding and decoding. To deal with the various line +wrapping issues, use the email.header module. +'u'Quoted-printable content transfer encoding per RFCs 2045-2047. + +This module handles the content transfer encoding method defined in RFC 2045 +to encode US ASCII-like 8-bit data called `quoted-printable'. It is used to +safely encode text that is in a character set similar to the 7-bit US ASCII +character set, but that includes some 8-bit characters that are normally not +allowed in email bodies or headers. + +Quoted-printable is very space-inefficient for encoding binary files; use the +email.base64mime module for that instead. + +This module provides an interface to encode and decode both headers and bodies +with quoted-printable encoding. + +RFC 2045 defines a method for including character set information in an +`encoded-word' in a header. This method is commonly used for 8-bit real names +in To:/From:/Cc: etc. fields, as well as Subject: lines. + +This module does not do the line wrapping or end-of-line character +conversion necessary for proper internationalized headers; it only +does dumb encoding and decoding. To deal with the various line +wrapping issues, use the email.header module. +'b'body_length'u'body_length'b'header_decode'u'header_decode'b'=%02X'u'=%02X'b' !"#$%&'()*+,-./0123456789:;<>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~ 'b'Return True if the octet should be escaped with header quopri.'u'Return True if the octet should be escaped with header quopri.'b'Return True if the octet should be escaped with body quopri.'u'Return True if the octet should be escaped with body quopri.'b'Return a header quoted-printable encoding length. + + Note that this does not include any RFC 2047 chrome added by + `header_encode()`. + + :param bytearray: An array of bytes (a.k.a. octets). + :return: The length in bytes of the byte array when it is encoded with + quoted-printable for headers. + 'u'Return a header quoted-printable encoding length. + + Note that this does not include any RFC 2047 chrome added by + `header_encode()`. + + :param bytearray: An array of bytes (a.k.a. octets). + :return: The length in bytes of the byte array when it is encoded with + quoted-printable for headers. + 'b'Return a body quoted-printable encoding length. + + :param bytearray: An array of bytes (a.k.a. octets). + :return: The length in bytes of the byte array when it is encoded with + quoted-printable for bodies. + 'u'Return a body quoted-printable encoding length. + + :param bytearray: An array of bytes (a.k.a. octets). + :return: The length in bytes of the byte array when it is encoded with + quoted-printable for bodies. + 'b'Turn a string in the form =AB to the ASCII character with value 0xab'u'Turn a string in the form =AB to the ASCII character with value 0xab'b'Encode a single header line with quoted-printable (like) encoding. 
+ + Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but + used specifically for email header fields to allow charsets with mostly 7 + bit characters (and some 8 bit) to remain more or less readable in non-RFC + 2045 aware mail clients. + + charset names the character set to use in the RFC 2046 header. It + defaults to iso-8859-1. + 'u'Encode a single header line with quoted-printable (like) encoding. + + Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but + used specifically for email header fields to allow charsets with mostly 7 + bit characters (and some 8 bit) to remain more or less readable in non-RFC + 2045 aware mail clients. + + charset names the character set to use in the RFC 2046 header. It + defaults to iso-8859-1. + 'b'=?%s?q?%s?='u'=?%s?q?%s?='b'Encode with quoted-printable, wrapping at maxlinelen characters. + + Each line of encoded text will end with eol, which defaults to "\n". Set + this to "\r\n" if you will be using the result of this function directly + in an email. + + Each line will be wrapped at, at most, maxlinelen characters before the + eol string (maxlinelen defaults to 76 characters, the maximum value + permitted by RFC 2045). Long lines will have the 'soft line break' + quoted-printable character "=" appended to them, so the decoded text will + be identical to the original text. + + The minimum maxlinelen is 4 to have room for a quoted character ("=XX") + followed by a soft line break. Smaller values will generate a + ValueError. + + 'u'Encode with quoted-printable, wrapping at maxlinelen characters. + + Each line of encoded text will end with eol, which defaults to "\n". Set + this to "\r\n" if you will be using the result of this function directly + in an email. + + Each line will be wrapped at, at most, maxlinelen characters before the + eol string (maxlinelen defaults to 76 characters, the maximum value + permitted by RFC 2045). Long lines will have the 'soft line break' + quoted-printable character "=" appended to them, so the decoded text will + be identical to the original text. + + The minimum maxlinelen is 4 to have room for a quoted character ("=XX") + followed by a soft line break. Smaller values will generate a + ValueError. + + 'b'maxlinelen must be at least 4'u'maxlinelen must be at least 4'b'Decode a quoted-printable string. + + Lines are separated with eol, which defaults to \n. + 'u'Decode a quoted-printable string. + + Lines are separated with eol, which defaults to \n. + 'b'Turn a match in the form =AB to the ASCII character with value 0xab'u'Turn a match in the form =AB to the ASCII character with value 0xab'b'Decode a string encoded with RFC 2045 MIME header `Q' encoding. + + This function does not parse a full MIME header value encoded with + quoted-printable (like =?iso-8859-1?q?Hello_World?=) -- please use + the high level email.header class for that functionality. + 'u'Decode a string encoded with RFC 2045 MIME header `Q' encoding. + + This function does not parse a full MIME header value encoded with + quoted-printable (like =?iso-8859-1?q?Hello_World?=) -- please use + the high level email.header class for that functionality. + 'b'=[a-fA-F0-9]{2}'u'=[a-fA-F0-9]{2}'u'email.quoprimime'Random variable generators. 
+ + integers + -------- + uniform within range + + sequences + --------- + pick random element + pick random sample + pick weighted random sample + generate random permutation + + distributions on the real line: + ------------------------------ + uniform + triangular + normal (Gaussian) + lognormal + negative exponential + gamma + beta + pareto + Weibull + + distributions on the circle (angles 0 to 2pi) + --------------------------------------------- + circular uniform + von Mises + +General notes on the underlying Mersenne Twister core generator: + +* The period is 2**19937-1. +* It is one of the most extensively tested generators in existence. +* The random() method is implemented in C, executes in a single Python step, + and is, therefore, threadsafe. + +_pi_e_ceil_sqrt_acos_cos_sin_urandom_Set_Sequence_accumulateuniformrandintsamplerandrangeshufflenormalvariatelognormvariateexpovariatevonmisesvariategammavariatetriangulargaussbetavariateparetovariateweibullvariateSystemRandomNV_MAGICCONSTTWOPILOG44.5SG_MAGICCONSTBPFRECIP_BPFRandom number generator base class used by bound module functions. + + Used to instantiate instances of Random to get generators that don't + share state. + + Class Random can also be subclassed if you want to use a different basic + generator of your own devising: in that case, override the following + methods: random(), seed(), getstate(), and setstate(). + Optionally, implement a getrandbits() method so that randrange() + can cover arbitrarily large ranges. + + Initialize an instance. + + Optional argument x controls seeding, as for Random.seed(). + gauss_nextControl how subclasses generate random integers. + + The algorithm a subclass can use depends on the random() and/or + getrandbits() implementation available to it and determines + whether it can generate random integers from arbitrarily large + ranges. + _randbelow_randbelow_with_getrandbits_randbelow_without_getrandbitsInitialize internal state from hashable object. + + None or no argument seeds from current time or from an operating + system specific randomness source if available. + + If *a* is an int, all bits are used. + + For version 2 (the default), all of the bits are used if *a* is a str, + bytes, or bytearray. For version 1 (provided for reproducing random + sequences from older versions of Python), the algorithm for str and + bytes generates a narrower range of seeds. + + 0xFFFFFFFFFFFFFFFFReturn internal state; can be passed to setstate() later.Restore internal state from object returned by getstate().internalstatestate with version %s passed to Random.setstate() of version %s"state with version %s passed to ""Random.setstate() of version %s"Choose a random item from range(start, stop[, step]). + + This fixes the problem with randint() which includes the + endpoint; in Python this is usually not what you want. + + istartnon-integer arg 1 for randrange()empty range for randrange()istopnon-integer stop for randrange()empty range for randrange() (%d, %d, %d)istepnon-integer step for randrange()zero step for randrange()Return random integer in range [a, b], including both end points. + Return a random int in the range [0,n). Raises ValueError if n==0.Return a random int in the range [0,n). Raises ValueError if n==0. + + The implementation does not use getrandbits, but only random. + Underlying random() generator does not supply +enough bits to choose from a population range this large. 
+To remove the range limitation, add a getrandbits() method."Underlying random() generator does not supply \n""enough bits to choose from a population range this large.\n""To remove the range limitation, add a getrandbits() method."Boundary cannot be zeroChoose a random element from a non-empty sequence.Cannot choose from an empty sequenceShuffle list x in place, and return None. + + Optional argument random is a 0-argument function returning a + random float in [0.0, 1.0); if it is the default None, the + standard random.random will be used. + + randbelowpopulationChooses k unique random elements from a population sequence or set. + + Returns a new list containing elements from the population while + leaving the original population unchanged. The resulting list is + in selection order so that all sub-slices will also be valid random + samples. This allows raffle winners (the sample) to be partitioned + into grand prize and second place winners (the subslices). + + Members of the population need not be hashable or unique. If the + population contains repeats, then each occurrence is a possible + selection in the sample. + + To choose a sample in a range of integers, use range as an argument. + This is especially fast and space efficient for sampling from a + large population: sample(range(10000000), 60) + Population must be a sequence or set. For dicts, use list(d).Sample larger than population or is negativesetsizeselectedselected_addweightscum_weightsReturn a k sized list of population elements chosen with replacement. + + If the relative weights or cumulative weights are not specified, + the selections are made with equal probability. + + Cannot specify both weights and cumulative weightsThe number of weights does not match the populationtotalGet a random number in the range [a, b) or [a, b] depending on rounding.lowhighTriangular distribution. + + Continuous distribution bounded by given lower and upper limits, + and having a given mode value in-between. + + http://en.wikipedia.org/wiki/Triangular_distribution + + Normal distribution. + + mu is the mean, and sigma is the standard deviation. + + Log normal distribution. + + If you take the natural logarithm of this distribution, you'll get a + normal distribution with mean mu and standard deviation sigma. + mu can have any value, and sigma must be greater than zero. + + lambdExponential distribution. + + lambd is 1.0 divided by the desired mean. It should be + nonzero. (The parameter would be called "lambda", but that is + a reserved word in Python.) Returned values range from 0 to + positive infinity if lambd is positive, and from negative + infinity to 0 if lambd is negative. + + Circular data distribution. + + mu is the mean angle, expressed in radians between 0 and 2*pi, and + kappa is the concentration parameter, which must be greater than or + equal to zero. If kappa is equal to zero, this distribution reduces + to a uniform random angle over the range 0 to 2*pi. + + 1e-061e-6Gamma distribution. Not the gamma function! + + Conditions on the parameters are alpha > 0 and beta > 0. + + The probability distribution function is: + + x ** (alpha - 1) * math.exp(-x / beta) + pdf(x) = -------------------------------------- + math.gamma(alpha) * beta ** alpha + + gammavariate: alpha and beta must be > 0.0ainvbbb1e-071e-70.9999999.9999999Gaussian distribution. + + mu is the mean, and sigma is the standard deviation. This is + slightly faster than the normalvariate() function. + + Not thread-safe without a lock around calls. 
+ + x2pig2radBeta distribution. + + Conditions on the parameters are alpha > 0 and beta > 0. + Returned values range between 0 and 1. + + Pareto distribution. alpha is the shape parameter.Weibull distribution. + + alpha is the scale parameter and beta is the shape parameter. + + Alternate random number generator using sources provided + by the operating system (such as /dev/urandom on Unix or + CryptGenRandom on Windows). + + Not available on all systems (see os.urandom() for details). + Get the next random number in the range [0.0, 1.0).getrandbits(k) -> x. Generates an int with k random bits.number of bits must be greater than zeronumbytesStub method. Not used for a system random number generator._notimplementedMethod should not be called for a system random number generator.System entropy source does not have state._test_generatorsqsum10000000000.01e10smallestlargestsec,avgstddevavg %g, stddev %g, min %g, max %g +0.9200.0_inst# hashlib is pretty heavy to load, try lean internal module first# fallback to official implementation# Number of bits in a float# Translated by Guido van Rossum from C source provided by# Adrian Baddeley. Adapted by Raymond Hettinger for use with# the Mersenne Twister and os.urandom() core generators.# used by getstate/setstate# just inherit it# In version 2, the state was saved as signed ints, which causes# inconsistencies between 32/64-bit systems. The state is# really unsigned 32-bit ints, so we convert negative ints from# version 2 to positive longs for version 3.## ---- Methods below this point do not need to be overridden when## ---- subclassing for the purpose of using a different core generator.## -------------------- pickle support -------------------# Issue 17489: Since __reduce__ was defined to fix #759889 this is no# longer called; we leave it here because it has been here since random was# rewritten back in 2001 and why risk breaking something.# for pickle## -------------------- integer methods -------------------# This code is a bit messy to make it fast for the# common case while still doing adequate error checking.# stop argument supplied.# Non-unit step argument supplied.# don't use (n-1) here because n can be 1# 0 <= r < 2**k# int(limit * maxsize) % n == 0## -------------------- sequence methods -------------------# pick an element in x[:i+1] with which to exchange x[i]# Sampling without replacement entails tracking either potential# selections (the pool) in a list or previous selections in a set.# When the number of selections is small compared to the# population, then tracking selections is efficient, requiring# only a small set and an occasional reselection. For# a larger number of selections, the pool tracking method is# preferred since the list takes less space than the# set and it doesn't suffer from frequent reselections.# The number of calls to _randbelow() is kept at or near k, the# theoretical minimum. 
This is important because running time# is dominated by _randbelow() and because it extracts the# least entropy from the underlying random number generators.# Memory requirements are kept to the smaller of a k-length# set or an n-length list.# There are other sampling algorithms that do not require# auxiliary memory, but they were rejected because they made# too many calls to _randbelow(), making them slower and# causing them to eat more entropy than necessary.# size of a small set minus size of an empty list# table size for big sets# An n-length list is smaller than a k-length set# invariant: non-selected at [0,n-i)# move non-selected item into vacancy# convert to float for a small speed improvement# convert to float## -------------------- real-valued distributions -------------------## -------------------- uniform distribution -------------------## -------------------- triangular --------------------## -------------------- normal distribution --------------------# mu = mean, sigma = standard deviation# Uses Kinderman and Monahan method. Reference: Kinderman,# A.J. and Monahan, J.F., "Computer generation of random# variables using the ratio of uniform deviates", ACM Trans# Math Software, 3, (1977), pp257-260.## -------------------- lognormal distribution --------------------## -------------------- exponential distribution --------------------# lambd: rate lambd = 1/mean# ('lambda' is a Python reserved word)# we use 1-random() instead of random() to preclude the# possibility of taking the log of zero.## -------------------- von Mises distribution --------------------# mu: mean angle (in radians between 0 and 2*pi)# kappa: concentration parameter kappa (>= 0)# if kappa = 0 generate uniform random angle# Based upon an algorithm published in: Fisher, N.I.,# "Statistical Analysis of Circular Data", Cambridge# University Press, 1993.# Thanks to Magnus Kessler for a correction to the# implementation of step 4.## -------------------- gamma distribution --------------------# alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2# Warning: a few older sources define the gamma distribution in terms# of alpha > -1.0# Uses R.C.H. Cheng, "The generation of Gamma# variables with non-integral shape parameters",# Applied Statistics, (1977), 26, No. 1, p71-74# expovariate(1/beta)# alpha is between 0 and 1 (exclusive)# Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle## -------------------- Gauss (faster alternative) --------------------# When x and y are two variables from [0, 1), uniformly# distributed, then# cos(2*pi*x)*sqrt(-2*log(1-y))# sin(2*pi*x)*sqrt(-2*log(1-y))# are two *independent* variables with normal distribution# (mu = 0, sigma = 1).# (Lambert Meertens)# (corrected version; bug discovered by Mike Miller, fixed by LM)# Multithreading note: When two threads call this function# simultaneously, it is possible that they will receive the# same return value. The window is very small though. To# avoid this, you have to use a lock around all calls. 
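The comments above come from CPython's random.py and describe the continuous distributions; a short sketch of calling them, using only the standard library (parameter values are illustrative):

import random

rng = random.Random(0)
print(rng.normalvariate(mu=0.0, sigma=1.0))    # normal distribution
print(rng.gauss(0.0, 1.0))                     # faster variant, not thread-safe
print(rng.expovariate(lambd=1.5))              # mean is 1/lambd
print(rng.gammavariate(alpha=2.0, beta=3.0))   # requires alpha > 0 and beta > 0
print(rng.vonmisesvariate(mu=0.0, kappa=4.0))  # circular data, angle in [0, 2*pi)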
(I# didn't want to slow this down in the serial case by using a# lock here.)## -------------------- beta --------------------## See## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html## for Ivan Frohne's insightful analysis of why the original implementation:## def betavariate(self, alpha, beta):## # Discrete Event Simulation in C, pp 87-88.## y = self.expovariate(alpha)## z = self.expovariate(1.0/beta)## return z/(y+z)## was dead wrong, and how it probably got that way.# This version due to Janne Sinkkonen, and matches all the std# texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").## -------------------- Pareto --------------------# Jain, pg. 495## -------------------- Weibull --------------------# Jain, pg. 499; bug fix courtesy Bill Arms## --------------- Operating System Random Source ------------------# bits / 8 and rounded up# trim excess bits## -------------------- test program --------------------# Create one instance, seeded from current time, and export its methods# as module-level functions. The functions share state across all uses#(both in the user's code and in the Python libraries), but that's fine# for most programs and is easier for the casual user than making them# instantiate their own Random() instance.b'Random variable generators. + + integers + -------- + uniform within range + + sequences + --------- + pick random element + pick random sample + pick weighted random sample + generate random permutation + + distributions on the real line: + ------------------------------ + uniform + triangular + normal (Gaussian) + lognormal + negative exponential + gamma + beta + pareto + Weibull + + distributions on the circle (angles 0 to 2pi) + --------------------------------------------- + circular uniform + von Mises + +General notes on the underlying Mersenne Twister core generator: + +* The period is 2**19937-1. +* It is one of the most extensively tested generators in existence. +* The random() method is implemented in C, executes in a single Python step, + and is, therefore, threadsafe. + +'u'Random variable generators. + + integers + -------- + uniform within range + + sequences + --------- + pick random element + pick random sample + pick weighted random sample + generate random permutation + + distributions on the real line: + ------------------------------ + uniform + triangular + normal (Gaussian) + lognormal + negative exponential + gamma + beta + pareto + Weibull + + distributions on the circle (angles 0 to 2pi) + --------------------------------------------- + circular uniform + von Mises + +General notes on the underlying Mersenne Twister core generator: + +* The period is 2**19937-1. +* It is one of the most extensively tested generators in existence. +* The random() method is implemented in C, executes in a single Python step, + and is, therefore, threadsafe. + +'b'Random'u'Random'b'seed'u'seed'b'random'u'random'b'uniform'u'uniform'b'randint'u'randint'b'sample'u'sample'b'randrange'u'randrange'b'shuffle'u'shuffle'b'normalvariate'u'normalvariate'b'lognormvariate'u'lognormvariate'b'expovariate'u'expovariate'b'vonmisesvariate'u'vonmisesvariate'b'gammavariate'u'gammavariate'b'triangular'u'triangular'b'gauss'u'gauss'b'betavariate'u'betavariate'b'paretovariate'u'paretovariate'b'weibullvariate'u'weibullvariate'b'getstate'u'getstate'b'setstate'u'setstate'b'getrandbits'u'getrandbits'b'SystemRandom'u'SystemRandom'b'Random number generator base class used by bound module functions. 
+ + Used to instantiate instances of Random to get generators that don't + share state. + + Class Random can also be subclassed if you want to use a different basic + generator of your own devising: in that case, override the following + methods: random(), seed(), getstate(), and setstate(). + Optionally, implement a getrandbits() method so that randrange() + can cover arbitrarily large ranges. + + 'u'Random number generator base class used by bound module functions. + + Used to instantiate instances of Random to get generators that don't + share state. + + Class Random can also be subclassed if you want to use a different basic + generator of your own devising: in that case, override the following + methods: random(), seed(), getstate(), and setstate(). + Optionally, implement a getrandbits() method so that randrange() + can cover arbitrarily large ranges. + + 'b'Initialize an instance. + + Optional argument x controls seeding, as for Random.seed(). + 'u'Initialize an instance. + + Optional argument x controls seeding, as for Random.seed(). + 'b'Control how subclasses generate random integers. + + The algorithm a subclass can use depends on the random() and/or + getrandbits() implementation available to it and determines + whether it can generate random integers from arbitrarily large + ranges. + 'u'Control how subclasses generate random integers. + + The algorithm a subclass can use depends on the random() and/or + getrandbits() implementation available to it and determines + whether it can generate random integers from arbitrarily large + ranges. + 'b'_randbelow'u'_randbelow'b'Initialize internal state from hashable object. + + None or no argument seeds from current time or from an operating + system specific randomness source if available. + + If *a* is an int, all bits are used. + + For version 2 (the default), all of the bits are used if *a* is a str, + bytes, or bytearray. For version 1 (provided for reproducing random + sequences from older versions of Python), the algorithm for str and + bytes generates a narrower range of seeds. + + 'u'Initialize internal state from hashable object. + + None or no argument seeds from current time or from an operating + system specific randomness source if available. + + If *a* is an int, all bits are used. + + For version 2 (the default), all of the bits are used if *a* is a str, + bytes, or bytearray. For version 1 (provided for reproducing random + sequences from older versions of Python), the algorithm for str and + bytes generates a narrower range of seeds. + + 'b'Return internal state; can be passed to setstate() later.'u'Return internal state; can be passed to setstate() later.'b'Restore internal state from object returned by getstate().'u'Restore internal state from object returned by getstate().'b'state with version %s passed to Random.setstate() of version %s'u'state with version %s passed to Random.setstate() of version %s'b'Choose a random item from range(start, stop[, step]). + + This fixes the problem with randint() which includes the + endpoint; in Python this is usually not what you want. + + 'u'Choose a random item from range(start, stop[, step]). + + This fixes the problem with randint() which includes the + endpoint; in Python this is usually not what you want. 
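As the randrange() docstring above notes, randint() includes both endpoints while randrange() excludes the stop value; a tiny illustration (seed chosen arbitrarily):

import random

rng = random.Random(1)
print(rng.randint(1, 10))        # in [1, 10]; 10 can be returned
print(rng.randrange(1, 10))      # in [1, 10); 10 is never returned
print(rng.randrange(0, 100, 5))  # start, stop, step -- a multiple of 5 below 100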
+ + 'b'non-integer arg 1 for randrange()'u'non-integer arg 1 for randrange()'b'empty range for randrange()'u'empty range for randrange()'b'non-integer stop for randrange()'u'non-integer stop for randrange()'b'empty range for randrange() (%d, %d, %d)'u'empty range for randrange() (%d, %d, %d)'b'non-integer step for randrange()'u'non-integer step for randrange()'b'zero step for randrange()'u'zero step for randrange()'b'Return random integer in range [a, b], including both end points. + 'u'Return random integer in range [a, b], including both end points. + 'b'Return a random int in the range [0,n). Raises ValueError if n==0.'u'Return a random int in the range [0,n). Raises ValueError if n==0.'b'Return a random int in the range [0,n). Raises ValueError if n==0. + + The implementation does not use getrandbits, but only random. + 'u'Return a random int in the range [0,n). Raises ValueError if n==0. + + The implementation does not use getrandbits, but only random. + 'b'Underlying random() generator does not supply +enough bits to choose from a population range this large. +To remove the range limitation, add a getrandbits() method.'u'Underlying random() generator does not supply +enough bits to choose from a population range this large. +To remove the range limitation, add a getrandbits() method.'b'Boundary cannot be zero'u'Boundary cannot be zero'b'Choose a random element from a non-empty sequence.'u'Choose a random element from a non-empty sequence.'b'Cannot choose from an empty sequence'u'Cannot choose from an empty sequence'b'Shuffle list x in place, and return None. + + Optional argument random is a 0-argument function returning a + random float in [0.0, 1.0); if it is the default None, the + standard random.random will be used. + + 'u'Shuffle list x in place, and return None. + + Optional argument random is a 0-argument function returning a + random float in [0.0, 1.0); if it is the default None, the + standard random.random will be used. + + 'b'Chooses k unique random elements from a population sequence or set. + + Returns a new list containing elements from the population while + leaving the original population unchanged. The resulting list is + in selection order so that all sub-slices will also be valid random + samples. This allows raffle winners (the sample) to be partitioned + into grand prize and second place winners (the subslices). + + Members of the population need not be hashable or unique. If the + population contains repeats, then each occurrence is a possible + selection in the sample. + + To choose a sample in a range of integers, use range as an argument. + This is especially fast and space efficient for sampling from a + large population: sample(range(10000000), 60) + 'u'Chooses k unique random elements from a population sequence or set. + + Returns a new list containing elements from the population while + leaving the original population unchanged. The resulting list is + in selection order so that all sub-slices will also be valid random + samples. This allows raffle winners (the sample) to be partitioned + into grand prize and second place winners (the subslices). + + Members of the population need not be hashable or unique. If the + population contains repeats, then each occurrence is a possible + selection in the sample. + + To choose a sample in a range of integers, use range as an argument. + This is especially fast and space efficient for sampling from a + large population: sample(range(10000000), 60) + 'b'Population must be a sequence or set. 
For dicts, use list(d).'u'Population must be a sequence or set. For dicts, use list(d).'b'Sample larger than population or is negative'u'Sample larger than population or is negative'b'Return a k sized list of population elements chosen with replacement. + + If the relative weights or cumulative weights are not specified, + the selections are made with equal probability. + + 'u'Return a k sized list of population elements chosen with replacement. + + If the relative weights or cumulative weights are not specified, + the selections are made with equal probability. + + 'b'Cannot specify both weights and cumulative weights'u'Cannot specify both weights and cumulative weights'b'The number of weights does not match the population'u'The number of weights does not match the population'b'Get a random number in the range [a, b) or [a, b] depending on rounding.'u'Get a random number in the range [a, b) or [a, b] depending on rounding.'b'Triangular distribution. + + Continuous distribution bounded by given lower and upper limits, + and having a given mode value in-between. + + http://en.wikipedia.org/wiki/Triangular_distribution + + 'u'Triangular distribution. + + Continuous distribution bounded by given lower and upper limits, + and having a given mode value in-between. + + http://en.wikipedia.org/wiki/Triangular_distribution + + 'b'Normal distribution. + + mu is the mean, and sigma is the standard deviation. + + 'u'Normal distribution. + + mu is the mean, and sigma is the standard deviation. + + 'b'Log normal distribution. + + If you take the natural logarithm of this distribution, you'll get a + normal distribution with mean mu and standard deviation sigma. + mu can have any value, and sigma must be greater than zero. + + 'u'Log normal distribution. + + If you take the natural logarithm of this distribution, you'll get a + normal distribution with mean mu and standard deviation sigma. + mu can have any value, and sigma must be greater than zero. + + 'b'Exponential distribution. + + lambd is 1.0 divided by the desired mean. It should be + nonzero. (The parameter would be called "lambda", but that is + a reserved word in Python.) Returned values range from 0 to + positive infinity if lambd is positive, and from negative + infinity to 0 if lambd is negative. + + 'u'Exponential distribution. + + lambd is 1.0 divided by the desired mean. It should be + nonzero. (The parameter would be called "lambda", but that is + a reserved word in Python.) Returned values range from 0 to + positive infinity if lambd is positive, and from negative + infinity to 0 if lambd is negative. + + 'b'Circular data distribution. + + mu is the mean angle, expressed in radians between 0 and 2*pi, and + kappa is the concentration parameter, which must be greater than or + equal to zero. If kappa is equal to zero, this distribution reduces + to a uniform random angle over the range 0 to 2*pi. + + 'u'Circular data distribution. + + mu is the mean angle, expressed in radians between 0 and 2*pi, and + kappa is the concentration parameter, which must be greater than or + equal to zero. If kappa is equal to zero, this distribution reduces + to a uniform random angle over the range 0 to 2*pi. + + 'b'Gamma distribution. Not the gamma function! + + Conditions on the parameters are alpha > 0 and beta > 0. + + The probability distribution function is: + + x ** (alpha - 1) * math.exp(-x / beta) + pdf(x) = -------------------------------------- + math.gamma(alpha) * beta ** alpha + + 'u'Gamma distribution. Not the gamma function! 
+ + Conditions on the parameters are alpha > 0 and beta > 0. + + The probability distribution function is: + + x ** (alpha - 1) * math.exp(-x / beta) + pdf(x) = -------------------------------------- + math.gamma(alpha) * beta ** alpha + + 'b'gammavariate: alpha and beta must be > 0.0'u'gammavariate: alpha and beta must be > 0.0'b'Gaussian distribution. + + mu is the mean, and sigma is the standard deviation. This is + slightly faster than the normalvariate() function. + + Not thread-safe without a lock around calls. + + 'u'Gaussian distribution. + + mu is the mean, and sigma is the standard deviation. This is + slightly faster than the normalvariate() function. + + Not thread-safe without a lock around calls. + + 'b'Beta distribution. + + Conditions on the parameters are alpha > 0 and beta > 0. + Returned values range between 0 and 1. + + 'u'Beta distribution. + + Conditions on the parameters are alpha > 0 and beta > 0. + Returned values range between 0 and 1. + + 'b'Pareto distribution. alpha is the shape parameter.'u'Pareto distribution. alpha is the shape parameter.'b'Weibull distribution. + + alpha is the scale parameter and beta is the shape parameter. + + 'u'Weibull distribution. + + alpha is the scale parameter and beta is the shape parameter. + + 'b'Alternate random number generator using sources provided + by the operating system (such as /dev/urandom on Unix or + CryptGenRandom on Windows). + + Not available on all systems (see os.urandom() for details). + 'u'Alternate random number generator using sources provided + by the operating system (such as /dev/urandom on Unix or + CryptGenRandom on Windows). + + Not available on all systems (see os.urandom() for details). + 'b'Get the next random number in the range [0.0, 1.0).'u'Get the next random number in the range [0.0, 1.0).'b'getrandbits(k) -> x. Generates an int with k random bits.'u'getrandbits(k) -> x. Generates an int with k random bits.'b'number of bits must be greater than zero'u'number of bits must be greater than zero'b'Stub method. Not used for a system random number generator.'u'Stub method. Not used for a system random number generator.'b'Method should not be called for a system random number generator.'u'Method should not be called for a system random number generator.'b'System entropy source does not have state.'u'System entropy source does not have state.'b'sec,'u'sec,'b'avg %g, stddev %g, min %g, max %g +'u'avg %g, stddev %g, min %g, max %g +'Support for regular expressions (RE). + +This module provides regular expression matching operations similar to +those found in Perl. It supports both 8-bit and Unicode strings; both +the pattern and the strings being processed can contain null bytes and +characters outside the US ASCII range. + +Regular expressions can contain both special and ordinary characters. +Most ordinary characters, like "A", "a", or "0", are the simplest +regular expressions; they simply match themselves. You can +concatenate ordinary characters, so last matches the string 'last'. + +The special characters are: + "." Matches any character except a newline. + "^" Matches the start of the string. + "$" Matches the end of the string or just before the newline at + the end of the string. + "*" Matches 0 or more (greedy) repetitions of the preceding RE. + Greedy means that it will match as many repetitions as possible. + "+" Matches 1 or more (greedy) repetitions of the preceding RE. + "?" Matches 0 or 1 (greedy) of the preceding RE. + *?,+?,?? 
Non-greedy versions of the previous three special characters. + {m,n} Matches from m to n repetitions of the preceding RE. + {m,n}? Non-greedy version of the above. + "\\" Either escapes special characters or signals a special sequence. + [] Indicates a set of characters. + A "^" as the first character indicates a complementing set. + "|" A|B, creates an RE that will match either A or B. + (...) Matches the RE inside the parentheses. + The contents can be retrieved or matched later in the string. + (?aiLmsux) The letters set the corresponding flags defined below. + (?:...) Non-grouping version of regular parentheses. + (?P...) The substring matched by the group is accessible by name. + (?P=name) Matches the text matched earlier by the group named name. + (?#...) A comment; ignored. + (?=...) Matches if ... matches next, but doesn't consume the string. + (?!...) Matches if ... doesn't match next. + (?<=...) Matches if preceded by ... (must be fixed length). + (?...) The substring matched by the group is accessible by name. + (?P=name) Matches the text matched earlier by the group named name. + (?#...) A comment; ignored. + (?=...) Matches if ... matches next, but doesn't consume the string. + (?!...) Matches if ... doesn't match next. + (?<=...) Matches if preceded by ... (must be fixed length). + (?...) The substring matched by the group is accessible by name. + (?P=name) Matches the text matched earlier by the group named name. + (?#...) A comment; ignored. + (?=...) Matches if ... matches next, but doesn't consume the string. + (?!...) Matches if ... doesn't match next. + (?<=...) Matches if preceded by ... (must be fixed length). + (? fixer list. head_nodeseveryheadsnode_type_accept_typeget_fixers_from_package + Return the fully qualified names for fixers in the package pkg_name. + fix_name_detect_future_featureshave_docstringadvancetokFixerErrorA fixer could not be loaded.write_unchanged_files_default_optionsFixCLASS_PREFIXFILE_PREFIXfixer_namesexplicitInitializer. + + Args: + fixer_names: a list of fixers to import + options: a dict with configuration. + explicit: a list of fixers to run even if they are explicit. + fixer_logget_fixersBMbmi_pre_orderbmi_post_orderBM_compatiblebmi_pre_order_headsbmi_post_order_headsInspects the options to load the requested patterns and handlers. + + Returns: + (pre_order, post_order), where pre_order is the list of fixers that + want a pre-order AST traversal, and post_order is the list that want + post-order traversal. + pre_order_fixerspost_order_fixersfix_mod_pathfix_classCan't find %s.%sSkipping optional fixer: %slog_debugAdding transformation: %sIllegal fixer order: %rrun_orderkey_funclog_errorCalled when an error occurs.Hook to log a message.print_outputold_textnew_textCalled with the old version, new version, and filename of a + refactored file.refactordoctests_onlyRefactor a list of files and directories.dir_or_filerefactor_dirrefactor_filedir_nameDescends down a directory and refactor every Python file found. + + Python files are assumed to have a .py extension. + + Files and subdirectories starting with '.' are skipped. + py_extdirnamesDescending into %s_read_python_source + Do our best to decode a Python source file correctly. + Can't open %s: %sRefactors a file.Refactoring doctests in %srefactor_docstringprocessed_fileNo doctest changes in %srefactor_stringNo changes in %sRefactor a given input string. + + Args: + data: a string holding the code to be refactored. + name: a human-readable name for use in error/log messages. 
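The re module docstring reproduced above (the string pool holds several copies of it) lists the pattern syntax; a small sketch exercising a few of those constructs -- named groups, non-greedy repetition and substitution:

import re

pattern = re.compile(r"(?P<key>[A-Za-z_]+)\s*=\s*(?P<value>\d+)")
m = pattern.search("retries = 3; timeout=10")
if m:
    print(m.group("key"), m.group("value"))    # named groups via (?P<name>...)

print(re.findall(r"\d+?", "12345"))            # non-greedy: each digit matched separately
print(re.sub(r"\s+", " ", "a   b\t c"))        # collapse runs of whitespace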
+ + Returns: + An AST corresponding to the refactored input stream; None if + there were errors during the parse. + Can't parse %s: %s: %sfuture_featuresRefactoring %srefactor_treerefactor_stdinRefactoring doctests in stdinNo doctest changes in stdinNo changes in stdinRefactors a parse tree (modifying the tree in place). + + For compatible patterns the bottom matcher module is + used. Otherwise the tree is traversed node-to-node for + matches. + + Args: + tree: a pytree.Node instance representing the root of the tree + to be refactored. + name: a human-readable name for this tree. + + Returns: + True if the tree was modified, False otherwise. + start_treetraverse_bymatch_setkeep_line_ordertransformnew_matchesfxrfinish_treetraversalTraverse an AST, applying a set of fixers to each node. + + This is a helper method for refactor_tree(). + + Args: + fixers: a list of fixer instances. + traversal: a generator that yields AST nodes. + + Returns: + None + + Called when a file has been refactored and there may be changes. + No changes to %sNot writing changes to %sWrites a string to a file. + + It first shows a unified diff between the old text and the new text, and + then rewrites the file; the latter is only done if the write option is + set. + Can't create %s: %sCan't write %s: %sWrote changes to %sPS1PS2Refactors a docstring, looking for doctests. + + This returns a modified version of the input string. It looks + for doctests, which start with a ">>>" prompt, and may be + continued with "..." prompts, as long as the "..." is indented + the same as the ">>>". + + (Unfortunately we can't use the doctest module's parser, + since, like most parsers, it is not geared towards preserving + the original source.) + block_linenorefactor_doctestRefactors one doctest. + + A doctest is given as a block of lines, the first of which starts + with ">>>" (possibly indented), while the remaining lines start + with "..." (identically indented). + + parse_blockSource: %sCan't parse docstring in %s line %s: %s: %sclippedneed to beNo files %s modified.Files that %s modified:Warnings/messages while refactoring:There was 1 error:There were %d errors:Parses a block into a tree. + + This is necessary to get correct line number / offset information + in the parser diagnostics and embedded into the parse tree. + wrap_toksWraps a tokenize stream to systematically modify start/end.gen_linesline0col0col1Generates lines as expected by tokenize from a list of lines. + + This strips the first len(indent + self.PS1) characters off each line. + prefix1prefix2line=%r, prefix=%rMultiprocessingUnsupportedMultiprocessRefactoringTooloutput_locknum_processesalready doing multiple processes_child# NodePatters must either have no type and no content# or a type and content -- so they don't get any farther# Always return leafs# Negated Patterns don't have a type# Recurse on each node in content# The prefix for fixer classes# The prefix for modules with a fixer within# When this is True, the refactor*() methods will call write_file() for# files processed even if they were not changed during refactoring. 
If# and only if the refactor method's write parameter was True.# List of files that were or should be modified# Bottom Matcher incompatible fixers# remove fixers that will be handled by the bottom-up# matcher# Modify dirnames in-place to remove subdirs with leading dots# Reading the file failed.# Silence certain parse errors# The [:-1] is to take off the \n we added earlier#use traditional matching for the incompatible fixers# obtain a set of candidate nodes#sort by depth; apply fixers from bottom(of the AST) to top#some fixers(eg fix_imports) must be applied#with the original file's line order# this node has been cut off from a# previous transformation ; skip# do not apply the same fixer again#new.fixers_applied.append(fixer)# do not apply the fixer again to# this or any subnode# update the original match set for# the added code# Undo the adjustment of the line numbers in wrap_toks() below.# Don't bother updating the columns; this is too complicated# since line_text would also have to be updated and it would# still break for tokens spanning lines. Let the user guess# that the column numbers for doctests are relative to the# end of the prompt string (PS1 or PS2).b'Refactoring framework. + +Used as a main program, this can refactor any number of files and/or +recursively descend down directories. Imported as a module, this +provides infrastructure to write your own refactoring tool. +'u'Refactoring framework. + +Used as a main program, this can refactor any number of files and/or +recursively descend down directories. Imported as a module, this +provides infrastructure to write your own refactoring tool. +'b'Return a sorted list of all available fix names in the given package.'u'Return a sorted list of all available fix names in the given package.'b'fix_'u'fix_'b' Accepts a pytree Pattern Node and returns a set + of the pattern types which will match first. 'u' Accepts a pytree Pattern Node and returns a set + of the pattern types which will match first. 'b'Oh no! I don't understand pattern %s'u'Oh no! I don't understand pattern %s'b' Accepts a list of fixers and returns a dictionary + of head node type --> fixer list. 'u' Accepts a list of fixers and returns a dictionary + of head node type --> fixer list. 'b' + Return the fully qualified names for fixers in the package pkg_name. + 'u' + Return the fully qualified names for fixers in the package pkg_name. + 'b'__future__'b'A fixer could not be loaded.'u'A fixer could not be loaded.'b'write_unchanged_files'u'write_unchanged_files'b'Fix'u'Fix'b'Initializer. + + Args: + fixer_names: a list of fixers to import + options: a dict with configuration. + explicit: a list of fixers to run even if they are explicit. + 'u'Initializer. + + Args: + fixer_names: a list of fixers to import + options: a dict with configuration. + explicit: a list of fixers to run even if they are explicit. + 'b'Inspects the options to load the requested patterns and handlers. + + Returns: + (pre_order, post_order), where pre_order is the list of fixers that + want a pre-order AST traversal, and post_order is the list that want + post-order traversal. + 'u'Inspects the options to load the requested patterns and handlers. + + Returns: + (pre_order, post_order), where pre_order is the list of fixers that + want a pre-order AST traversal, and post_order is the list that want + post-order traversal. 
+ 'b'Can't find %s.%s'u'Can't find %s.%s'b'Skipping optional fixer: %s'u'Skipping optional fixer: %s'b'Adding transformation: %s'u'Adding transformation: %s'b'pre'u'pre'b'Illegal fixer order: %r'u'Illegal fixer order: %r'b'run_order'u'run_order'b'Called when an error occurs.'u'Called when an error occurs.'b'Hook to log a message.'u'Hook to log a message.'b'Called with the old version, new version, and filename of a + refactored file.'u'Called with the old version, new version, and filename of a + refactored file.'b'Refactor a list of files and directories.'u'Refactor a list of files and directories.'b'Descends down a directory and refactor every Python file found. + + Python files are assumed to have a .py extension. + + Files and subdirectories starting with '.' are skipped. + 'u'Descends down a directory and refactor every Python file found. + + Python files are assumed to have a .py extension. + + Files and subdirectories starting with '.' are skipped. + 'b'Descending into %s'u'Descending into %s'b' + Do our best to decode a Python source file correctly. + 'u' + Do our best to decode a Python source file correctly. + 'b'Can't open %s: %s'u'Can't open %s: %s'b'Refactors a file.'u'Refactors a file.'b'Refactoring doctests in %s'u'Refactoring doctests in %s'b'No doctest changes in %s'u'No doctest changes in %s'b'No changes in %s'u'No changes in %s'b'Refactor a given input string. + + Args: + data: a string holding the code to be refactored. + name: a human-readable name for use in error/log messages. + + Returns: + An AST corresponding to the refactored input stream; None if + there were errors during the parse. + 'u'Refactor a given input string. + + Args: + data: a string holding the code to be refactored. + name: a human-readable name for use in error/log messages. + + Returns: + An AST corresponding to the refactored input stream; None if + there were errors during the parse. + 'b'Can't parse %s: %s: %s'u'Can't parse %s: %s: %s'b'Refactoring %s'u'Refactoring %s'b'Refactoring doctests in stdin'u'Refactoring doctests in stdin'b'No doctest changes in stdin'u'No doctest changes in stdin'b'No changes in stdin'u'No changes in stdin'b'Refactors a parse tree (modifying the tree in place). + + For compatible patterns the bottom matcher module is + used. Otherwise the tree is traversed node-to-node for + matches. + + Args: + tree: a pytree.Node instance representing the root of the tree + to be refactored. + name: a human-readable name for this tree. + + Returns: + True if the tree was modified, False otherwise. + 'u'Refactors a parse tree (modifying the tree in place). + + For compatible patterns the bottom matcher module is + used. Otherwise the tree is traversed node-to-node for + matches. + + Args: + tree: a pytree.Node instance representing the root of the tree + to be refactored. + name: a human-readable name for this tree. + + Returns: + True if the tree was modified, False otherwise. + 'b'Traverse an AST, applying a set of fixers to each node. + + This is a helper method for refactor_tree(). + + Args: + fixers: a list of fixer instances. + traversal: a generator that yields AST nodes. + + Returns: + None + 'u'Traverse an AST, applying a set of fixers to each node. + + This is a helper method for refactor_tree(). + + Args: + fixers: a list of fixer instances. + traversal: a generator that yields AST nodes. + + Returns: + None + 'b' + Called when a file has been refactored and there may be changes. + 'u' + Called when a file has been refactored and there may be changes. 
+ 'b'No changes to %s'u'No changes to %s'b'Not writing changes to %s'u'Not writing changes to %s'b'Writes a string to a file. + + It first shows a unified diff between the old text and the new text, and + then rewrites the file; the latter is only done if the write option is + set. + 'u'Writes a string to a file. + + It first shows a unified diff between the old text and the new text, and + then rewrites the file; the latter is only done if the write option is + set. + 'b'Can't create %s: %s'u'Can't create %s: %s'b'Can't write %s: %s'u'Can't write %s: %s'b'Wrote changes to %s'u'Wrote changes to %s'b'Refactors a docstring, looking for doctests. + + This returns a modified version of the input string. It looks + for doctests, which start with a ">>>" prompt, and may be + continued with "..." prompts, as long as the "..." is indented + the same as the ">>>". + + (Unfortunately we can't use the doctest module's parser, + since, like most parsers, it is not geared towards preserving + the original source.) + 'u'Refactors a docstring, looking for doctests. + + This returns a modified version of the input string. It looks + for doctests, which start with a ">>>" prompt, and may be + continued with "..." prompts, as long as the "..." is indented + the same as the ">>>". + + (Unfortunately we can't use the doctest module's parser, + since, like most parsers, it is not geared towards preserving + the original source.) + 'b'Refactors one doctest. + + A doctest is given as a block of lines, the first of which starts + with ">>>" (possibly indented), while the remaining lines start + with "..." (identically indented). + + 'u'Refactors one doctest. + + A doctest is given as a block of lines, the first of which starts + with ">>>" (possibly indented), while the remaining lines start + with "..." (identically indented). + + 'b'Source: %s'u'Source: %s'b'Can't parse docstring in %s line %s: %s: %s'u'Can't parse docstring in %s line %s: %s: %s'b'need to be'u'need to be'b'No files %s modified.'u'No files %s modified.'b'Files that %s modified:'u'Files that %s modified:'b'Warnings/messages while refactoring:'u'Warnings/messages while refactoring:'b'There was 1 error:'u'There was 1 error:'b'There were %d errors:'u'There were %d errors:'b'Parses a block into a tree. + + This is necessary to get correct line number / offset information + in the parser diagnostics and embedded into the parse tree. + 'u'Parses a block into a tree. + + This is necessary to get correct line number / offset information + in the parser diagnostics and embedded into the parse tree. + 'b'Wraps a tokenize stream to systematically modify start/end.'u'Wraps a tokenize stream to systematically modify start/end.'b'Generates lines as expected by tokenize from a list of lines. + + This strips the first len(indent + self.PS1) characters off each line. + 'u'Generates lines as expected by tokenize from a list of lines. + + This strips the first len(indent + self.PS1) characters off each line. 
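The surrounding strings belong to lib2to3.refactor; a rough sketch of driving RefactoringTool programmatically (lib2to3 is deprecated and absent from the newest Python releases, so this only runs where the module still ships):

from lib2to3 import refactor

# Load every fixer bundled with lib2to3 and refactor a source string.
fixers = refactor.get_fixers_from_package("lib2to3.fixes")
tool = refactor.RefactoringTool(fixers)

source = "print 'hello'\n"                     # Python 2 style print statement
tree = tool.refactor_string(source, "<example>")
print(str(tree))                               # prints the fixed source: print('hello')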
+ 'b'line=%r, prefix=%r'u'line=%r, prefix=%r'b'already doing multiple processes'u'already doing multiple processes'u'lib2to3.refactor'u'refactor'Redo the builtin repr() (representation) but with limits on most sizes.Decorator to make a repr function return fillvalue for a recursive callrepr_runningmaxlevelmaxarraymaxsetmaxfrozensetmaxdequemaxlong_repr_iterablemaxitertrailnewlevel%s%s%srepr_tuplerepr_listrepr_arrayarray('%s')array('%s', [repr_setset()_possibly_sortedrepr_frozensetfrozenset()frozenset({repr_dequedeque([repr_dictkeyreprvalreprrepr_int<%s instance at %#x>aRepr# Can't use functools.wraps() here because of bootstrap issues# XXX Hope this isn't too slow...# Bugs in x.__repr__() can cause arbitrary# exceptions -- then make up something# Since not all sequences of items can be sorted and comparison# functions may raise arbitrary exceptions, return an unsorted# sequence in that case.b'Redo the builtin repr() (representation) but with limits on most sizes.'u'Redo the builtin repr() (representation) but with limits on most sizes.'b'Repr'u'Repr'b'recursive_repr'u'recursive_repr'b'Decorator to make a repr function return fillvalue for a recursive call'u'Decorator to make a repr function return fillvalue for a recursive call'b'%s%s%s'u'%s%s%s'b'array('%s')'u'array('%s')'b'array('%s', ['u'array('%s', ['b'set()'u'set()'b'frozenset()'u'frozenset()'b'frozenset({'u'frozenset({'b'deque(['u'deque(['b'<%s instance at %#x>'u'<%s instance at %#x>'An extensible library for opening URLs using a variety of protocols + +The simplest way to use this module is to call the urlopen function, +which accepts a string containing a URL or a Request object (described +below). It opens the URL and returns the results as file-like +object; the returned object has some extra methods described below. + +The OpenerDirector manages a collection of Handler objects that do +all the actual work. Each Handler implements a particular protocol or +option. The OpenerDirector is a composite object that invokes the +Handlers needed to open the requested URL. For example, the +HTTPHandler performs HTTP GET and POST requests and deals with +non-error returns. The HTTPRedirectHandler automatically deals with +HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler +deals with digest authentication. + +urlopen(url, data=None) -- Basic usage is the same as original +urllib. pass the url and optionally data to post to an HTTP URL, and +get a file-like object back. One difference is that you can also pass +a Request instance instead of URL. Raises a URLError (subclass of +OSError); for HTTP errors, raises an HTTPError, which can also be +treated as a valid response. + +build_opener -- Function that creates a new OpenerDirector instance. +Will install the default handlers. Accepts one or more Handlers as +arguments, either instances or Handler classes that it will +instantiate. If one of the argument is a subclass of the default +handler, the argument will be installed instead of the default. + +install_opener -- Installs a new opener as the default opener. + +objects of interest: + +OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages +the Handler classes, while dealing with requests and responses. + +Request -- An object that encapsulates the state of a request. The +state can be as simple as the URL. It can also include extra HTTP +headers, e.g. a User-Agent. 
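The reprlib strings just above describe size-limited repr(); a self-contained sketch of that API:

import reprlib

short = reprlib.Repr()
short.maxlist = 3                       # truncate long lists
short.maxstring = 20                    # and long strings
print(short.repr(list(range(100))))     # -> [0, 1, 2, ...]
print(reprlib.repr(set(range(50))))     # module-level helper with the default limits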
+ +BaseHandler -- + +internals: +BaseHandler and parent +_call_chain conventions + +Example usage: + +import urllib.request + +# set up authentication info +authinfo = urllib.request.HTTPBasicAuthHandler() +authinfo.add_password(realm='PDQ Application', + uri='https://mahler:8092/site-updates.py', + user='klem', + passwd='geheim$parole') + +proxy_support = urllib.request.ProxyHandler({"http" : "http://ahad-haam:3128"}) + +# build a new opener that adds authentication and caching FTP handlers +opener = urllib.request.build_opener(proxy_support, authinfo, + urllib.request.CacheFTPHandler) + +# install it +urllib.request.install_opener(opener) + +f = urllib.request.urlopen('http://www.python.org/') +addclosehookRequestOpenerDirectorBaseHandlerHTTPDefaultErrorHandlerHTTPRedirectHandlerHTTPCookieProcessorProxyHandlerHTTPPasswordMgrHTTPPasswordMgrWithDefaultRealmHTTPPasswordMgrWithPriorAuthAbstractBasicAuthHandlerHTTPBasicAuthHandlerProxyBasicAuthHandlerAbstractDigestAuthHandlerHTTPDigestAuthHandlerProxyDigestAuthHandlerFTPHandlerCacheFTPHandlerDataHandlerUnknownHandlerHTTPErrorProcessorinstall_openerURLopenerFancyURLopener_openercafilecapathcadefaultOpen the URL url, which can be either a string or a Request object. + + *data* must be an object specifying additional data to be sent to + the server, or None if no such data is needed. See Request for + details. + + urllib.request module uses HTTP/1.1 and includes a "Connection:close" + header in its HTTP requests. + + The optional *timeout* parameter specifies a timeout in seconds for + blocking operations like the connection attempt (if not specified, the + global default timeout setting will be used). This only works for HTTP, + HTTPS and FTP connections. + + If *context* is specified, it must be a ssl.SSLContext instance describing + the various SSL options. See HTTPSConnection for more details. + + The optional *cafile* and *capath* parameters specify a set of trusted CA + certificates for HTTPS requests. cafile should point to a single file + containing a bundle of CA certificates, whereas capath should point to a + directory of hashed certificate files. More information can be found in + ssl.SSLContext.load_verify_locations(). + + The *cadefault* parameter is ignored. + + This function always returns an object which can work as a context + manager and has methods such as + + * geturl() - return the URL of the resource retrieved, commonly used to + determine if a redirect was followed + + * info() - return the meta-information of the page, such as headers, in the + form of an email.message_from_string() instance (see Quick Reference to + HTTP Headers) + + * getcode() - return the HTTP status code of the response. Raises URLError + on errors. + + For HTTP and HTTPS URLs, this function returns a http.client.HTTPResponse + object slightly modified. In addition to the three new methods above, the + msg attribute contains the same information as the reason attribute --- + the reason phrase returned by the server --- instead of the response + headers as it is specified in the documentation for HTTPResponse. + + For FTP, file, and data URLs and requests explicitly handled by legacy + URLopener and FancyURLopener classes, this function returns a + urllib.response.addinfourl object. + + Note that None may be returned if no handler handles the request (though + the default installed global OpenerDirector uses UnknownHandler to ensure + this never happens). 
+ + In addition, if proxy settings are detected (for example, when a *_proxy + environment variable like http_proxy is set), ProxyHandler is default + installed and makes sure the requests are handled through the proxy. + + cafile, capath and cadefault are deprecated, use a custom context instead."cafile, capath and cadefault are deprecated, use a ""custom context instead."You can't pass both context and any of cafile, capath, and cadefault"You can't pass both context and any of cafile, capath, and ""cadefault"SSL support not availablecreate_default_contextPurposeSERVER_AUTHHTTPSHandlerhttps_handler_url_tempfilesreporthook + Retrieve a URL into a temporary location on disk. + + Requires a URL argument. If a filename is passed, it is used as + the temporary file location. The reporthook argument should be + a callable that accepts a block number, a read size, and the + total file size of the URL target. The data argument should be + valid URL encoded data. + + If a filename is passed and the URL points to a local resource, + the result is a copy from local file to new file. + + Returns a tuple containing the path to the newly created + data file as well as the resulting HTTPMessage object. + url_typetfpblocknumretrieval incomplete: got only %i out of %i bytesClean up temporary files from urlretrieve calls.temp_file:\d+$_cut_port_rerequest_hostReturn request-host, as defined by RFC 2965. + + Variation from RFC: returned value is lowercased, for convenient + comparison. + + full_urlget_headerorigin_req_hostunverifiableunredirected_hdrs{}#{}_full_urlhas_headerremove_headerunknown url type: %rget_methodReturn a string indicating the HTTP request method.default_methodget_full_urlset_proxyhas_proxyadd_unredirected_headerheader_itemsPython-urllib/%sclient_versionUser-agenthandle_openhandle_errorprocess_responseprocess_requestadd_handleradd_parentexpected BaseHandler instance, got %raddedredirect_requestdo_openproxy_open_call_chainmeth_namefullurl_requesturllib.Request_responsedefault_openunknown_openhttp_error_%shttp_errorig_argshttp_error_defaultCreate an opener object from a list of handlers. + + The opener will use several default handlers, including support + for HTTP, FTP and when applicable HTTPS. + + If any of the handlers passed as arguments are subclasses of the + default handlers, the default handlers will not be used. + default_classeshandler_orderProcess HTTP error responses.http_responsehttps_responsemax_repeatsmax_redirectionsnewurlReturn a Request or None in response to a redirect. + + This is called by the http_error_30x methods when a + redirection response is received. If a redirection should + take place, return a new Request to allow http_error_30x to + perform the redirect. Otherwise, raise HTTPError if no-one + else should try to handle this url. Return None if you can't + but another Handler might. + %20CONTENT_HEADERShttp_error_302urlparts%s - Redirection to url '%s' is not allowedpunctuationredirect_dictvisitedinf_msghttp_error_301http_error_303http_error_307The HTTP server returned a redirect error that would lead to an infinite loop. +The last 30x error message was: +"The HTTP server returned a redirect error that would ""lead to an infinite loop.\n""The last 30x error message was:\n"_parse_proxyReturn (scheme, user, password, host/port) given a URL or an authority. + + If a URL is supplied, it must have an authority (host:port) component. + According to RFC 3986, having an authority component means the URL must + have two slashes after the scheme. 
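The urlopen() and Request docstrings above describe the high-level client API; a hedged sketch of typical use (the URL is a placeholder and network access is required):

import urllib.request

req = urllib.request.Request(
    "http://www.example.com/",                      # placeholder URL
    headers={"User-Agent": "Python-urllib-example"},
)
with urllib.request.urlopen(req, timeout=10) as resp:
    print(resp.status, resp.headers.get_content_type())
    body = resp.read(200)                           # first 200 bytes of the response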
+ r_schemeauthorityproxy URL with no authority: %rhost_separatorhostportproxiesproxies must be a mapping%s_openorig_typeproxy_typeproxy_bypassuser_passcredsProxy-authorizationadd_passwordrealmreduce_urireduced_urifind_user_passwordauthuridomainsreduced_authuriurisauthinfois_suburiAccept authority or URI and extract only the authority and path.dport%s:%dCheck if test is below base in a URI tree + + Both args must be URIs in reduced form. + is_authenticatedupdate_authenticated(?:^|,)[ ]*([^ ,]+)[ ]+realm=(["']?)([^"']*)\2'(?:^|,)''[ \t]*''([^ \t,]+)''[ \t]+''realm=(["\']?)([^"\']*)\\2'rxpassword_mgr_parse_realmfound_challengeBasic Auth Realm was unquotedhttp_error_auth_reqedauthrequnsupportedbasicretry_http_basic_authAbstractBasicAuthHandler does not support the following scheme: %r"AbstractBasicAuthHandler does not ""support the following scheme: %r"pwauth_headerhttp_request{0}:{1}auth_strBasic {}https_requesthttp_error_401www-authenticatehttp_error_407proxy-authenticate_randombytesretriednonce_countlast_noncereset_retry_countdigest auth failedretry_http_digest_authAbstractDigestAuthHandler does not support the following scheme: '%s'"AbstractDigestAuthHandler does not support"" the following scheme: '%s'"challengeparse_keqv_listparse_http_listchalget_authorizationDigest %sauth_valget_cnoncenonce%s:%s:%s:qopopaqueget_algorithm_implsKDget_entity_digestentdig%s:%s:%sA1A2respdig%08xncvaluecnonce%s:%s:%s:%s:%snoncebitqop '%s' is not supported.username="%s", realm="%s", nonce="%s", uri="%s", response="%s"'username="%s", realm="%s", nonce="%s", uri="%s", ''response="%s"', opaque="%s", digest="%s", algorithm="%s", qop=auth, nc=%s, cnonce="%s"SHAUnsupported digest authentication algorithm %r"Unsupported digest authentication ""algorithm %r"An authentication protocol defined by RFC 2069 + + Digest authentication improves on basic authentication because it + does not transmit passwords in the clear. + 490retryProxy-AuthorizationAbstractHTTPHandler_debuglevelset_http_debugleveldo_request_no host givenPOST data should be bytes, an iterable of bytes, or a file object. It cannot be of type str."POST data should be bytes, an iterable of bytes, ""or a file object. It cannot be of type str."Transfer-encodingsel_hostsel_pathhttp_classhttp_conn_argsReturn an HTTPResponse object for the request, using http_class. + + http_class must implement the HTTPConnection API from http.client. + tunnel_headersproxy_auth_hdrhttp_open_check_hostnamehttps_opencookiejarCookieJaradd_cookie_headerextract_cookiesunknown url type: %sParse list of key=value strings where keys are not duplicated.parsedParse lists as described by RFC 2068 Section 2. + + In particular, parse comma-separated lists where the elements of + the list may include quoted-strings. A quoted-string could + contain a comma. A non-quoted string could have quotes in the + middle. Neither commas nor quotes count if they are escaped. + Only double-quotes count, not single-quotes. 
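parse_keqv_list() and parse_http_list(), whose docstrings appear just above, are what the digest-auth handler uses to split a WWW-Authenticate challenge; a small illustration with a made-up header value:

from urllib.request import parse_http_list, parse_keqv_list

# Hypothetical challenge value, as it might follow "Digest " in a header.
challenge = 'realm="api@example.com", qop="auth", nonce="abc123", algorithm=MD5'
items = parse_http_list(challenge)      # split on commas, respecting quoted strings
params = parse_keqv_list(items)         # -> {'realm': 'api@example.com', 'qop': 'auth', ...}
print(params["realm"], params["qop"])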
+ curfile_openfile:// scheme is supported only on localhostopen_local_filemimetypeslocalfilestatsformatdateusegmtmtypeContent-type: %s +Content-length: %d +Last-modified: %s +_safe_gethostbynamefile://origurlfile not on local hostftp_openftplibftp error: no host givenconnect_ftpfwretrfileretrlenContent-type: %s +Content-length: %d +ftp error: %rftpwrapperpersistentsoonestmax_connssetTimeoutsetMaxConnscheck_cachedata_openmediatype;base64text/plain;charset=US-ASCIIContent-type: %s +Content-length: %d +MAXFTPCACHEnturl2pathOS-specific conversion from a relative URL of the 'file' scheme + to a file system path; not recommended for general use.OS-specific conversion from a file system path to a relative URL + of the 'file' scheme; not recommended for general use.ftpcacheClass to open URLs. + This is a class rather than just a subroutine because we may need + more than one set of global protocol-specific options. + Note -- this is a base class for those who don't want the + automatic handling of errors type 302 (relocated) and 401 + (authorization needed).__tempfiles%(class)s style of invoking requests is deprecated. Use newer urlopen functions/methods"%(class)s style of invoking requests is deprecated. ""Use newer urlopen functions/methods"Accept*/*__unlinktempcacheaddheaderAdd a header to be used by the HTTP interface only + e.g. u.addheader('Accept', 'sound/basic')Use URLopener().open(file) instead of open(file, 'r').%/:=&?~#+!$,;'@()*[]|urltypeproxyhostopen_open_unknown_proxyopen_unknownsocket errorOverridable interface to open unknown URL type.url errorunknown url typeinvalid proxy for %sretrieveretrieve(url) returns (filename, headers) for a local object + or (tempfilename, headers) for a remote object.url1_open_generic_httpconnection_factoryMake an HTTP connection using connection_class. + + This is an internal method that should be called from + open_http() or open_https(). + + Arguments: + - connection_factory should take a host name and return an + HTTPConnection instance. + - url is the url to retrieval or a host, relative-path pair. + - data is payload for a POST request or None. + user_passwdproxy_passwdrealhost%s://%s%shttp errorproxy_authBasic %shttp protocol error: bad status linehttp:http_erroropen_httpUse HTTP protocol.Handle http errors. + + Derived class can override this, or provide specific handlers + named http_error_DDD where DDD is the 3-digit error code.http_error_%dDefault error handler: close the connection and raise OSError._https_connectionopen_httpsUse HTTPS protocol.open_fileUse local file or FTP depending on form of URL.file error: proxy support for file protocol currently not implementedlocalhost/Use local file.localnameContent-Type: %s +Content-Length: %d +Last-modified: %s +urlfilethishostlocal file url may start with / or file:. 
Unknown url of type: %slocal file error: not on local hostopen_ftpUse FTP protocol.ftp error: proxy support for ftp protocol currently not implementedftp:Content-Type: %s +Content-Length: %d +ftperrorsftp error %ropen_dataUse "data" URL.data error: proxy support for data protocol currently not implementeddata errorbad data URLDate: %s%a, %d %b %Y %H:%M:%S GMTContent-type: %sContent-Length: %dDerived class with handlers for errors we can handle (perhaps).auth_cachemaxtriesDefault error handling -- don't raise an exception.Error 302 -- relocated (temporarily).http_error_500Internal Server Error: Redirect Recursionredirect_internal Redirection to url '%s' is not allowed.Error 301 -- also relocated (permanently).Error 303 -- also relocated (essentially identical to 302).Error 307 -- relocated, but turn POST into error.Error 401 -- authentication required. + This function supports Basic authentication only.[ ]*([^ ]+)[ ]+realm="([^"]*)"retry__basic_authError 407 -- proxy authentication required. + This function supports Basic authentication only.retry_proxy_retry_proxy_http_basic_authproxyselectorget_user_passwd%s:%s@%sretry_proxy_https_basic_authretry_https_basic_authprompt_user_passwdOverride this in a GUI environment!getpassEnter username for %s at %s: Enter password for %s in %s at %s: _localhostReturn the IP address of the magic hostname 'localhost'._thishostReturn the IP addresses of the current host._ftperrorsReturn the set of errors raised by the FTP class._noheadersnoheadersReturn an empty email Message object.Class used by open_ftp() for cache of open FTP connections.refcountkeepalivebusyendtransfer550LIST file_closeftpobjreal_closegetproxies_environmentReturn a dictionary of scheme -> proxy server URL mappings. + + Scan the environment for variables named _proxy; + this seems to be the standard convention. If you need a + different way, you can pass a proxies dictionary to the + [Fancy]URLopener constructor. + + REQUEST_METHODproxy_bypass_environmentTest if proxies should not be used for a particular host. + + Checks the proxy dict for the value of no_proxy, which should + be a list of comma separated DNS suffixes, or '*' for all hosts. + + no_proxyhostonly_proxy_bypass_macosx_sysconfproxy_settings + Return True iff this host shouldn't be accessed using a proxy + + This function uses the MacOSX framework SystemConfiguration + to fetch the proxy information. + + proxy_settings come from _scproxy._get_proxy_settings or get mocked ie: + { 'exclude_simple': bool, + 'exceptions': ['foo.bar', '*.bar.com', '127.0.0.1', '10.1', '10.0/16'] + } + ip2numipAddrexclude_simplehostIP(\d+(?:\.\d+)*)(/\d+)?proxy_bypass_macosx_sysconfgetproxies_macosx_sysconfReturn a dictionary of scheme -> proxy server URL mappings. + + This function uses the MacOSX framework SystemConfiguration + to fetch the proxy information. + Return True, if host should be bypassed. + + Checks proxy settings gathered from the environment, if specified, + or from the MacOSX framework SystemConfiguration. + + getproxies_registryReturn a dictionary of scheme -> proxy server URL mappings. + + Win32 uses the registry to store proxies. + + Software\Microsoft\Windows\CurrentVersion\Internet SettingsinternetSettingsProxyEnableproxyEnableProxyServerproxyServer(?:[^/:]+)://%s://%shttp://%shttps://%sftp://%sCloseReturn a dictionary of scheme -> proxy server URL mappings. + + Returns settings gathered from the environment, if specified, + or the registry. 
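The proxy-discovery docstrings above (getproxies_environment, proxy_bypass and friends) explain where proxy settings come from; a sketch using the public helpers, with an explicit proxy mapping rather than the environment (host and port are placeholders):

import urllib.request

print(urllib.request.getproxies())       # e.g. {'http': 'http://proxy:3128'} if *_proxy is set

proxy = urllib.request.ProxyHandler({"http": "http://127.0.0.1:3128"})
opener = urllib.request.build_opener(proxy)
# opener.open("http://www.example.com/") would now be routed through the proxy.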
+ + proxy_bypass_registryProxyOverrideproxyOverriderawHostgetfqdnfqdn\.Return True, if host should be bypassed. + + Checks proxy settings gathered from the environment, if specified, + or the registry. + + # XXX issues:# If an authentication error handler that tries to perform# authentication for some reason but fails, how should the error be# signalled? The client needs to know the HTTP error code. But if# the handler knows that the problem was, e.g., that it didn't know# that hash algo that requested in the challenge, it would be good to# pass that information along to the client, too.# ftp errors aren't handled cleanly# check digest against correct (i.e. non-apache) implementation# Possible extensions:# complex proxies XXX not sure what exactly was meant by this# abstract factory for opener# check for SSL# Classes# Functions# Legacy interface# Just return the local path and the "headers" for file://# URLs. No sense in performing a copy unless requested.# Handle temporary file setup.# copied from cookielib.py# remove port, if present# unwrap('') --> 'type://host/path'# issue 16464# if we change data we need to remove content-length header# (cause it's most probably calculated for previous value)# useful for something like authentication# will not be added to a redirected request# self.handlers is retained only for backward compatibility# manage the individual handlers# oops, coincidental match# Only exists for backwards compatibility.# Handlers raise an exception if no one else should try to handle# the request, or return None if they can't but another handler# could. Otherwise, they return the response.# accept a URL or a Request object# pre-process request# post-process response# XXX http[s] protocols are special-cased# https is not different than http# YUCK!# XXX probably also want an abstract factory that knows when it makes# sense to skip a superclass in favor of a subclass and when it might# make sense to include both# Only exists for backwards compatibility# Try to preserve the old behavior of having custom classes# inserted after default ones (works only for custom user# classes which are not aware of handler_order).# after all other processing# According to RFC 2616, "2xx" code indicates that the client's# request was successfully received, understood, and accepted.# maximum number of redirections to any single URL# this is needed because of the state that cookies introduce# maximum total number of redirections (regardless of URL) before# assuming we're in a loop# Strictly (according to RFC 2616), 301 or 302 in response to# a POST MUST NOT cause a redirection without confirmation# from the user (of urllib.request, in this case). In practice,# essentially all clients do redirect in this case, so we do# the same.# Be conciliant with URIs containing a space. This is mainly# redundant with the more complete encoding done in http_error_302(),# but it is kept for compatibility with other callers.# Implementation note: To avoid the server sending us into an# infinite loop, the request object needs to track what URLs we# have already seen. Do this by adding a handler-specific# attribute to the Request object.# Some servers (incorrectly) return multiple Location headers# (so probably same goes for URI). Use first header.# fix a possible malformed URL# For security reasons we don't allow redirection to anything other# than http, https or ftp.# http.client.parse_headers() decodes as ISO-8859-1. 
Recover the# original bytes and percent-encode non-ASCII bytes, and any special# characters such as the space.# XXX Probably want to forget about the state of the current# request, although that might interact poorly with other# handlers that also use handler-specific request attributes# loop detection# .redirect_dict has a key url if url was previously visited.# Don't close the fp until we are sure that we won't use it# with HTTPError.# authority# URL# We have an authority, so for RFC 3986-compliant URLs (by ss 3.# and 3.3.), path is empty or starts with '/'# Proxies must be in front# let other handlers take care of it# need to start over, because the other handlers don't# grok the proxy's URL type# e.g. if we have a constructor arg proxies like so:# {'http': 'ftp://proxy.example.com'}, we may end up turning# a request for http://acme.example.com/a into one for# ftp://proxy.example.com/a# uri could be a single URI or a sequence# note HTTP URLs do not have a userinfo component# URI# host or host:port# Add a default for prior auth requests# XXX this allows for multiple auth-schemes, but will stupidly pick# the last one with a realm specified.# allow for double- and single-quoted realm values# (single quotes are a violation of the RFC, but appear in the wild)# start of the string or ','# optional whitespaces# scheme like "Basic"# mandatory whitespaces# realm=xxx# realm='xxx'# realm="xxx"# XXX could pre-emptively send auth info already accepted (RFC 2617,# end of section 2, and section 1.2 immediately after "credentials"# production).# parse WWW-Authenticate header: accept multiple challenges per header# host may be an authority (without userinfo) or a URL with an# no header found# Use the first matching Basic challenge.# Ignore following challenges even if they use the Basic# scheme.# http_error_auth_reqed requires that there is no userinfo component in# authority. Assume there isn't one, since urllib.request does not (and# should not, RFC 3986 s. 3.2.1) support requests for URLs containing# userinfo.# Return n random bytes.# Digest authentication is specified in RFC 2617.# XXX The client does not inspect the Authentication-Info header# in a successful response.# XXX It should be possible to test this implementation against# a mock server that just generates a static set of challenges.# XXX qop="auth-int" supports is shaky# Don't fail endlessly - if we failed once, we'll probably# fail a second time. Hm. Unless the Password Manager is# prompting for the information. Crap. This isn't great# but it's better than the current 'repeat until recursion# depth exceeded' approach # The cnonce-value is an opaque# quoted string value provided by the client and used by both client# and server to avoid chosen plaintext attacks, to provide mutual# authentication, and to provide some message integrity protection.# This isn't a fabulous effort, but it's probably Good Enough.# mod_digest doesn't send an opaque, even though it isn't# supposed to be optional# XXX not implemented yet# XXX selector: what about proxies and full urls# NOTE: As per RFC 2617, when server sends "auth,auth-int", the client could use either `auth`# or `auth-int` to the response back. 
we use `auth` to send the response back.# XXX handle auth-int.# XXX should the partial digests be encoded too?# lambdas assume digest modules are imported at the top level# XXX MD5-sess# before Basic auth# POST# will parse host:port# TODO(jhylton): Should this be redesigned to handle# persistent connections?# We want to make an HTTP/1.1 request, but the addinfourl# class isn't prepared to deal with a persistent connection.# It will try to read all remaining data from the socket,# which will block while the server waits for the next request.# So make sure the connection gets closed after the (only)# request.# Proxy-Authorization should not be sent to origin# server.# timeout error# If the server does not send us a 'Connection: close' header,# HTTPConnection assumes the socket should be left open. Manually# mark the socket to be closed when this response object goes away.# This line replaces the .msg attribute of the HTTPResponse# with .headers, because urllib clients expect the response to# have the reason in .msg. It would be good to mark this# attribute is deprecated and get then to use info() or# .headers.# append last part# Use local file or FTP depending on form of URL# names for the localhost# not entirely sure what the rules are here# username/password handling# XXX would be nice to have pluggable cache strategies# XXX this stuff is definitely not thread safe# first check for old ones# then check the size# data URLs as specified in RFC 2397.# ignores POSTed data# syntax:# even base64 encoded data URLs might be quoted so unquote in any case:# Code move from the old urllib module# Trim the ftp cache beyond this size# Helper for non-unix systems# Constructor# See cleanup()# Undocumented feature: if you assign {} to tempcache,# it is used to cache files retrieved with# self.retrieve(). This is not enabled by default# since it does not work for changing documents (and I# haven't got the logic to check expiration headers# yet).# Undocumented feature: you can use a different# ftp cache by assigning to the .ftpcache member;# in case you want logically independent URL openers# XXX This is not threadsafe. Bah.# This code sometimes runs when the rest of this module# has already been deleted, so it can't use any globals# or import anything.# External interface# Signal special case to open_*()# raise exception if actual size does not match content-length header# Each method named open_ knows how to open that type of URL# check whether the proxy contains authorization information# now we proceed with the url we want to obtain# Add Connection:close as we don't support persistent connections yet.# This helps in closing the socket and avoiding ResourceWarning# something went wrong with the HTTP status line# First check if there's a specific handler for this error# XXX thread unsafe!# Prune the cache, rather arbitrarily# ignore POSTed data# XXX is this encoding/decoding ok?#f.fileno = None # needed for addinfourl# In case the server sent a relative URL, join with original:# For security reasons, we don't allow redirection to anything other# than http, https and ftp.# We are using newer HTTPError with older redirect_internal method# This older method will get deprecated in 3.3# Utility classes# Try to retrieve as a file# Set transfer mode to ASCII!# Try a directory listing. 
Verify that directory exists.# Pass back both a suitably decorated object and a retrieval length# Proxy handling# in order to prefer lowercase variables, process environment in# two passes: first matches any, second pass matches lowercase only# CVE-2016-1000110 - If we are running as CGI script, forget HTTP_PROXY# (non-all-lowercase) as it may be set from the web server by a "Proxy:"# header from the client# If "proxy" is lowercase, it will still be used thanks to the next block# don't bypass, if no_proxy isn't specified# '*' is special case for always bypass# strip port off host# check if the host ends with any of the DNS suffixes# ignore leading dots# otherwise, don't bypass# This code tests an OSX specific data structure but is testable on all# Check for simple host names:# Items in the list are strings like these: *.local, 169.254/16# System libraries ignore invalid prefix lengths# Std module, so should be around - but you never know!# Returned as Unicode but problems if not converted to ASCII# Per-protocol settings# See if address has a type:// prefix# Use one setting for all protocols# Either registry key not found etc, or the value in an# unexpected format.# proxies already set up to be empty so nothing to do# Std modules, so should be around - but you never know!# ^^^^ Returned as Unicode but problems if not converted to ASCII# try to make a host list from name and IP address.# make a check value list from the registry entry: replace the# '' string by the localhost entry and the corresponding# canonical entry.# now check if we match one of the registry values.# mask dots# change glob sequence# change glob char# By default use environment variablesb'An extensible library for opening URLs using a variety of protocols + +The simplest way to use this module is to call the urlopen function, +which accepts a string containing a URL or a Request object (described +below). It opens the URL and returns the results as file-like +object; the returned object has some extra methods described below. + +The OpenerDirector manages a collection of Handler objects that do +all the actual work. Each Handler implements a particular protocol or +option. The OpenerDirector is a composite object that invokes the +Handlers needed to open the requested URL. For example, the +HTTPHandler performs HTTP GET and POST requests and deals with +non-error returns. The HTTPRedirectHandler automatically deals with +HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler +deals with digest authentication. + +urlopen(url, data=None) -- Basic usage is the same as original +urllib. pass the url and optionally data to post to an HTTP URL, and +get a file-like object back. One difference is that you can also pass +a Request instance instead of URL. Raises a URLError (subclass of +OSError); for HTTP errors, raises an HTTPError, which can also be +treated as a valid response. + +build_opener -- Function that creates a new OpenerDirector instance. +Will install the default handlers. Accepts one or more Handlers as +arguments, either instances or Handler classes that it will +instantiate. If one of the argument is a subclass of the default +handler, the argument will be installed instead of the default. + +install_opener -- Installs a new opener as the default opener. + +objects of interest: + +OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages +the Handler classes, while dealing with requests and responses. + +Request -- An object that encapsulates the state of a request. 
The +state can be as simple as the URL. It can also include extra HTTP +headers, e.g. a User-Agent. + +BaseHandler -- + +internals: +BaseHandler and parent +_call_chain conventions + +Example usage: + +import urllib.request + +# set up authentication info +authinfo = urllib.request.HTTPBasicAuthHandler() +authinfo.add_password(realm='PDQ Application', + uri='https://mahler:8092/site-updates.py', + user='klem', + passwd='geheim$parole') + +proxy_support = urllib.request.ProxyHandler({"http" : "http://ahad-haam:3128"}) + +# build a new opener that adds authentication and caching FTP handlers +opener = urllib.request.build_opener(proxy_support, authinfo, + urllib.request.CacheFTPHandler) + +# install it +urllib.request.install_opener(opener) + +f = urllib.request.urlopen('http://www.python.org/') +'u'An extensible library for opening URLs using a variety of protocols + +The simplest way to use this module is to call the urlopen function, +which accepts a string containing a URL or a Request object (described +below). It opens the URL and returns the results as file-like +object; the returned object has some extra methods described below. + +The OpenerDirector manages a collection of Handler objects that do +all the actual work. Each Handler implements a particular protocol or +option. The OpenerDirector is a composite object that invokes the +Handlers needed to open the requested URL. For example, the +HTTPHandler performs HTTP GET and POST requests and deals with +non-error returns. The HTTPRedirectHandler automatically deals with +HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler +deals with digest authentication. + +urlopen(url, data=None) -- Basic usage is the same as original +urllib. pass the url and optionally data to post to an HTTP URL, and +get a file-like object back. One difference is that you can also pass +a Request instance instead of URL. Raises a URLError (subclass of +OSError); for HTTP errors, raises an HTTPError, which can also be +treated as a valid response. + +build_opener -- Function that creates a new OpenerDirector instance. +Will install the default handlers. Accepts one or more Handlers as +arguments, either instances or Handler classes that it will +instantiate. If one of the argument is a subclass of the default +handler, the argument will be installed instead of the default. + +install_opener -- Installs a new opener as the default opener. + +objects of interest: + +OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages +the Handler classes, while dealing with requests and responses. + +Request -- An object that encapsulates the state of a request. The +state can be as simple as the URL. It can also include extra HTTP +headers, e.g. a User-Agent. 
+ +BaseHandler -- + +internals: +BaseHandler and parent +_call_chain conventions + +Example usage: + +import urllib.request + +# set up authentication info +authinfo = urllib.request.HTTPBasicAuthHandler() +authinfo.add_password(realm='PDQ Application', + uri='https://mahler:8092/site-updates.py', + user='klem', + passwd='geheim$parole') + +proxy_support = urllib.request.ProxyHandler({"http" : "http://ahad-haam:3128"}) + +# build a new opener that adds authentication and caching FTP handlers +opener = urllib.request.build_opener(proxy_support, authinfo, + urllib.request.CacheFTPHandler) + +# install it +urllib.request.install_opener(opener) + +f = urllib.request.urlopen('http://www.python.org/') +'b'Request'u'Request'b'OpenerDirector'u'OpenerDirector'b'BaseHandler'u'BaseHandler'b'HTTPDefaultErrorHandler'u'HTTPDefaultErrorHandler'b'HTTPRedirectHandler'u'HTTPRedirectHandler'b'HTTPCookieProcessor'u'HTTPCookieProcessor'b'ProxyHandler'u'ProxyHandler'b'HTTPPasswordMgr'u'HTTPPasswordMgr'b'HTTPPasswordMgrWithDefaultRealm'u'HTTPPasswordMgrWithDefaultRealm'b'HTTPPasswordMgrWithPriorAuth'u'HTTPPasswordMgrWithPriorAuth'b'AbstractBasicAuthHandler'u'AbstractBasicAuthHandler'b'HTTPBasicAuthHandler'u'HTTPBasicAuthHandler'b'ProxyBasicAuthHandler'u'ProxyBasicAuthHandler'b'AbstractDigestAuthHandler'u'AbstractDigestAuthHandler'b'HTTPDigestAuthHandler'u'HTTPDigestAuthHandler'b'ProxyDigestAuthHandler'u'ProxyDigestAuthHandler'b'HTTPHandler'u'HTTPHandler'b'FTPHandler'u'FTPHandler'b'CacheFTPHandler'u'CacheFTPHandler'b'DataHandler'u'DataHandler'b'UnknownHandler'u'UnknownHandler'b'HTTPErrorProcessor'u'HTTPErrorProcessor'b'install_opener'u'install_opener'b'build_opener'u'build_opener'b'URLopener'u'URLopener'b'FancyURLopener'u'FancyURLopener'b'Open the URL url, which can be either a string or a Request object. + + *data* must be an object specifying additional data to be sent to + the server, or None if no such data is needed. See Request for + details. + + urllib.request module uses HTTP/1.1 and includes a "Connection:close" + header in its HTTP requests. + + The optional *timeout* parameter specifies a timeout in seconds for + blocking operations like the connection attempt (if not specified, the + global default timeout setting will be used). This only works for HTTP, + HTTPS and FTP connections. + + If *context* is specified, it must be a ssl.SSLContext instance describing + the various SSL options. See HTTPSConnection for more details. + + The optional *cafile* and *capath* parameters specify a set of trusted CA + certificates for HTTPS requests. cafile should point to a single file + containing a bundle of CA certificates, whereas capath should point to a + directory of hashed certificate files. More information can be found in + ssl.SSLContext.load_verify_locations(). + + The *cadefault* parameter is ignored. + + This function always returns an object which can work as a context + manager and has methods such as + + * geturl() - return the URL of the resource retrieved, commonly used to + determine if a redirect was followed + + * info() - return the meta-information of the page, such as headers, in the + form of an email.message_from_string() instance (see Quick Reference to + HTTP Headers) + + * getcode() - return the HTTP status code of the response. Raises URLError + on errors. + + For HTTP and HTTPS URLs, this function returns a http.client.HTTPResponse + object slightly modified. 
In addition to the three new methods above, the + msg attribute contains the same information as the reason attribute --- + the reason phrase returned by the server --- instead of the response + headers as it is specified in the documentation for HTTPResponse. + + For FTP, file, and data URLs and requests explicitly handled by legacy + URLopener and FancyURLopener classes, this function returns a + urllib.response.addinfourl object. + + Note that None may be returned if no handler handles the request (though + the default installed global OpenerDirector uses UnknownHandler to ensure + this never happens). + + In addition, if proxy settings are detected (for example, when a *_proxy + environment variable like http_proxy is set), ProxyHandler is default + installed and makes sure the requests are handled through the proxy. + + 'u'Open the URL url, which can be either a string or a Request object. + + *data* must be an object specifying additional data to be sent to + the server, or None if no such data is needed. See Request for + details. + + urllib.request module uses HTTP/1.1 and includes a "Connection:close" + header in its HTTP requests. + + The optional *timeout* parameter specifies a timeout in seconds for + blocking operations like the connection attempt (if not specified, the + global default timeout setting will be used). This only works for HTTP, + HTTPS and FTP connections. + + If *context* is specified, it must be a ssl.SSLContext instance describing + the various SSL options. See HTTPSConnection for more details. + + The optional *cafile* and *capath* parameters specify a set of trusted CA + certificates for HTTPS requests. cafile should point to a single file + containing a bundle of CA certificates, whereas capath should point to a + directory of hashed certificate files. More information can be found in + ssl.SSLContext.load_verify_locations(). + + The *cadefault* parameter is ignored. + + This function always returns an object which can work as a context + manager and has methods such as + + * geturl() - return the URL of the resource retrieved, commonly used to + determine if a redirect was followed + + * info() - return the meta-information of the page, such as headers, in the + form of an email.message_from_string() instance (see Quick Reference to + HTTP Headers) + + * getcode() - return the HTTP status code of the response. Raises URLError + on errors. + + For HTTP and HTTPS URLs, this function returns a http.client.HTTPResponse + object slightly modified. In addition to the three new methods above, the + msg attribute contains the same information as the reason attribute --- + the reason phrase returned by the server --- instead of the response + headers as it is specified in the documentation for HTTPResponse. + + For FTP, file, and data URLs and requests explicitly handled by legacy + URLopener and FancyURLopener classes, this function returns a + urllib.response.addinfourl object. + + Note that None may be returned if no handler handles the request (though + the default installed global OpenerDirector uses UnknownHandler to ensure + this never happens). + + In addition, if proxy settings are detected (for example, when a *_proxy + environment variable like http_proxy is set), ProxyHandler is default + installed and makes sure the requests are handled through the proxy. 
+ + 'b'cafile, capath and cadefault are deprecated, use a custom context instead.'u'cafile, capath and cadefault are deprecated, use a custom context instead.'b'You can't pass both context and any of cafile, capath, and cadefault'u'You can't pass both context and any of cafile, capath, and cadefault'b'SSL support not available'u'SSL support not available'b' + Retrieve a URL into a temporary location on disk. + + Requires a URL argument. If a filename is passed, it is used as + the temporary file location. The reporthook argument should be + a callable that accepts a block number, a read size, and the + total file size of the URL target. The data argument should be + valid URL encoded data. + + If a filename is passed and the URL points to a local resource, + the result is a copy from local file to new file. + + Returns a tuple containing the path to the newly created + data file as well as the resulting HTTPMessage object. + 'u' + Retrieve a URL into a temporary location on disk. + + Requires a URL argument. If a filename is passed, it is used as + the temporary file location. The reporthook argument should be + a callable that accepts a block number, a read size, and the + total file size of the URL target. The data argument should be + valid URL encoded data. + + If a filename is passed and the URL points to a local resource, + the result is a copy from local file to new file. + + Returns a tuple containing the path to the newly created + data file as well as the resulting HTTPMessage object. + 'b'retrieval incomplete: got only %i out of %i bytes'u'retrieval incomplete: got only %i out of %i bytes'b'Clean up temporary files from urlretrieve calls.'u'Clean up temporary files from urlretrieve calls.'b':\d+$'u':\d+$'b'Return request-host, as defined by RFC 2965. + + Variation from RFC: returned value is lowercased, for convenient + comparison. + + 'u'Return request-host, as defined by RFC 2965. + + Variation from RFC: returned value is lowercased, for convenient + comparison. + + 'b'{}#{}'u'{}#{}'b'unknown url type: %r'u'unknown url type: %r'b'Return a string indicating the HTTP request method.'u'Return a string indicating the HTTP request method.'b'Python-urllib/%s'u'Python-urllib/%s'b'User-agent'u'User-agent'b'add_parent'u'add_parent'b'expected BaseHandler instance, got %r'u'expected BaseHandler instance, got %r'b'redirect_request'u'redirect_request'b'do_open'u'do_open'b'proxy_open'u'proxy_open'b'response'u'response'b'request'u'request'b'_request'u'_request'b'urllib.Request'u'urllib.Request'b'_response'u'_response'b'default_open'u'default_open'b'_open'u'_open'b'unknown_open'u'unknown_open'b'http_error_%s'u'http_error_%s'b'_error'u'_error'b'http_error_default'u'http_error_default'b'Create an opener object from a list of handlers. + + The opener will use several default handlers, including support + for HTTP, FTP and when applicable HTTPS. + + If any of the handlers passed as arguments are subclasses of the + default handlers, the default handlers will not be used. + 'u'Create an opener object from a list of handlers. + + The opener will use several default handlers, including support + for HTTP, FTP and when applicable HTTPS. + + If any of the handlers passed as arguments are subclasses of the + default handlers, the default handlers will not be used. + 'b'handler_order'u'handler_order'b'Process HTTP error responses.'u'Process HTTP error responses.'b'Return a Request or None in response to a redirect. 
+ + This is called by the http_error_30x methods when a + redirection response is received. If a redirection should + take place, return a new Request to allow http_error_30x to + perform the redirect. Otherwise, raise HTTPError if no-one + else should try to handle this url. Return None if you can't + but another Handler might. + 'u'Return a Request or None in response to a redirect. + + This is called by the http_error_30x methods when a + redirection response is received. If a redirection should + take place, return a new Request to allow http_error_30x to + perform the redirect. Otherwise, raise HTTPError if no-one + else should try to handle this url. Return None if you can't + but another Handler might. + 'b'%20'u'%20'b'uri'u'uri'b'%s - Redirection to url '%s' is not allowed'u'%s - Redirection to url '%s' is not allowed'b'redirect_dict'u'redirect_dict'b'The HTTP server returned a redirect error that would lead to an infinite loop. +The last 30x error message was: +'u'The HTTP server returned a redirect error that would lead to an infinite loop. +The last 30x error message was: +'b'Return (scheme, user, password, host/port) given a URL or an authority. + + If a URL is supplied, it must have an authority (host:port) component. + According to RFC 3986, having an authority component means the URL must + have two slashes after the scheme. + 'u'Return (scheme, user, password, host/port) given a URL or an authority. + + If a URL is supplied, it must have an authority (host:port) component. + According to RFC 3986, having an authority component means the URL must + have two slashes after the scheme. + 'b'proxy URL with no authority: %r'u'proxy URL with no authority: %r'b'proxies must be a mapping'u'proxies must be a mapping'b'%s_open'u'%s_open'b'Proxy-authorization'u'Proxy-authorization'b'Accept authority or URI and extract only the authority and path.'u'Accept authority or URI and extract only the authority and path.'b'%s:%d'u'%s:%d'b'Check if test is below base in a URI tree + + Both args must be URIs in reduced form. + 'u'Check if test is below base in a URI tree + + Both args must be URIs in reduced form. 
+ 'b'(?:^|,)[ ]*([^ ,]+)[ ]+realm=(["']?)([^"']*)\2'u'(?:^|,)[ ]*([^ ,]+)[ ]+realm=(["']?)([^"']*)\2'b'Basic Auth Realm was unquoted'u'Basic Auth Realm was unquoted'b'basic'u'basic'b'AbstractBasicAuthHandler does not support the following scheme: %r'u'AbstractBasicAuthHandler does not support the following scheme: %r'b'is_authenticated'u'is_authenticated'b'{0}:{1}'u'{0}:{1}'b'Basic {}'u'Basic {}'b'www-authenticate'u'www-authenticate'b'proxy-authenticate'u'proxy-authenticate'b'digest auth failed'u'digest auth failed'b'digest'u'digest'b'AbstractDigestAuthHandler does not support the following scheme: '%s''u'AbstractDigestAuthHandler does not support the following scheme: '%s''b'Digest %s'u'Digest %s'b'%s:%s:%s:'u'%s:%s:%s:'b'realm'u'realm'b'nonce'u'nonce'b'qop'u'qop'b'algorithm'u'algorithm'b'opaque'u'opaque'b'%s:%s:%s'u'%s:%s:%s'b'%08x'u'%08x'b'%s:%s:%s:%s:%s'u'%s:%s:%s:%s:%s'b'qop '%s' is not supported.'u'qop '%s' is not supported.'b'username="%s", realm="%s", nonce="%s", uri="%s", response="%s"'u'username="%s", realm="%s", nonce="%s", uri="%s", response="%s"'b', opaque="%s"'u', opaque="%s"'b', digest="%s"'u', digest="%s"'b', algorithm="%s"'u', algorithm="%s"'b', qop=auth, nc=%s, cnonce="%s"'u', qop=auth, nc=%s, cnonce="%s"'b'SHA'u'SHA'b'Unsupported digest authentication algorithm %r'u'Unsupported digest authentication algorithm %r'b'An authentication protocol defined by RFC 2069 + + Digest authentication improves on basic authentication because it + does not transmit passwords in the clear. + 'u'An authentication protocol defined by RFC 2069 + + Digest authentication improves on basic authentication because it + does not transmit passwords in the clear. + 'b'Proxy-Authorization'u'Proxy-Authorization'b'no host given'u'no host given'b'POST data should be bytes, an iterable of bytes, or a file object. It cannot be of type str.'u'POST data should be bytes, an iterable of bytes, or a file object. It cannot be of type str.'b'Transfer-encoding'u'Transfer-encoding'b'Return an HTTPResponse object for the request, using http_class. + + http_class must implement the HTTPConnection API from http.client. + 'u'Return an HTTPResponse object for the request, using http_class. + + http_class must implement the HTTPConnection API from http.client. + 'b'HTTPSHandler'u'HTTPSHandler'b'unknown url type: %s'u'unknown url type: %s'b'Parse list of key=value strings where keys are not duplicated.'u'Parse list of key=value strings where keys are not duplicated.'b'Parse lists as described by RFC 2068 Section 2. + + In particular, parse comma-separated lists where the elements of + the list may include quoted-strings. A quoted-string could + contain a comma. A non-quoted string could have quotes in the + middle. Neither commas nor quotes count if they are escaped. + Only double-quotes count, not single-quotes. + 'u'Parse lists as described by RFC 2068 Section 2. + + In particular, parse comma-separated lists where the elements of + the list may include quoted-strings. A quoted-string could + contain a comma. A non-quoted string could have quotes in the + middle. Neither commas nor quotes count if they are escaped. + Only double-quotes count, not single-quotes. 
+ 'b'file:// scheme is supported only on localhost'u'file:// scheme is supported only on localhost'b'Content-type: %s +Content-length: %d +Last-modified: %s +'u'Content-type: %s +Content-length: %d +Last-modified: %s +'b'file://'u'file://'b'file not on local host'u'file not on local host'b'ftp error: no host given'u'ftp error: no host given'b'Content-type: %s +'u'Content-type: %s +'b'Content-length: %d +'u'Content-length: %d +'b'ftp error: %r'u'ftp error: %r'b';base64'u';base64'b'text/plain;charset=US-ASCII'u'text/plain;charset=US-ASCII'b'Content-type: %s +Content-length: %d +'u'Content-type: %s +Content-length: %d +'b'OS-specific conversion from a relative URL of the 'file' scheme + to a file system path; not recommended for general use.'u'OS-specific conversion from a relative URL of the 'file' scheme + to a file system path; not recommended for general use.'b'OS-specific conversion from a file system path to a relative URL + of the 'file' scheme; not recommended for general use.'u'OS-specific conversion from a file system path to a relative URL + of the 'file' scheme; not recommended for general use.'b'Class to open URLs. + This is a class rather than just a subroutine because we may need + more than one set of global protocol-specific options. + Note -- this is a base class for those who don't want the + automatic handling of errors type 302 (relocated) and 401 + (authorization needed).'u'Class to open URLs. + This is a class rather than just a subroutine because we may need + more than one set of global protocol-specific options. + Note -- this is a base class for those who don't want the + automatic handling of errors type 302 (relocated) and 401 + (authorization needed).'b'%(class)s style of invoking requests is deprecated. Use newer urlopen functions/methods'u'%(class)s style of invoking requests is deprecated. Use newer urlopen functions/methods'b'key_file'u'key_file'b'cert_file'u'cert_file'b'Accept'u'Accept'b'*/*'u'*/*'b'Add a header to be used by the HTTP interface only + e.g. u.addheader('Accept', 'sound/basic')'u'Add a header to be used by the HTTP interface only + e.g. u.addheader('Accept', 'sound/basic')'b'Use URLopener().open(file) instead of open(file, 'r').'u'Use URLopener().open(file) instead of open(file, 'r').'b'%/:=&?~#+!$,;'@()*[]|'u'%/:=&?~#+!$,;'@()*[]|'b'open_'u'open_'b'open_local_file'u'open_local_file'b'socket error'u'socket error'b'Overridable interface to open unknown URL type.'u'Overridable interface to open unknown URL type.'b'url error'u'url error'b'unknown url type'u'unknown url type'b'invalid proxy for %s'u'invalid proxy for %s'b'retrieve(url) returns (filename, headers) for a local object + or (tempfilename, headers) for a remote object.'u'retrieve(url) returns (filename, headers) for a local object + or (tempfilename, headers) for a remote object.'b'Make an HTTP connection using connection_class. + + This is an internal method that should be called from + open_http() or open_https(). + + Arguments: + - connection_factory should take a host name and return an + HTTPConnection instance. + - url is the url to retrieval or a host, relative-path pair. + - data is payload for a POST request or None. + 'u'Make an HTTP connection using connection_class. + + This is an internal method that should be called from + open_http() or open_https(). + + Arguments: + - connection_factory should take a host name and return an + HTTPConnection instance. + - url is the url to retrieval or a host, relative-path pair. + - data is payload for a POST request or None. 
+ 'b'%s://%s%s'u'%s://%s%s'b'http error'u'http error'b'Basic %s'u'Basic %s'b'http protocol error: bad status line'u'http protocol error: bad status line'b'http:'u'http:'b'Use HTTP protocol.'u'Use HTTP protocol.'b'Handle http errors. + + Derived class can override this, or provide specific handlers + named http_error_DDD where DDD is the 3-digit error code.'u'Handle http errors. + + Derived class can override this, or provide specific handlers + named http_error_DDD where DDD is the 3-digit error code.'b'http_error_%d'u'http_error_%d'b'Default error handler: close the connection and raise OSError.'u'Default error handler: close the connection and raise OSError.'b'Use HTTPS protocol.'u'Use HTTPS protocol.'b'Use local file or FTP depending on form of URL.'u'Use local file or FTP depending on form of URL.'b'file error: proxy support for file protocol currently not implemented'u'file error: proxy support for file protocol currently not implemented'b'localhost/'u'localhost/'b'Use local file.'u'Use local file.'b'Content-Type: %s +Content-Length: %d +Last-modified: %s +'u'Content-Type: %s +Content-Length: %d +Last-modified: %s +'b'local file url may start with / or file:. Unknown url of type: %s'u'local file url may start with / or file:. Unknown url of type: %s'b'local file error: not on local host'u'local file error: not on local host'b'Use FTP protocol.'u'Use FTP protocol.'b'ftp error: proxy support for ftp protocol currently not implemented'u'ftp error: proxy support for ftp protocol currently not implemented'b'ftp:'u'ftp:'b'Content-Type: %s +'u'Content-Type: %s +'b'Content-Length: %d +'u'Content-Length: %d +'b'ftp error %r'u'ftp error %r'b'Use "data" URL.'u'Use "data" URL.'b'data error: proxy support for data protocol currently not implemented'u'data error: proxy support for data protocol currently not implemented'b'data error'u'data error'b'bad data URL'u'bad data URL'b'Date: %s'u'Date: %s'b'%a, %d %b %Y %H:%M:%S GMT'u'%a, %d %b %Y %H:%M:%S GMT'b'Content-type: %s'u'Content-type: %s'b'Content-Length: %d'u'Content-Length: %d'b'Derived class with handlers for errors we can handle (perhaps).'u'Derived class with handlers for errors we can handle (perhaps).'b'Default error handling -- don't raise an exception.'u'Default error handling -- don't raise an exception.'b'Error 302 -- relocated (temporarily).'u'Error 302 -- relocated (temporarily).'b'http_error_500'u'http_error_500'b'Internal Server Error: Redirect Recursion'u'Internal Server Error: Redirect Recursion'b' Redirection to url '%s' is not allowed.'u' Redirection to url '%s' is not allowed.'b'Error 301 -- also relocated (permanently).'u'Error 301 -- also relocated (permanently).'b'Error 303 -- also relocated (essentially identical to 302).'u'Error 303 -- also relocated (essentially identical to 302).'b'Error 307 -- relocated, but turn POST into error.'u'Error 307 -- relocated, but turn POST into error.'b'Error 401 -- authentication required. + This function supports Basic authentication only.'u'Error 401 -- authentication required. + This function supports Basic authentication only.'b'[ ]*([^ ]+)[ ]+realm="([^"]*)"'u'[ ]*([^ ]+)[ ]+realm="([^"]*)"'b'retry_'u'retry_'b'_basic_auth'u'_basic_auth'b'Error 407 -- proxy authentication required. + This function supports Basic authentication only.'u'Error 407 -- proxy authentication required. 
+ This function supports Basic authentication only.'b'retry_proxy_'u'retry_proxy_'b'%s:%s@%s'u'%s:%s@%s'b'Override this in a GUI environment!'u'Override this in a GUI environment!'b'Enter username for %s at %s: 'u'Enter username for %s at %s: 'b'Enter password for %s in %s at %s: 'u'Enter password for %s in %s at %s: 'b'Return the IP address of the magic hostname 'localhost'.'u'Return the IP address of the magic hostname 'localhost'.'b'Return the IP addresses of the current host.'u'Return the IP addresses of the current host.'b'Return the set of errors raised by the FTP class.'u'Return the set of errors raised by the FTP class.'b'Return an empty email Message object.'u'Return an empty email Message object.'b'Class used by open_ftp() for cache of open FTP connections.'u'Class used by open_ftp() for cache of open FTP connections.'b'550'u'550'b'LIST 'u'LIST 'b'Return a dictionary of scheme -> proxy server URL mappings. + + Scan the environment for variables named _proxy; + this seems to be the standard convention. If you need a + different way, you can pass a proxies dictionary to the + [Fancy]URLopener constructor. + + 'u'Return a dictionary of scheme -> proxy server URL mappings. + + Scan the environment for variables named _proxy; + this seems to be the standard convention. If you need a + different way, you can pass a proxies dictionary to the + [Fancy]URLopener constructor. + + 'b'_proxy'u'_proxy'b'REQUEST_METHOD'u'REQUEST_METHOD'b'Test if proxies should not be used for a particular host. + + Checks the proxy dict for the value of no_proxy, which should + be a list of comma separated DNS suffixes, or '*' for all hosts. + + 'u'Test if proxies should not be used for a particular host. + + Checks the proxy dict for the value of no_proxy, which should + be a list of comma separated DNS suffixes, or '*' for all hosts. + + 'b' + Return True iff this host shouldn't be accessed using a proxy + + This function uses the MacOSX framework SystemConfiguration + to fetch the proxy information. + + proxy_settings come from _scproxy._get_proxy_settings or get mocked ie: + { 'exclude_simple': bool, + 'exceptions': ['foo.bar', '*.bar.com', '127.0.0.1', '10.1', '10.0/16'] + } + 'u' + Return True iff this host shouldn't be accessed using a proxy + + This function uses the MacOSX framework SystemConfiguration + to fetch the proxy information. + + proxy_settings come from _scproxy._get_proxy_settings or get mocked ie: + { 'exclude_simple': bool, + 'exceptions': ['foo.bar', '*.bar.com', '127.0.0.1', '10.1', '10.0/16'] + } + 'b'exclude_simple'u'exclude_simple'b'(\d+(?:\.\d+)*)(/\d+)?'u'(\d+(?:\.\d+)*)(/\d+)?'b'Return a dictionary of scheme -> proxy server URL mappings. + + This function uses the MacOSX framework SystemConfiguration + to fetch the proxy information. + 'u'Return a dictionary of scheme -> proxy server URL mappings. + + This function uses the MacOSX framework SystemConfiguration + to fetch the proxy information. + 'b'Return True, if host should be bypassed. + + Checks proxy settings gathered from the environment, if specified, + or from the MacOSX framework SystemConfiguration. + + 'u'Return True, if host should be bypassed. + + Checks proxy settings gathered from the environment, if specified, + or from the MacOSX framework SystemConfiguration. + + 'b'Return a dictionary of scheme -> proxy server URL mappings. + + Win32 uses the registry to store proxies. + + 'u'Return a dictionary of scheme -> proxy server URL mappings. + + Win32 uses the registry to store proxies. 
+ + 'b'Software\Microsoft\Windows\CurrentVersion\Internet Settings'u'Software\Microsoft\Windows\CurrentVersion\Internet Settings'b'ProxyEnable'u'ProxyEnable'b'ProxyServer'u'ProxyServer'b'(?:[^/:]+)://'u'(?:[^/:]+)://'b'%s://%s'u'%s://%s'b'http://%s'u'http://%s'b'https://%s'u'https://%s'b'ftp://%s'u'ftp://%s'b'Return a dictionary of scheme -> proxy server URL mappings. + + Returns settings gathered from the environment, if specified, + or the registry. + + 'u'Return a dictionary of scheme -> proxy server URL mappings. + + Returns settings gathered from the environment, if specified, + or the registry. + + 'b'ProxyOverride'u'ProxyOverride'b''u''b'\.'u'\.'b'Return True, if host should be bypassed. + + Checks proxy settings gathered from the environment, if specified, + or the registry. + + 'u'Return True, if host should be bypassed. + + Checks proxy settings gathered from the environment, if specified, + or the registry. + + 'RLIMIT_ASRLIMIT_CPURLIMIT_DATARLIMIT_FSIZERLIMIT_MEMLOCKRLIMIT_NOFILERLIMIT_NPROCRLIMIT_RSSRLIMIT_STACKRLIM_INFINITYRUSAGE_CHILDRENRUSAGE_SELFu'/Users/pwntester/.pyenv/versions/3.8.13/lib/python3.8/lib-dynload/resource.cpython-38-darwin.so'u'resource'getpagesizegetrusageu'struct_rusage: Result from getrusage. + +This object may be accessed either as a tuple of + (utime,stime,maxrss,ixrss,idrss,isrss,minflt,majflt, + nswap,inblock,oublock,msgsnd,msgrcv,nsignals,nvcsw,nivcsw) +or via the attributes ru_utime, ru_stime, ru_maxrss, and so on.'ru_idrssru_inblockru_isrssru_ixrssru_majfltru_maxrssru_minfltru_msgrcvru_msgsndru_nivcswru_nsignalsru_nswapru_nvcswru_oublockru_stimeru_utimeresource.struct_rusagestruct_rusagePicklable wrapper for a socket.new_sock_resource_sharerGet the socket. This should only be called once.get_connectionfromshareWrapper for fd which can be used at any time.new_fdGet the fd. This should only be called once._ResourceSharerManager for resources using background thread._old_locks_afterforkRegister resource, returning an identifier.Return connection from which to receive identified resource.Stop the background thread and clear registered resources.sub_warning_ResourceSharer thread did not stop when asked'_ResourceSharer thread did ''not stop when asked'Already have Listenerstarting listener and thread for sending handles_serve# We use a background thread for sharing fds on Unix, and for sharing sockets on# Windows.# A client which wants to pickle a resource registers it with the resource# sharer and gets an identifier in return. The unpickling process will connect# to the resource sharer, sends the identifier and its pid, and then receives# the resource.# If self._lock was locked at the time of the fork, it may be broken# -- see issue 6721. Replace it without letting it be gc'ed.b'stop'u'stop'b'DupSocket'u'DupSocket'b'Picklable wrapper for a socket.'u'Picklable wrapper for a socket.'b'Get the socket. This should only be called once.'u'Get the socket. This should only be called once.'b'Wrapper for fd which can be used at any time.'u'Wrapper for fd which can be used at any time.'b'Get the fd. This should only be called once.'u'Get the fd. 
This should only be called once.'b'Manager for resources using background thread.'u'Manager for resources using background thread.'b'Register resource, returning an identifier.'u'Register resource, returning an identifier.'b'Return connection from which to receive identified resource.'u'Return connection from which to receive identified resource.'b'Stop the background thread and clear registered resources.'u'Stop the background thread and clear registered resources.'b'_ResourceSharer thread did not stop when asked'u'_ResourceSharer thread did not stop when asked'b'Already have Listener'u'Already have Listener'b'starting listener and thread for sending handles'u'starting listener and thread for sending handles'b'pthread_sigmask'u'pthread_sigmask'u'multiprocessing.resource_sharer'u'resource_sharer'_HAVE_SIGMASK_IGNORED_SIGNALSnoop_CLEANUP_FUNCSsemaphoreshared_memoryResourceTrackerMake sure that resource tracker process is running. + + This can be run from any process. Usually a child process will use + the resource created by its parent._check_aliveresource_tracker: process died unexpectedly, relaunching. Some resources might leak.'resource_tracker: process died unexpectedly, ''relaunching. Some resources might leak.'from multiprocessing.resource_tracker import main;main(%d)Check that the pipe has not been closed by sending a probe.PROBE:0:noop +rtypeRegister name of resource with resource tracker.REGISTERUnregister name of resource with resource tracker.UNREGISTER{0}:{1}:{2} +name too longnbytes {0:n} but len(msg) {1:n}Run resource tracker.cleanup_funcCannot register for automatic cleanup: unknown resource type ' for automatic cleanup: ''unknown resource type 'PROBEunrecognized command %rrtype_cacheresource_tracker: There appear to be %d leaked %s objects to clean up at shutdown'resource_tracker: There appear to be %d ''leaked %s objects to clean up at shutdown'resource_tracker: %r: %s# Server process to keep track of unlinked resources (like shared memory# segments, semaphores etc.) and clean them.# On Unix we run a server process which keeps track of unlinked# resources. The server ignores SIGINT and SIGTERM and reads from a# pipe. Every other process of the program has a copy of the writable# end of the pipe, so we get EOF when all other processes have exited.# Then the server process unlinks any remaining resource names.# This is important because there may be system limits for such resources: for# instance, the system only supports a limited number of named semaphores, and# shared-memory segments live in the RAM. If a python process leaks such a# resource, this resource will not be removed till the next reboot. Without# this resource tracker process, "killall python" would probably leave unlinked# resources.# not running# closing the "alive" file descriptor stops main()# resource tracker was launched before, is it still running?# => still alive# => dead, launch it again# Clean-up to avoid dangling processes.# _pid can be None if this process is a child from another# python process, which has started the resource_tracker.# The resource_tracker has already been terminated.# process will out live us, so no need to wait on pid# bpo-33613: Register a signal mask that will block the signals.# This signal mask will be inherited by the child that is going# to be spawned and will protect the child from a race condition# that can make the child die before it registers signal handlers# for SIGINT and SIGTERM. 
The mask is unregistered after spawning# the child.# We cannot use send here as it calls ensure_running, creating# a cycle.# posix guarantees that writes to a pipe of less than PIPE_BUF# bytes are atomic, and that PIPE_BUF >= 512# protect the process from ^C and "killall python" etc# keep track of registered/unregistered resources# all processes have terminated; cleanup any remaining resources# For some reason the process which created and registered this# resource has failed to unregister it. Presumably it has# died. We therefore unlink it.b'unregister'u'unregister'b'noop'u'noop'b'semaphore'u'semaphore'b'shared_memory'u'shared_memory'b'Make sure that resource tracker process is running. + + This can be run from any process. Usually a child process will use + the resource created by its parent.'u'Make sure that resource tracker process is running. + + This can be run from any process. Usually a child process will use + the resource created by its parent.'b'resource_tracker: process died unexpectedly, relaunching. Some resources might leak.'u'resource_tracker: process died unexpectedly, relaunching. Some resources might leak.'b'from multiprocessing.resource_tracker import main;main(%d)'u'from multiprocessing.resource_tracker import main;main(%d)'b'Check that the pipe has not been closed by sending a probe.'u'Check that the pipe has not been closed by sending a probe.'b'PROBE:0:noop +'b'Register name of resource with resource tracker.'u'Register name of resource with resource tracker.'b'REGISTER'u'REGISTER'b'Unregister name of resource with resource tracker.'u'Unregister name of resource with resource tracker.'b'UNREGISTER'u'UNREGISTER'b'{0}:{1}:{2} +'u'{0}:{1}:{2} +'b'name too long'u'name too long'b'nbytes {0:n} but len(msg) {1:n}'u'nbytes {0:n} but len(msg) {1:n}'b'Run resource tracker.'u'Run resource tracker.'b'Cannot register 'u'Cannot register 'b' for automatic cleanup: unknown resource type 'u' for automatic cleanup: unknown resource type 'b'PROBE'u'PROBE'b'unrecognized command %r'u'unrecognized command %r'b'resource_tracker: There appear to be %d leaked %s objects to clean up at shutdown'u'resource_tracker: There appear to be %d leaked %s objects to clean up at shutdown'b'resource_tracker: %r: %s'u'resource_tracker: %r: %s'u'multiprocessing.resource_tracker'u'resource_tracker'Response classes used by urllib. + +The base class, addbase, defines a minimal file-like interface, +including read() and readline(). The typical response object is an +addinfourl instance, which defines an info() method that returns +headers and a geturl() method that returns the url. +addbaseaddinfo_TemporaryFileWrapperBase class for addinfo and addclosehook. Is a good idea for garbage collection.<%s at %r whose fp = %r>Class to add a close hook to an open file.closehookhookargsclass to add an info() method to an open file.class to add info() and geturl() methods to an open file.# XXX Add a method to expose the timeout on the underlying socket?# Keep reference around as this was part of the original API.b'Response classes used by urllib. + +The base class, addbase, defines a minimal file-like interface, +including read() and readline(). The typical response object is an +addinfourl instance, which defines an info() method that returns +headers and a geturl() method that returns the url. +'u'Response classes used by urllib. + +The base class, addbase, defines a minimal file-like interface, +including read() and readline(). 
The typical response object is an +addinfourl instance, which defines an info() method that returns +headers and a geturl() method that returns the url. +'b'addbase'u'addbase'b'addclosehook'u'addclosehook'b'addinfo'u'addinfo'b'addinfourl'u'addinfourl'b'Base class for addinfo and addclosehook. Is a good idea for garbage collection.'u'Base class for addinfo and addclosehook. Is a good idea for garbage collection.'b''u''b'<%s at %r whose fp = %r>'u'<%s at %r whose fp = %r>'b'Class to add a close hook to an open file.'u'Class to add a close hook to an open file.'b'class to add an info() method to an open file.'u'class to add an info() method to an open file.'b'class to add info() and geturl() methods to an open file.'u'class to add info() and geturl() methods to an open file.'u'urllib.response'Test result object +Stdout: +%sSTDOUT_LINE +Stderr: +%sSTDERR_LINEHolder for test result information. + + Test results are automatically managed by the TestCase and TestSuite + classes, and do not need to be explicitly manipulated by writers of tests. + + Each instance holds the total number of tests run, and collections of + failures and errors that occurred among those test runs. The collections + contain tuples of (testcase, exceptioninfo), where exceptioninfo is the + formatted traceback of the error that occurred. + _previousTestClass_testRunEntered_moduleSetUpFailedexpectedFailuresunexpectedSuccessesshouldStop_stdout_buffer_stderr_buffer_original_stderr_mirrorOutputprintErrorsCalled by TestRunner after test runCalled when the given test is about to be run_setupStdoutCalled once before any tests are executed. + + See startTest for a method called before each test. + Called when the given test has been run_restoreStdoutCalled once after all tests are executed. + + See stopTest for a method called after each test. + Called when an error has occurred. 'err' is a tuple of values as + returned by sys.exc_info(). + _exc_info_to_stringCalled when an error has occurred. 'err' is a tuple of values as + returned by sys.exc_info().subtestCalled at the end of a subtest. + 'err' is None if the subtest ended successfully, otherwise it's a + tuple of values as returned by sys.exc_info(). + Called when a test has completed successfullyCalled when a test is skipped.Called when an expected failure/error occurred.Called when a test was expected to fail, but succeed.Tells whether or not this result was a success.Indicates that the tests should be aborted.Converts a sys.exc_info()-style tuple of values into a string._is_relevant_tb_level_count_relevant_tb_levelsTracebackExceptioncapture_localstb_emsgLines<%s run=%i errors=%i failures=%i># By default, we don't do anything with successful subtests, but# more sophisticated test results might want to record them.# The hasattr check is for test_result's OldResult test. That# way this method works on objects that lack the attribute.# (where would such result intances come from? old stored pickles?)# Skip test runner traceback levels# Skip assert*() traceback levelsb'Test result object'u'Test result object'b' +Stdout: +%s'u' +Stdout: +%s'b' +Stderr: +%s'u' +Stderr: +%s'b'Holder for test result information. + + Test results are automatically managed by the TestCase and TestSuite + classes, and do not need to be explicitly manipulated by writers of tests. + + Each instance holds the total number of tests run, and collections of + failures and errors that occurred among those test runs. 
The collections + contain tuples of (testcase, exceptioninfo), where exceptioninfo is the + formatted traceback of the error that occurred. + 'u'Holder for test result information. + + Test results are automatically managed by the TestCase and TestSuite + classes, and do not need to be explicitly manipulated by writers of tests. + + Each instance holds the total number of tests run, and collections of + failures and errors that occurred among those test runs. The collections + contain tuples of (testcase, exceptioninfo), where exceptioninfo is the + formatted traceback of the error that occurred. + 'b'Called by TestRunner after test run'u'Called by TestRunner after test run'b'Called when the given test is about to be run'u'Called when the given test is about to be run'b'Called once before any tests are executed. + + See startTest for a method called before each test. + 'u'Called once before any tests are executed. + + See startTest for a method called before each test. + 'b'Called when the given test has been run'u'Called when the given test has been run'b'Called once after all tests are executed. + + See stopTest for a method called after each test. + 'u'Called once after all tests are executed. + + See stopTest for a method called after each test. + 'b'Called when an error has occurred. 'err' is a tuple of values as + returned by sys.exc_info(). + 'u'Called when an error has occurred. 'err' is a tuple of values as + returned by sys.exc_info(). + 'b'Called when an error has occurred. 'err' is a tuple of values as + returned by sys.exc_info().'u'Called when an error has occurred. 'err' is a tuple of values as + returned by sys.exc_info().'b'Called at the end of a subtest. + 'err' is None if the subtest ended successfully, otherwise it's a + tuple of values as returned by sys.exc_info(). + 'u'Called at the end of a subtest. + 'err' is None if the subtest ended successfully, otherwise it's a + tuple of values as returned by sys.exc_info(). + 'b'Called when a test has completed successfully'u'Called when a test has completed successfully'b'Called when a test is skipped.'u'Called when a test is skipped.'b'Called when an expected failure/error occurred.'u'Called when an expected failure/error occurred.'b'Called when a test was expected to fail, but succeed.'u'Called when a test was expected to fail, but succeed.'b'Tells whether or not this result was a success.'u'Tells whether or not this result was a success.'b'unexpectedSuccesses'u'unexpectedSuccesses'b'Indicates that the tests should be aborted.'u'Indicates that the tests should be aborted.'b'Converts a sys.exc_info()-style tuple of values into a string.'u'Converts a sys.exc_info()-style tuple of values into a string.'b'__unittest'u'__unittest'b'<%s run=%i errors=%i failures=%i>'u'<%s run=%i errors=%i failures=%i>'u'unittest.result'Running tests_WritelnDecoratorUsed to decorate file-like objects with a handy 'writeln' methodA test result class that can print formatted text results to a stream. + + Used by TextTestRunner. + separator1separator2showAlldotsgetDescriptiondoc_first_line ... FAILskipped {0!r}expected failureunexpected successprintErrorListflavourA test runner class that displays results in textual form. + + It prints out the names of tests as they are run, errors as they + occur, and a summary of the results at the end of the test run. + resultclassConstruct a TextTestRunner. + + Subclasses should accept **kwargs to ensure compatibility as the + interface changes. 
+ _makeResultRun the given test case or test suite.Please use assert\w+ instead.startTimestopTimetimeTakenRan %d test%s in %.3fsexpectedFailsFAILEDerroredfailures=%derrors=%dskipped=%dexpected failures=%dunexpected successes=%d# text-mode streams translate to \r\n if needed# if self.warnings is set, use it to filter all the warnings# if the filter is 'default' or 'always', special-case the# warnings from the deprecated unittest methods to show them# no more than once per module, because they can be fairly# noisy. The -Wd and -Wa flags can be used to bypass this# only when self.warnings is None.b'Running tests'u'Running tests'b'Used to decorate file-like objects with a handy 'writeln' method'u'Used to decorate file-like objects with a handy 'writeln' method'b'A test result class that can print formatted text results to a stream. + + Used by TextTestRunner. + 'u'A test result class that can print formatted text results to a stream. + + Used by TextTestRunner. + 'b' ... 'u' ... 'b'ok'u'ok'b'FAIL'u'FAIL'b'skipped {0!r}'u'skipped {0!r}'b'expected failure'u'expected failure'b'unexpected success'u'unexpected success'b'A test runner class that displays results in textual form. + + It prints out the names of tests as they are run, errors as they + occur, and a summary of the results at the end of the test run. + 'u'A test runner class that displays results in textual form. + + It prints out the names of tests as they are run, errors as they + occur, and a summary of the results at the end of the test run. + 'b'Construct a TextTestRunner. + + Subclasses should accept **kwargs to ensure compatibility as the + interface changes. + 'u'Construct a TextTestRunner. + + Subclasses should accept **kwargs to ensure compatibility as the + interface changes. + 'b'Run the given test case or test suite.'u'Run the given test case or test suite.'b'module'b'Please use assert\w+ instead.'u'Please use assert\w+ instead.'b'separator2'u'separator2'b'Ran %d test%s in %.3fs'u'Ran %d test%s in %.3fs'b'FAILED'u'FAILED'b'failures=%d'u'failures=%d'b'errors=%d'u'errors=%d'b'skipped=%d'u'skipped=%d'b'expected failures=%d'u'expected failures=%d'b'unexpected successes=%d'u'unexpected successes=%d'u'unittest.runner'u'runner'Execute the coroutine and return the result. + + This function runs the passed coroutine, taking care of + managing the asyncio event loop and finalizing asynchronous + generators. + + This function cannot be called when another asyncio event loop is + running in the same thread. + + If debug is True, the event loop will be run in debug mode. + + This function always creates a new event loop and closes it at the end. + It should be used as a main entry point for asyncio programs, and should + ideally only be called once. + + Example: + + async def main(): + await asyncio.sleep(1) + print('hello') + + asyncio.run(main()) + asyncio.run() cannot be called from a running event loopa coroutine was expected, got {!r}_cancel_all_tasksunhandled exception during asyncio.run() shutdownb'Execute the coroutine and return the result. + + This function runs the passed coroutine, taking care of + managing the asyncio event loop and finalizing asynchronous + generators. + + This function cannot be called when another asyncio event loop is + running in the same thread. + + If debug is True, the event loop will be run in debug mode. + + This function always creates a new event loop and closes it at the end. + It should be used as a main entry point for asyncio programs, and should + ideally only be called once. 
+ + Example: + + async def main(): + await asyncio.sleep(1) + print('hello') + + asyncio.run(main()) + 'u'Execute the coroutine and return the result. + + This function runs the passed coroutine, taking care of + managing the asyncio event loop and finalizing asynchronous + generators. + + This function cannot be called when another asyncio event loop is + running in the same thread. + + If debug is True, the event loop will be run in debug mode. + + This function always creates a new event loop and closes it at the end. + It should be used as a main entry point for asyncio programs, and should + ideally only be called once. + + Example: + + async def main(): + await asyncio.sleep(1) + print('hello') + + asyncio.run(main()) + 'b'asyncio.run() cannot be called from a running event loop'u'asyncio.run() cannot be called from a running event loop'b'a coroutine was expected, got {!r}'u'a coroutine was expected, got {!r}'b'unhandled exception during asyncio.run() shutdown'u'unhandled exception during asyncio.run() shutdown'u'asyncio.runners'u'runners'runpy.py - locating and running Python code using the module namespace + +Provides support for locating and running Python scripts using the Python +module namespace instead of the native filesystem. + +This allows Python code to play nicely with non-filesystem based PEP 302 +importers when locating support scripts as well as when importing modules. +run_modulerun_path_TempModuleTemporarily replace a module in sys.modules with an empty namespace_saved_module_ModifiedArgv0_saved_valueAlready preserving saved value_run_coderun_globalsinit_globalsscript_nameHelper to run code in nominated namespace_run_module_codeHelper to run code in new namespace with sys modifiedtemp_modulemod_globalsRelative module names not supportedexisting{mod_name!r} found in sys.modules after import of package {pkg_name!r}, but prior to execution of {mod_name!r}; this may result in unpredictable behaviour"{mod_name!r} found in sys.modules after import of ""package {pkg_name!r}, but prior to execution of ""{mod_name!r}; this may result in unpredictable ""behaviour"Error while finding module specification for {!r} ({}: {})No module named %s.__main__Cannot use package as __main__ modulepkg_main_name%s; %r is a package and cannot be directly executed%r is a namespace package and cannot be executedNo code object available for %s_ErrorError that _run_module_as_main() should report without a traceback_run_module_as_mainalter_argvRuns the designated module in the __main__ namespace + + Note that the executed module will have full access to the + __main__ namespace. If this is not desirable, the run_module() + function should be used to run the module code in a fresh namespace. + + At the very least, these variables in __main__ will be overwritten: + __name__ + __file__ + __cached__ + __loader__ + __package__ + _get_main_module_detailsmain_globalsrun_namealter_sysExecute a module's code without importing it + + Returns the resulting top level namespace dictionary + main_namesaved_maincan't find %r module in %r_get_code_from_filedecoded_pathpath_nameExecute code located at the specified filesystem location + + Returns the resulting top level namespace dictionary + + The file path may refer directly to a Python script (i.e. + one that could be directly executed with execfile) or else + it may refer to a zipfile or directory containing a top + level __main__.py script. 
+ is_NullImporterNullImporterNo module specified for execution# Written by Nick Coghlan # to implement PEP 338 (Executing Modules as Scripts)# importlib first so we can test #15386 via -m# TODO: Replace these helpers with importlib._bootstrap_external functions.# Copy the globals of the temporary module, as they# may be cleared when the temporary module goes away# Helper to get the full name, spec and code for a module# Try importing the parent to avoid catching initialization errors# If the parent or higher ancestor package is missing, let the# error be raised by find_spec() below and then be caught. But do# not allow other errors to be caught.# Warn if the module has already been imported under its normal name# No module loaded; being a package is irrelevant# XXX ncoghlan: Should this be documented and made public?# (Current thoughts: don't repeat the mistake that lead to its# creation when run_module() no longer met the needs of# mainmodule.c, but couldn't be changed because it was public)# i.e. -m switch# i.e. directory or zipfile execution# Leave the sys module alone# Helper that gives a nicer error message when attempting to# execute a zipfile or directory by invoking __main__.py# Also moves the standard __main__ out of the way so that the# preexisting __loader__ entry doesn't cause issues# Check for a compiled file first# That didn't work, so try it as normal source code# Trying to avoid importing imp so as to not consume the deprecation warning.# Not a valid sys.path entry, so run the code directly# execfile() doesn't help as we want to allow compiled files# Finder is defined for path, so add it to# the start of sys.path# Here's where things are a little different from the run_module# case. There, we only had to replace the module in sys while the# code was running and doing so was somewhat optional. Here, we# have no choice and we have to remove it even while we read the# code. If we don't do this, a __loader__ attribute in the# existing __main__ module may prevent location of the new module.# Run the module specified as the next command line argument# Make the requested module sys.argv[0]b'runpy.py - locating and running Python code using the module namespace + +Provides support for locating and running Python scripts using the Python +module namespace instead of the native filesystem. + +This allows Python code to play nicely with non-filesystem based PEP 302 +importers when locating support scripts as well as when importing modules. +'u'runpy.py - locating and running Python code using the module namespace + +Provides support for locating and running Python scripts using the Python +module namespace instead of the native filesystem. + +This allows Python code to play nicely with non-filesystem based PEP 302 +importers when locating support scripts as well as when importing modules. 
+'b'run_module'u'run_module'b'run_path'u'run_path'b'Temporarily replace a module in sys.modules with an empty namespace'u'Temporarily replace a module in sys.modules with an empty namespace'b'Already preserving saved value'u'Already preserving saved value'b'Helper to run code in nominated namespace'u'Helper to run code in nominated namespace'b'Helper to run code in new namespace with sys modified'u'Helper to run code in new namespace with sys modified'b'Relative module names not supported'u'Relative module names not supported'b'{mod_name!r} found in sys.modules after import of package {pkg_name!r}, but prior to execution of {mod_name!r}; this may result in unpredictable behaviour'u'{mod_name!r} found in sys.modules after import of package {pkg_name!r}, but prior to execution of {mod_name!r}; this may result in unpredictable behaviour'b'Error while finding module specification for {!r} ({}: {})'u'Error while finding module specification for {!r} ({}: {})'b'No module named %s'u'No module named %s'b'.__main__'u'.__main__'b'Cannot use package as __main__ module'u'Cannot use package as __main__ module'b'%s; %r is a package and cannot 'u'%s; %r is a package and cannot 'b'be directly executed'u'be directly executed'b'%r is a namespace package and cannot be executed'u'%r is a namespace package and cannot be executed'b'No code object available for %s'u'No code object available for %s'b'Error that _run_module_as_main() should report without a traceback'u'Error that _run_module_as_main() should report without a traceback'b'Runs the designated module in the __main__ namespace + + Note that the executed module will have full access to the + __main__ namespace. If this is not desirable, the run_module() + function should be used to run the module code in a fresh namespace. + + At the very least, these variables in __main__ will be overwritten: + __name__ + __file__ + __cached__ + __loader__ + __package__ + 'u'Runs the designated module in the __main__ namespace + + Note that the executed module will have full access to the + __main__ namespace. If this is not desirable, the run_module() + function should be used to run the module code in a fresh namespace. + + At the very least, these variables in __main__ will be overwritten: + __name__ + __file__ + __cached__ + __loader__ + __package__ + 'b'Execute a module's code without importing it + + Returns the resulting top level namespace dictionary + 'u'Execute a module's code without importing it + + Returns the resulting top level namespace dictionary + 'b'can't find %r module in %r'u'can't find %r module in %r'b'Execute code located at the specified filesystem location + + Returns the resulting top level namespace dictionary + + The file path may refer directly to a Python script (i.e. + one that could be directly executed with execfile) or else + it may refer to a zipfile or directory containing a top + level __main__.py script. + 'u'Execute code located at the specified filesystem location + + Returns the resulting top level namespace dictionary + + The file path may refer directly to a Python script (i.e. + one that could be directly executed with execfile) or else + it may refer to a zipfile or directory containing a top + level __main__.py script. 
+ 'b''u''b'NullImporter'u'NullImporter'b'No module specified for execution'u'No module specified for execution'u'runpy'KQ_EV_ADDKQ_EV_CLEARKQ_EV_DELETEKQ_EV_DISABLEKQ_EV_ENABLEKQ_EV_EOFKQ_EV_ERRORKQ_EV_FLAG1KQ_EV_ONESHOTKQ_EV_SYSFLAGS-3KQ_FILTER_AIO-5KQ_FILTER_PROCKQ_FILTER_READ-6KQ_FILTER_SIGNAL-7KQ_FILTER_TIMER-4KQ_FILTER_VNODEKQ_FILTER_WRITEKQ_NOTE_ATTRIBKQ_NOTE_CHILDKQ_NOTE_DELETEKQ_NOTE_EXECKQ_NOTE_EXITKQ_NOTE_EXTENDKQ_NOTE_FORKKQ_NOTE_LINKKQ_NOTE_LOWAT-1048576KQ_NOTE_PCTRLMASKKQ_NOTE_PDATAMASKKQ_NOTE_RENAMEKQ_NOTE_REVOKEKQ_NOTE_TRACKKQ_NOTE_TRACKERRKQ_NOTE_WRITEPIPE_BUFPOLLERRPOLLHUPPOLLINPOLLNVALPOLLOUTPOLLPRIPOLLRDBANDPOLLRDNORMPOLLWRBANDPOLLWRNORMu'This module supports asynchronous I/O on multiple file descriptors. + +*** IMPORTANT NOTICE *** +On Windows, only sockets are supported; on Unix, all file descriptors.'u'/Users/pwntester/.pyenv/versions/3.8.13/lib/python3.8/lib-dynload/select.cpython-38-darwin.so'u'kevent(ident, filter=KQ_FILTER_READ, flags=KQ_EV_ADD, fflags=0, data=0, udata=0) + +This object is the equivalent of the struct kevent for the C API. + +See the kqueue manpage for more detailed information about the meaning +of the arguments. + +One minor note: while you might hope that udata could store a +reference to a python object, it cannot, because it is impossible to +keep a proper reference count of the object once it's passed into the +kernel. Therefore, I have restricted it to only storing an integer. I +recommend ignoring it and simply using the 'ident' field to key off +of. You could also set up a dictionary on the python side to store a +udata->object mapping.'fflagsudataselect.keventkeventu'Kqueue syscall wrapper. + +For example, to start watching a socket for input: +>>> kq = kqueue() +>>> sock = socket() +>>> sock.connect((host, port)) +>>> kq.control([kevent(sock, KQ_FILTER_WRITE, KQ_EV_ADD)], 0) + +To wait one second for it to become writeable: +>>> kq.control(None, 1, 1000) + +To stop listening: +>>> kq.control([kevent(sock, KQ_FILTER_WRITE, KQ_EV_DELETE)], 0)'u'True if the kqueue handler is closed'u'kqueue.closed'controlselect.kqueuekqueueEvent loop using a selector and related classes. + +A selector is a "notify-when-ready" multiplexer. For a subclass which +also includes support for signal handling, see the unix_events sub-module. +BaseSelectorEventLoop_test_selector_eventget_key_check_ssl_socketSocket cannot be of type SSLSocketSelector event loop. + + See events.EventLoop for API specification. + Using selector: %s_transports_SelectorSocketTransport_SelectorDatagramTransport_remove_reader_add_reader_read_from_self_process_self_data_accept_connection_accept_connection2socket.accept() out of system resourceError on transport creation for incoming connection_ensure_fd_no_transportInvalid file object: File descriptor is used by transport ' is used by transport 'modify_add_writerEVENT_WRITE_remove_writerRemove a writer callback.Add a reader callback.Remove a reader callback.Add a writer callback..Receive data from the socket. + + The return value is a bytes object representing the data received. + The maximum amount of data to be received at once is specified by + nbytes. + _sock_recv_sock_read_doneReceive data from the socket. + + The received data is written into *buf* (a writable buffer). + The return value is the number of bytes written. + _sock_recv_intoSend data to the socket. + + The socket must be connected to a remote socket. This method continues + to send data from data until either all data has been sent or an + error occurs. 
None is returned on success. On error, an exception is + raised, and there is no way to determine how much data, if any, was + successfully processed by the receiving end of the connection. + _sock_write_done_sock_sendallConnect to a remote socket at address. + + This method is a coroutine. + resolved_sock_connect_sock_connect_cbConnect call failed Accept a connection. + + The socket must be bound to an address and listening for connections. + The return value is a pair (conn, address) where conn is a new socket + object usable to send and receive data on the connection, and address + is the address bound to the socket on the other end of the connection. + _sock_acceptregistered_sock_fd_SelectorTransport_buffer_factory_protocol_connectedpollingread=pollingread=idlewrite=<, bufsize=Fatal error on transport_read_ready_cb_read_ready_read_ready__get_buffer_read_ready__data_receivedFatal error: protocol.get_buffer() call failed.Fatal read error on socket transport_read_ready__on_eofFatal error: protocol.data_received() call failed.'data argument must be a bytes-like object, ''not 'Cannot call write() after write_eof()Fatal write error on socket transport_write_readyData should not be emptyFatal read error on datagram transport_sendto_ready# Test if the selector is monitoring 'event' events# for the file descriptor 'fd'.# This method is only called once for each event loop tick where the# listening socket has triggered an EVENT_READ. There may be multiple# connections waiting for an .accept() so it is called in a loop.# See https://bugs.python.org/issue27906 for more details.# Early exit because the socket accept buffer is empty.# There's nowhere to send the error, so just log it.# Some platforms (e.g. Linux keep reporting the FD as# ready, so we remove the read handler temporarily.# We'll try again in a while.# The event loop will catch, log and ignore it.# It's now up to the protocol to handle the connection.# This code matches selectors._fileobj_to_fd function.# Remove both writer and connector.# _sock_recv() can add itself as an I/O callback if the operation can't# be done immediately. Don't use it directly, call sock_recv().# try again next time# _sock_recv_into() can add itself as an I/O callback if the operation# can't be done immediately. Don't use it directly, call# sock_recv_into().# all data sent# use a trick with a list in closure to store a mutable state# Future cancellation can be scheduled on previous loop iteration# Issue #23618: When the C function connect() fails with EINTR, the# connection runs in background. We have to wait until the socket# becomes writable to be notified when the connection succeed or# fails.# Jump to any except clause below.# socket is still registered, the callback will be retried later# Buffer size passed to recv().# Constructs initial value for self._buffer.# Attribute used in the destructor: it must be set even if the constructor# is not called (see _SelectorSslTransport which may start by raising an# exception)# Set when call to connection_lost scheduled.# test if the transport was closed# Should be called from exception handler only.# Disable the Nagle algorithm -- small writes will be# sent without waiting for the TCP ACK. 
This generally# decreases the latency (in some cases significantly.)# only start reading when connection_made() has been called# We're keeping the connection open so the# protocol can write more, but we still can't# receive more, so remove the reader callback.# Optimization: try to send now.# Not all was written; register write handler.# Add it to the buffer.# May append to buffer.# Attempt to send it right away first.# Try again later.b'Event loop using a selector and related classes. + +A selector is a "notify-when-ready" multiplexer. For a subclass which +also includes support for signal handling, see the unix_events sub-module. +'u'Event loop using a selector and related classes. + +A selector is a "notify-when-ready" multiplexer. For a subclass which +also includes support for signal handling, see the unix_events sub-module. +'b'BaseSelectorEventLoop'u'BaseSelectorEventLoop'b'Socket cannot be of type SSLSocket'u'Socket cannot be of type SSLSocket'b'Selector event loop. + + See events.EventLoop for API specification. + 'u'Selector event loop. + + See events.EventLoop for API specification. + 'b'Using selector: %s'u'Using selector: %s'b'socket.accept() out of system resource'u'socket.accept() out of system resource'b'Error on transport creation for incoming connection'u'Error on transport creation for incoming connection'b'Invalid file object: 'u'Invalid file object: 'b'File descriptor 'u'File descriptor 'b' is used by transport 'u' is used by transport 'b'Remove a writer callback.'u'Remove a writer callback.'b'Add a reader callback.'u'Add a reader callback.'b'Remove a reader callback.'u'Remove a reader callback.'b'Add a writer callback..'u'Add a writer callback..'b'Receive data from the socket. + + The return value is a bytes object representing the data received. + The maximum amount of data to be received at once is specified by + nbytes. + 'u'Receive data from the socket. + + The return value is a bytes object representing the data received. + The maximum amount of data to be received at once is specified by + nbytes. + 'b'Receive data from the socket. + + The received data is written into *buf* (a writable buffer). + The return value is the number of bytes written. + 'u'Receive data from the socket. + + The received data is written into *buf* (a writable buffer). + The return value is the number of bytes written. + 'b'Send data to the socket. + + The socket must be connected to a remote socket. This method continues + to send data from data until either all data has been sent or an + error occurs. None is returned on success. On error, an exception is + raised, and there is no way to determine how much data, if any, was + successfully processed by the receiving end of the connection. + 'u'Send data to the socket. + + The socket must be connected to a remote socket. This method continues + to send data from data until either all data has been sent or an + error occurs. None is returned on success. On error, an exception is + raised, and there is no way to determine how much data, if any, was + successfully processed by the receiving end of the connection. + 'b'Connect to a remote socket at address. + + This method is a coroutine. + 'u'Connect to a remote socket at address. + + This method is a coroutine. + 'b'Connect call failed 'u'Connect call failed 'b'Accept a connection. + + The socket must be bound to an address and listening for connections. 
+ The return value is a pair (conn, address) where conn is a new socket + object usable to send and receive data on the connection, and address + is the address bound to the socket on the other end of the connection. + 'u'Accept a connection. + + The socket must be bound to an address and listening for connections. + The return value is a pair (conn, address) where conn is a new socket + object usable to send and receive data on the connection, and address + is the address bound to the socket on the other end of the connection. + 'b'read=polling'u'read=polling'b'read=idle'u'read=idle'b'polling'u'polling'b'write=<'u'write=<'b', bufsize='u', bufsize='b'Fatal error on transport'u'Fatal error on transport'b'Fatal error: protocol.get_buffer() call failed.'u'Fatal error: protocol.get_buffer() call failed.'b'Fatal read error on socket transport'u'Fatal read error on socket transport'b'Fatal error: protocol.data_received() call failed.'u'Fatal error: protocol.data_received() call failed.'b'Cannot call write() after write_eof()'u'Cannot call write() after write_eof()'b'Fatal write error on socket transport'u'Fatal write error on socket transport'b'Data should not be empty'u'Data should not be empty'b'Fatal read error on datagram transport'u'Fatal read error on datagram transport'u'asyncio.selector_events'u'selector_events'Selectors module. + +This module allows high-level and efficient I/O multiplexing, built upon the +`select` module primitives. +_fileobj_to_fdReturn a file descriptor from a file object. + + Parameters: + fileobj -- file object or file descriptor + + Returns: + corresponding file descriptor + + Raises: + ValueError if the object is invalid + Invalid file object: {!r}"Invalid file object: ""{!r}"Invalid file descriptor: {}SelectorKeySelectorKey(fileobj, fd, events, data) + + Object used to associate a file object to its backing + file descriptor, selected event mask, and attached data. +File object registered.Underlying file descriptor.Events that must be waited for on this file object.Optional opaque data associated to this file object. + For example, this could be used to store a per-client session ID._SelectorMappingMapping of file objects to selector keys._fd_to_key_fileobj_lookup{!r} is not registeredBaseSelectorSelector abstract base class. + + A selector supports registering file objects to be monitored for specific + I/O events. + + A file object is a file descriptor or any object with a `fileno()` method. + An arbitrary object can be attached to the file object, which can be used + for example to store context information, a callback, etc. + + A selector can use various implementations (select(), poll(), epoll()...) + depending on the platform. The default `Selector` class uses the most + efficient implementation on the current platform. + Register a file object. + + Parameters: + fileobj -- file object or file descriptor + events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) + data -- attached data + + Returns: + SelectorKey instance + + Raises: + ValueError if events is invalid + KeyError if fileobj is already registered + OSError if fileobj is closed or otherwise is unacceptable to + the underlying system call (if a system call is made) + + Note: + OSError may or may not be raised + Unregister a file object. 
+ + Parameters: + fileobj -- file object or file descriptor + + Returns: + SelectorKey instance + + Raises: + KeyError if fileobj is not registered + + Note: + If fileobj is registered but has since been closed this does + *not* raise OSError (even if the wrapped syscall does) + Change a registered file object monitored events or attached data. + + Parameters: + fileobj -- file object or file descriptor + events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) + data -- attached data + + Returns: + SelectorKey instance + + Raises: + Anything that unregister() or register() raises + Perform the actual selection, until some monitored file objects are + ready or a timeout expires. + + Parameters: + timeout -- if timeout > 0, this specifies the maximum wait time, in + seconds + if timeout <= 0, the select() call won't block, and will + report the currently ready file objects + if timeout is None, select() will block until a monitored + file object becomes ready + + Returns: + list of (key, events) for ready file objects + `events` is a bitwise mask of EVENT_READ|EVENT_WRITE + Close the selector. + + This must be called to make sure that any underlying resource is freed. + Return the key associated to a registered file object. + + Returns: + SelectorKey for this file object + get_mapSelector is closedReturn a mapping of file objects to selector keys._BaseSelectorImplBase selector implementation.Return a file descriptor from a file object. + + This wraps _fileobj_to_fd() to do an exhaustive search in case + the object is invalid but we still have it in our map. This + is used by unregister() so we can unregister an object that + was previously registered even if it is closed. It is also + used by _SelectorMapping. + Invalid events: {!r}{!r} (FD {}) is already registered_key_from_fdReturn the key associated to a given file descriptor. + + Parameters: + fd -- file descriptor + + Returns: + corresponding key, or None if not found + Select-based selector._readers_writers_select_PollLikeSelectorBase class shared between poll, epoll and devpoll selectors._selector_cls_EVENT_READ_EVENT_WRITEpoller_events is not registeredselector_eventsfd_event_listPoll-based selector.epollEpollSelectorEpoll-based selector.EPOLLINEPOLLOUT1e-3max_evdevpollDevpollSelectorSolaris /dev/poll selector.KqueueSelectorKqueue-based selector.kevkev_list# generic events, that must be mapped to implementation-specific ones# this maps file descriptors to keys# read-only mapping returned by get_map()# Do an exhaustive search.# Raise ValueError after all.# Use a shortcut to update the data.# This can happen if the FD was closed since it# was registered.# This is shared between poll() and epoll().# epoll() has a different signature and handling of timeout parameter.# poll() has a resolution of 1 millisecond, round away from# zero to wait *at least* timeout seconds.# epoll_wait() has a resolution of 1 millisecond, round away# from zero to wait *at least* timeout seconds.# epoll_wait() expects `maxevents` to be greater than zero;# we want to make sure that `select()` can be called when no# FD is registered.# See comment above.# Choose the best implementation, roughly:# epoll|kqueue|devpoll > poll > select.# select() also can't accept a FD > FD_SETSIZE (usually around 1024)b'Selectors module. + +This module allows high-level and efficient I/O multiplexing, built upon the +`select` module primitives. +'u'Selectors module. 
+ +This module allows high-level and efficient I/O multiplexing, built upon the +`select` module primitives. +'b'Return a file descriptor from a file object. + + Parameters: + fileobj -- file object or file descriptor + + Returns: + corresponding file descriptor + + Raises: + ValueError if the object is invalid + 'u'Return a file descriptor from a file object. + + Parameters: + fileobj -- file object or file descriptor + + Returns: + corresponding file descriptor + + Raises: + ValueError if the object is invalid + 'b'Invalid file object: {!r}'u'Invalid file object: {!r}'b'Invalid file descriptor: {}'u'Invalid file descriptor: {}'b'SelectorKey'u'SelectorKey'b'fileobj'u'fileobj'b'fd'u'fd'b'events'b'SelectorKey(fileobj, fd, events, data) + + Object used to associate a file object to its backing + file descriptor, selected event mask, and attached data. +'u'SelectorKey(fileobj, fd, events, data) + + Object used to associate a file object to its backing + file descriptor, selected event mask, and attached data. +'b'File object registered.'u'File object registered.'b'Underlying file descriptor.'u'Underlying file descriptor.'b'Events that must be waited for on this file object.'u'Events that must be waited for on this file object.'b'Optional opaque data associated to this file object. + For example, this could be used to store a per-client session ID.'u'Optional opaque data associated to this file object. + For example, this could be used to store a per-client session ID.'b'Mapping of file objects to selector keys.'u'Mapping of file objects to selector keys.'b'{!r} is not registered'u'{!r} is not registered'b'Selector abstract base class. + + A selector supports registering file objects to be monitored for specific + I/O events. + + A file object is a file descriptor or any object with a `fileno()` method. + An arbitrary object can be attached to the file object, which can be used + for example to store context information, a callback, etc. + + A selector can use various implementations (select(), poll(), epoll()...) + depending on the platform. The default `Selector` class uses the most + efficient implementation on the current platform. + 'u'Selector abstract base class. + + A selector supports registering file objects to be monitored for specific + I/O events. + + A file object is a file descriptor or any object with a `fileno()` method. + An arbitrary object can be attached to the file object, which can be used + for example to store context information, a callback, etc. + + A selector can use various implementations (select(), poll(), epoll()...) + depending on the platform. The default `Selector` class uses the most + efficient implementation on the current platform. + 'b'Register a file object. + + Parameters: + fileobj -- file object or file descriptor + events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) + data -- attached data + + Returns: + SelectorKey instance + + Raises: + ValueError if events is invalid + KeyError if fileobj is already registered + OSError if fileobj is closed or otherwise is unacceptable to + the underlying system call (if a system call is made) + + Note: + OSError may or may not be raised + 'u'Register a file object. 
+ + Parameters: + fileobj -- file object or file descriptor + events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) + data -- attached data + + Returns: + SelectorKey instance + + Raises: + ValueError if events is invalid + KeyError if fileobj is already registered + OSError if fileobj is closed or otherwise is unacceptable to + the underlying system call (if a system call is made) + + Note: + OSError may or may not be raised + 'b'Unregister a file object. + + Parameters: + fileobj -- file object or file descriptor + + Returns: + SelectorKey instance + + Raises: + KeyError if fileobj is not registered + + Note: + If fileobj is registered but has since been closed this does + *not* raise OSError (even if the wrapped syscall does) + 'u'Unregister a file object. + + Parameters: + fileobj -- file object or file descriptor + + Returns: + SelectorKey instance + + Raises: + KeyError if fileobj is not registered + + Note: + If fileobj is registered but has since been closed this does + *not* raise OSError (even if the wrapped syscall does) + 'b'Change a registered file object monitored events or attached data. + + Parameters: + fileobj -- file object or file descriptor + events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) + data -- attached data + + Returns: + SelectorKey instance + + Raises: + Anything that unregister() or register() raises + 'u'Change a registered file object monitored events or attached data. + + Parameters: + fileobj -- file object or file descriptor + events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) + data -- attached data + + Returns: + SelectorKey instance + + Raises: + Anything that unregister() or register() raises + 'b'Perform the actual selection, until some monitored file objects are + ready or a timeout expires. + + Parameters: + timeout -- if timeout > 0, this specifies the maximum wait time, in + seconds + if timeout <= 0, the select() call won't block, and will + report the currently ready file objects + if timeout is None, select() will block until a monitored + file object becomes ready + + Returns: + list of (key, events) for ready file objects + `events` is a bitwise mask of EVENT_READ|EVENT_WRITE + 'u'Perform the actual selection, until some monitored file objects are + ready or a timeout expires. + + Parameters: + timeout -- if timeout > 0, this specifies the maximum wait time, in + seconds + if timeout <= 0, the select() call won't block, and will + report the currently ready file objects + if timeout is None, select() will block until a monitored + file object becomes ready + + Returns: + list of (key, events) for ready file objects + `events` is a bitwise mask of EVENT_READ|EVENT_WRITE + 'b'Close the selector. + + This must be called to make sure that any underlying resource is freed. + 'u'Close the selector. + + This must be called to make sure that any underlying resource is freed. + 'b'Return the key associated to a registered file object. + + Returns: + SelectorKey for this file object + 'u'Return the key associated to a registered file object. + + Returns: + SelectorKey for this file object + 'b'Selector is closed'u'Selector is closed'b'Return a mapping of file objects to selector keys.'u'Return a mapping of file objects to selector keys.'b'Base selector implementation.'u'Base selector implementation.'b'Return a file descriptor from a file object. + + This wraps _fileobj_to_fd() to do an exhaustive search in case + the object is invalid but we still have it in our map. 
This + is used by unregister() so we can unregister an object that + was previously registered even if it is closed. It is also + used by _SelectorMapping. + 'u'Return a file descriptor from a file object. + + This wraps _fileobj_to_fd() to do an exhaustive search in case + the object is invalid but we still have it in our map. This + is used by unregister() so we can unregister an object that + was previously registered even if it is closed. It is also + used by _SelectorMapping. + 'b'Invalid events: {!r}'u'Invalid events: {!r}'b'{!r} (FD {}) is already registered'u'{!r} (FD {}) is already registered'b'Return the key associated to a given file descriptor. + + Parameters: + fd -- file descriptor + + Returns: + corresponding key, or None if not found + 'u'Return the key associated to a given file descriptor. + + Parameters: + fd -- file descriptor + + Returns: + corresponding key, or None if not found + 'b'Select-based selector.'u'Select-based selector.'b'Base class shared between poll, epoll and devpoll selectors.'u'Base class shared between poll, epoll and devpoll selectors.'b' is not registered'u' is not registered'b'Poll-based selector.'u'Poll-based selector.'b'epoll'u'epoll'b'Epoll-based selector.'u'Epoll-based selector.'b'devpoll'u'devpoll'b'Solaris /dev/poll selector.'u'Solaris /dev/poll selector.'b'kqueue'u'kqueue'b'Kqueue-based selector.'u'Kqueue-based selector.'b'KqueueSelector'u'KqueueSelector'b'EpollSelector'u'EpollSelector'b'DevpollSelector'u'DevpollSelector'u'selectors'HTTP server classes. + +Note: BaseHTTPRequestHandler doesn't implement any HTTP request; see +SimpleHTTPRequestHandler for simple implementations of GET, HEAD and POST, +and CGIHTTPRequestHandler for CGI scripts. + +It does, however, optionally implement HTTP/1.1 persistent connections, +as of version 0.3. + +Notes on CGIHTTPRequestHandler +------------------------------ + +This class implements GET and POST requests to cgi-bin scripts. + +If the os.fork() function is not present (e.g. on Windows), +subprocess.Popen() is used as a fallback, with slightly altered semantics. + +In all cases, the implementation is intentionally naive -- all +requests are executed synchronously. + +SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL +-- it may execute arbitrary Python code or external programs. + +Note that status code 200 is sent prior to execution of a CGI script, so +scripts cannot send other status codes such as 302 (redirect). + +XXX To do: + +- log requests even later (to capture byte count) +- log user-agent header and other interesting goodies +- send error log to separate file +ThreadingHTTPServer + + + + Error response + + +

Error response

+

Error code: %(code)d

+

Message: %(message)s.

+

Error code explanation: %(code)s - %(explain)s.

+ + +DEFAULT_ERROR_MESSAGEtext/html;charset=utf-8DEFAULT_ERROR_CONTENT_TYPETCPServerallow_reuse_addressserver_bindOverride server_bind to store the server name.server_addressserver_nameThreadingMixIndaemon_threadsStreamRequestHandlerHTTP request handler base class. + + The following explanation of HTTP serves to guide you through the + code as well as to expose any misunderstandings I may have about + HTTP (so you don't need to read the code to figure out I'm wrong + :-). + + HTTP (HyperText Transfer Protocol) is an extensible protocol on + top of a reliable stream transport (e.g. TCP/IP). The protocol + recognizes three parts to a request: + + 1. One line identifying the request type and path + 2. An optional set of RFC-822-style headers + 3. An optional data part + + The headers and data are separated by a blank line. + + The first line of the request has the form + + + + where is a (case-sensitive) keyword such as GET or POST, + is a string containing path information for the request, + and should be the string "HTTP/1.0" or "HTTP/1.1". + is encoded using the URL encoding scheme (using %xx to signify + the ASCII character with hex code xx). + + The specification specifies that lines are separated by CRLF but + for compatibility with the widest range of clients recommends + servers also handle LF. Similarly, whitespace in the request line + is treated sensibly (allowing multiple spaces between components + and allowing trailing whitespace). + + Similarly, for output, lines ought to be separated by CRLF pairs + but most clients grok LF characters just fine. + + If the first line of the request has the form + + + + (i.e. is left out) then this is assumed to be an HTTP + 0.9 request; this form has no optional headers and data part and + the reply consists of just the data. + + The reply form of the HTTP 1.x protocol again has three parts: + + 1. One line giving the response code + 2. An optional set of RFC-822-style headers + 3. The data + + Again, the headers and data are separated by a blank line. + + The response code line has the form + + + + where is the protocol version ("HTTP/1.0" or "HTTP/1.1"), + is a 3-digit response code indicating success or + failure of the request, and is an optional + human-readable string explaining what the response code means. + + This server parses the request and the headers, and then calls a + function specific to the request type (). Specifically, + a request SPAM will be handled by a method do_SPAM(). If no + such method exists the server sends an error response to the + client. If it exists, it is called with no arguments: + + do_SPAM() + + Note that the request name is case sensitive (i.e. SPAM and spam + are different requests). + + The various request details are stored in instance variables: + + - client_address is the client IP address in the form (host, + port); + + - command, path and version are the broken-down request line; + + - headers is an instance of email.message.Message (or a derived + class) containing the header information; + + - rfile is a file object open for reading positioned at the + start of the optional input data part; + + - wfile is a file object open for writing. + + IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING! + + The first thing to be written must be the response line. Then + follow 0 or more header lines, then a blank line, and then the + actual data (if any). 
The meaning of the header lines depends on + the command executed by the server; in most cases, when data is + returned, there should be at least one header line of the form + + Content-type: / + + where and should be registered MIME types, + e.g. "text/html" or "text/plain". + + Python/BaseHTTP/server_versionerror_message_formaterror_content_typedefault_request_versionparse_requestParse a request (internal). + + The request should be stored in self.raw_requestline; the results + are in self.command, self.path, self.request_version and + self.headers. + + Return True for success, False for failure; on failure, any relevant + error response has already been sent back. + + request_versionclose_connectionraw_requestlinerequestlinebase_version_numberversion_numbersend_errorBad request version (%r)protocol_versionInvalid HTTP version (%s)Bad request syntax (%r)Bad HTTP/0.9 request type (%r)rfileLine too longToo many headersconntypeExpect100-continuehandle_expect_100Decide what to do with an "Expect: 100-continue" header. + + If the client is expecting a 100 Continue response, we must + respond with either a 100 Continue or a final response before + waiting for the request body. The default is to always respond + with a 100 Continue. You can behave differently (for example, + reject unauthorized requests) by overriding this method. + + This method should either return True (possibly after sending + a 100 Continue response) or send an error response and return + False. + + send_response_onlyhandle_one_requestHandle a single HTTP request. + + You normally don't need to override this method; see the class + __doc__ string for information on how to handle specific HTTP + commands such as GET and POST. + + 65537mnameUnsupported method (%r)Request timed out: %rHandle multiple requests if necessary.explainSend and log an error reply. + + Arguments are + * code: an HTTP error code + 3 digits + * message: a simple optional 1 line reason phrase. + *( HTAB / SP / VCHAR / %x80-FF ) + defaults to short entry matching the response code + * explain: a detailed message defaults to the long entry + matching the response code. + + This sends an error response (so it must be called before any + output has been generated), logs the error, and finally sends + a piece of HTML explaining the error to the user. + + shortmsglongmsgcode %d, message %sAdd the response header to the headers buffer and log the + response code. + + Also send two standard headers with the server software + version and the current date. + + log_requestversion_stringdate_time_stringSend the response header only._headers_buffer%s %d %s +Send a MIME header to the headers buffer.Send the blank line ending the MIME headers.flush_headersLog an accepted request. + + This is called by send_response(). + + "%s" %s %sLog an error. + + This is called when a request cannot be fulfilled. By + default it passes the message on to log_message(). + + Arguments are the same as for log_message(). + + XXX This should go to the separate error log. + + Log an arbitrary message. + + This is used by all other logging functions. Override + it if you have specific logging wishes. + + The first argument, FORMAT, is a format string for the + message to be logged. If the format string contains + any % escapes requiring parameters, they should be + specified as subsequent arguments (it's just like + printf!). + + The client ip and current date/time are prefixed to + every message. 
+ + %s - - [%s] %s +address_stringlog_date_time_stringReturn the server software version string.Return the current date and time formatted for a message header.Return the current time formatted for logging.%02d/%3s/%04d %02d:%02d:%02dmonthnameweekdaynameReturn the client address.client_addressSimple HTTP request handler with GET and HEAD commands. + + This serves files from the current directory and any of its + subdirectories. The MIME type for files is determined by + calling the .guess_type() method. + + The GET and HEAD requests are identical except that the HEAD + request omits the actual contents of the file. + + SimpleHTTP/Serve a GET request.send_headcopyfiledo_HEADServe a HEAD request.Common code for GET and HEAD commands. + + This sends the response code and MIME headers. + + Return value is either a file object (which has to be copied + to the outputfile by the caller unless the command was HEAD, + and must be closed by the caller under all circumstances), or + None, in which case the caller has nothing further to do. + + translate_pathnew_partsnew_urlLocationindex.htmlindex.htmlist_directoryFile not foundIf-Modified-SinceIf-None-Matchparsedate_to_datetimeimslast_modifLast-ModifiedHelper to produce a directory listing (absent index.html). + + Return value is either a file object, or None (indicating an + error). In either case, the headers are sent, making the + interface the same as for send_head(). + + No permission to list directorydisplaypathDirectory listing for %s'' +''%s + +

%s


+
    displaynamelinkname
  • %s
+
+ + +text/html; charset=%sTranslate a /-separated PATH to the local filename syntax. + + Components that mean special things to the local file system + (e.g. drive or directory names) are ignored. (XXX They should + probably be diagnosed.) + + trailing_slashoutputfileCopy all data between two file objects. + + The SOURCE argument is a file object open for reading + (or anything with a read() method) and the DESTINATION + argument is a file object open for writing (or + anything with a write() method). + + The only reason for overriding this would be to change + the block size or perhaps to replace newlines by CRLF + -- note however that this the default server uses this + to copy binary data as well. + + copyfileobjGuess the type of a file. + + Argument is a PATH (a filename). + + Return value is a string of the form type/subtype, + usable for a MIME Content-type header. + + The default implementation looks the file's extension + up in the table self.extensions_map, using application/octet-stream + as a default; however it would be permissible (if + slow) to look inside the data to make a better guess. + + extensions_map_url_collapse_path + Given a URL path, remove extra '/'s and '.' path elements and collapse + any '..' references and returns a collapsed path. + + Implements something akin to RFC-2396 5.2 step 6 to parse relative paths. + The utility of this function is limited to is_cgi method and helps + preventing some security attacks. + + Returns: The reconstituted URL, which will always start with a '/'. + + Raises: IndexError if too many '..' occur within the path. + + head_partstail_partsplitpathcollapsed_pathnobodynobody_uidInternal routine to get nobody's uidTest for executable file.Complete HTTP server with GET, HEAD and POST commands. + + GET and HEAD also support running CGI scripts. + + The POST command is *only* implemented for CGI scripts. + + have_forkrbufsizedo_POSTServe a POST request. + + This is only implemented for CGI scripts. + + is_cgirun_cgiCan only POST to CGI scriptsVersion of send_head that support CGI scriptsTest whether self.path corresponds to a CGI script. + + Returns True and updates the cgi_info attribute to the tuple + (dir, rest) if self.path requires running a CGI script. + Returns False otherwise. + + If any exception is raised, the caller should assume that + self.path was rejected as invalid and act accordingly. + + The default implementation tests whether the normalized url + path begins with one of the strings in self.cgi_directories + (and the next character is a '/' or the end of the string). + + dir_sepcgi_directoriescgi_info/cgi-bin/htbinis_executableTest whether argument path is an executable file.is_pythonTest whether argument path is a Python script.Execute a CGI script.nextdirnextrestscriptdirscriptnamescriptfileNo such CGI script (%r)CGI script is not a plain file (%r)ispyCGI script is not executable (%r)SERVER_SOFTWARESERVER_NAMECGI/1.1GATEWAY_INTERFACESERVER_PROTOCOLSERVER_PORTuqrestPATH_INFOPATH_TRANSLATEDSCRIPT_NAMEQUERY_STRINGREMOTE_ADDRauthorizationAUTH_TYPEREMOTE_USERCONTENT_TYPECONTENT_LENGTHrefererHTTP_REFERER + HTTP_ACCEPTuser-agentuaHTTP_USER_AGENTcookie_strHTTP_COOKIEREMOTE_HOSTScript output followsdecoded_queryCGI script exit status %#xcmdlinew.execommand: %slist2cmdlineCGI script exited OK_get_best_familyHandlerClassServerClassTest the HTTP request handler class. + + This runs an HTTP server on port 8000 (or the port argument). + + address_familyhttpdurl_hostServing HTTP on port (http://"(http://"/) ... 
+Keyboard interrupt received, exiting.--cgiRun as CGI Server--bindADDRESSSpecify alternate bind address [default: all interfaces]'Specify alternate bind address ''[default: all interfaces]'--directorySpecify alternative directory [default:current directory]'Specify alternative directory ''[default:current directory]'Specify alternate port [default: 8000]cgihandler_classDualStackServer# See also:# HTTP Working Group T. Berners-Lee# INTERNET-DRAFT R. T. Fielding# H. Frystyk Nielsen# Expires September 8, 1995 March 8, 1995# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt# Network Working Group R. Fielding# Request for Comments: 2616 et al# Obsoletes: 2068 June 1999# Category: Standards Track# URL: http://www.faqs.org/rfcs/rfc2616.html# Log files# ---------# Here's a quote from the NCSA httpd docs about log file format.# | The logfile format is as follows. Each line consists of:# |# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb# | host: Either the DNS name or the IP number of the remote client# | rfc931: Any information returned by identd for this person,# | - otherwise.# | authuser: If user sent a userid for authentication, the user name,# | - otherwise.# | DD: Day# | Mon: Month (calendar name)# | YYYY: Year# | hh: hour (24-hour format, the machine's timezone)# | mm: minutes# | ss: seconds# | request: The first line of the HTTP request as sent by the client.# | ddd: the status code returned by the server, - if not available.# | bbbb: the total number of bytes sent,# | *not including the HTTP/1.0 header*, - if not available# | You can determine the name of the file accessed through request.# (Actually, the latter is only true if you know the server configuration# at the time the request was made!)# For gethostbyaddr()# Default error message template# Seems to make sense in testing environment# The Python system version, truncated to its first component.# The server software version. You may want to override this.# The format is multiple whitespace-separated strings,# where each string is of the form name[/version].# The default request version. This only affects responses up until# the point where the request line is parsed, so it mainly decides what# the client gets back when sending a malformed request line.# Most web servers default to HTTP 0.9, i.e. don't send a status line.# set in case of error on the first line# Enough to determine protocol version# RFC 2145 section 3.1 says there can be only one "." and# - major and minor numbers MUST be treated as# separate integers;# - HTTP/2.4 is a lower version than HTTP/2.13, which in# turn is lower than HTTP/12.3;# - Leading zeros MUST be ignored by recipients.# Examine the headers and look for a Connection directive.# Examine the headers and look for an Expect directive# An error code has been sent, just exit#actually send the response if not already done.#a read or a write timed out. Discard this connection# Message body is omitted for cases described in:# - RFC7230: 3.3. 1xx, 204(No Content), 304(Not Modified)# - RFC7231: 6.3.6. 205(Reset Content)# HTML encode to prevent Cross Site Scripting attacks# (see bug #1100201)# Essentially static class variables# The version of the HTTP protocol we support.# Set this to HTTP/1.1 to enable automatic keepalive# MessageClass used to parse headers# redirect browser - doing basically what apache does# check for trailing "/" which should return 404. 
See Issue17324# The test for this was added in test_httpserver.py# However, some OS platforms accept a trailingSlash as a filename# See discussion on python-dev and Issue34711 regarding# parseing and rejection of filenames with a trailing slash# Use browser cache if possible# compare If-Modified-Since and time of last file modification# ignore ill-formed values# obsolete format with no timezone, cf.# https://tools.ietf.org/html/rfc7231#section-7.1.1.1# compare to UTC datetime of last modification# remove microseconds, like in If-Modified-Since# Append / for directories or @ for symbolic links# Note: a link to a directory displays with @ and links with /# abandon query parameters# Don't forget explicit trailing slash when normalizing. Issue17324# Ignore components that are not a simple file/directory name# try to read system mime.types# Default# Utilities for CGIHTTPRequestHandler# Query component should not be involved.# Similar to os.path.split(os.path.normpath(path)) but specific to URL# path semantics rather than local operating system semantics.# IndexError if more '..' than prior parts# Determine platform specifics# Make rfile unbuffered -- we need to read one line and then pass# the rest to a subprocess, so we can't use buffered input.# find an explicit query string, if present.# dissect the part after the directory name into a script name &# a possible additional path, to be stored in PATH_INFO.# Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html# XXX Much of the following could be prepared ahead of time!# XXX REMOTE_IDENT# XXX Other HTTP_* headers# Since we're setting the env in the parent, provide empty# values to override previously set values# Unix -- fork as we should# Always flush before forking# throw away additional data [see bug #427345]# Non-Unix -- use subprocess# On Windows, use python.exe, not pythonw.exe# ensure dual-stack is not disabled; ref #38907# suppress exception when protocol is IPv4b'HTTP server classes. + +Note: BaseHTTPRequestHandler doesn't implement any HTTP request; see +SimpleHTTPRequestHandler for simple implementations of GET, HEAD and POST, +and CGIHTTPRequestHandler for CGI scripts. + +It does, however, optionally implement HTTP/1.1 persistent connections, +as of version 0.3. + +Notes on CGIHTTPRequestHandler +------------------------------ + +This class implements GET and POST requests to cgi-bin scripts. + +If the os.fork() function is not present (e.g. on Windows), +subprocess.Popen() is used as a fallback, with slightly altered semantics. + +In all cases, the implementation is intentionally naive -- all +requests are executed synchronously. + +SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL +-- it may execute arbitrary Python code or external programs. + +Note that status code 200 is sent prior to execution of a CGI script, so +scripts cannot send other status codes such as 302 (redirect). + +XXX To do: + +- log requests even later (to capture byte count) +- log user-agent header and other interesting goodies +- send error log to separate file +'u'HTTP server classes. + +Note: BaseHTTPRequestHandler doesn't implement any HTTP request; see +SimpleHTTPRequestHandler for simple implementations of GET, HEAD and POST, +and CGIHTTPRequestHandler for CGI scripts. + +It does, however, optionally implement HTTP/1.1 persistent connections, +as of version 0.3. + +Notes on CGIHTTPRequestHandler +------------------------------ + +This class implements GET and POST requests to cgi-bin scripts. 
+ +If the os.fork() function is not present (e.g. on Windows), +subprocess.Popen() is used as a fallback, with slightly altered semantics. + +In all cases, the implementation is intentionally naive -- all +requests are executed synchronously. + +SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL +-- it may execute arbitrary Python code or external programs. + +Note that status code 200 is sent prior to execution of a CGI script, so +scripts cannot send other status codes such as 302 (redirect). + +XXX To do: + +- log requests even later (to capture byte count) +- log user-agent header and other interesting goodies +- send error log to separate file +'b'0.6'u'0.6'b'HTTPServer'u'HTTPServer'b'ThreadingHTTPServer'u'ThreadingHTTPServer'b'BaseHTTPRequestHandler'u'BaseHTTPRequestHandler'b' + + + + Error response + + +

Error response

+

Error code: %(code)d

+

Message: %(message)s.

+

Error code explanation: %(code)s - %(explain)s.

+ + +'u' + + + + Error response + + +

Error response

+

Error code: %(code)d

+

Message: %(message)s.

+

Error code explanation: %(code)s - %(explain)s.

[CodeQL Python string-pool data: interned http.server source text stored as byte/unicode literal pairs: the "text/html;charset=utf-8" content type, the server_bind override docstring, the full BaseHTTPRequestHandler class docstring (request and response formats, do_<COMMAND> dispatch, and the required write order of response line, headers, blank line, body), plus the docstrings and error strings for parse_request, handle_expect_100, handle_one_request, send_error, send_response, send_header, end_headers, the log_* helpers and the date/address formatting helpers, ending with the opening of the SimpleHTTPRequestHandler docstring.]
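The handler docstrings summarised above spell out the write protocol: the response line first, then zero or more headers, a blank line, and finally the body, with each HTTP command dispatched to a do_<COMMAND> method. A minimal sketch of a handler that follows that order; the body text, bind address and port are arbitrary choices for illustration, not anything taken from this database.

from http.server import BaseHTTPRequestHandler, HTTPServer

class HelloHandler(BaseHTTPRequestHandler):
    # A request line "GET / HTTP/1.1" is dispatched to do_GET(); an unknown
    # command gets a 501 error response from the base class.
    def do_GET(self):
        body = b"<html><body><h1>hello</h1></body></html>"
        self.send_response(200)                      # response line (plus Server/Date headers)
        self.send_header("Content-Type", "text/html; charset=utf-8")
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()                           # blank line ending the headers
        self.wfile.write(body)                       # the actual data

if __name__ == "__main__":
    # Serve on localhost:8000 until interrupted.
    HTTPServer(("127.0.0.1", 8000), HelloHandler).serve_forever()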
[CodeQL Python string-pool data: the remainder of the SimpleHTTPRequestHandler docstrings (do_GET, do_HEAD, send_head, list_directory with its directory-listing HTML fragments, translate_path, copyfile, guess_type, _url_collapse_path), followed by the CGIHTTPRequestHandler docstrings, the CGI environment variable names (SERVER_SOFTWARE, GATEWAY_INTERFACE, PATH_INFO, QUERY_STRING, ...), and the test() docstring.]
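SimpleHTTPRequestHandler, described above, serves files from a directory tree, and the test() helper wires a handler class to an HTTPServer. A rough sketch of serving a local directory programmatically; the ./public path and the port are assumptions for illustration, and the directory keyword argument exists on Python 3.7+.

from functools import partial
from http.server import ThreadingHTTPServer, SimpleHTTPRequestHandler

# Bind the handler to a specific document root instead of the current directory.
Handler = partial(SimpleHTTPRequestHandler, directory="./public")

if __name__ == "__main__":
    with ThreadingHTTPServer(("127.0.0.1", 8000), Handler) as httpd:
        print("Serving HTTP on 127.0.0.1 port 8000 (http://127.0.0.1:8000/) ...")
        httpd.serve_forever()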
[CodeQL Python string-pool data: the http.server "Serving HTTP on ... port ..." startup and keyboard-interrupt messages and the --cgi/--bind/--directory/port command-line option strings, followed by the opening of the shlex module: the lexer class docstring, its wordchars and punctuation_chars tables, and the push_token/push_source/pop_source/get_token/read_token docstrings and state-machine debug messages.]
[CodeQL Python string-pool data: the rest of the shlex module: extractor comments on tokenising, source inclusion and POSIX quoting, and byte/unicode duplicates of the module docstrings, including split(), join() and quote(), followed by the opening of the shutil module docstring.]
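The shlex entries above cover the lexer class and the module-level split(), join() and quote() helpers. A small sketch of those helpers, assuming Python 3.8+ for join(); the command strings are made up for illustration.

import shlex

# split() tokenises a command line the way a POSIX shell would.
args = shlex.split("grep -n 'hello world' notes.txt")
print(args)                        # ['grep', '-n', 'hello world', 'notes.txt']

# quote() escapes one argument so it survives a trip through the shell;
# join() applies quote() to every element and stitches them back together.
risky = "file name; rm -rf /"
print(shlex.quote(risky))          # 'file name; rm -rf /'
print(shlex.join(["ls", "-l", risky]))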
[CodeQL Python string-pool data: interned shutil source text: feature flags (_ZLIB_SUPPORTED, _USE_CP_SENDFILE, ...), the SameFileError/SpecialFileError/ExecError/ReadError/RegistryError exception docstrings, and docstrings for the fast-copy helpers (_fastcopy_fcopyfile, _fastcopy_sendfile), copyfileobj, copyfile, copymode, copystat, copy, copy2, ignore_patterns, copytree, rmtree, move, the tar and zip archive builders, make_archive, the archive and unpack format registries, unpack_archive, disk_usage, chown, get_terminal_size and which.]
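The shutil docstrings summarised above describe the copy, tree and archive utilities. A self-contained sketch that exercises a few of them inside a throw-away temporary directory; the file and archive names are arbitrary.

import os
import shutil
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    src = os.path.join(tmp, "src")
    os.makedirs(os.path.join(src, "sub"))
    with open(os.path.join(src, "sub", "hello.txt"), "w") as fh:
        fh.write("hello\n")

    # copytree() copies the whole tree; copy2(), the default copy_function,
    # also carries timestamps and permission bits across via copystat().
    dst = shutil.copytree(src, os.path.join(tmp, "dst"))

    # make_archive() packs the tree into bundle.tar.gz; unpack_archive()
    # reverses it, choosing the unpacker from the file extension.
    archive = shutil.make_archive(os.path.join(tmp, "bundle"), "gztar", root_dir=dst)
    restored = os.path.join(tmp, "restored")
    os.makedirs(restored)
    shutil.unpack_archive(archive, restored)

    # rmtree() removes a directory tree recursively.
    shutil.rmtree(dst)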
copy2 will raise an error# Will raise a SpecialFileError for unsupported file types# catch the Error from the recursive copytree so that we can# continue with other files# Copying file access times may fail on Windows# Special handling for directory junctions to make them behave like# symlinks for shutil.rmtree, since in general they do not appear as# regular links.# version vulnerable to race conditions# This can only happen if someone replaces# a directory with a symlink after the call to# os.scandir or entry.is_dir above.# Version using fd-based APIs to protect against races# os.scandir or stat.S_ISDIR above.# While the unsafe rmtree works fine on bytes, the fd based does not.# symlinks to directories are forbidden, see bug #1669# can't continue even if onerror hook returns# Allow introspection of whether or not the hardening against symlink# attacks is supported on the current platform# A basename() variant which first strips the trailing slash, if present.# Thus we always get the last component of the path, even for directories.# We might be on a case insensitive filesystem,# perform the rename anyway.# late import for breaking circular dependency# creating the tarball# first make sure no other unpacker is registered for this extension# don't extract absolute paths or ones with .. in them# file# we need to look at the registered unpackers supported extensions# -1 means don't change it# user can either be an int (the uid) or a string (the system username)# columns, lines are the working values# only query if necessary# stdout is None, closed, detached, or not a terminal, or# os.get_terminal_size() is unsupported# Check that a given file can be accessed with the correct mode.# Additionally check that `file` is not a directory, as on Windows# directories pass the os.access check.# If we're given a path with a directory part, look it up directly rather# than referring to PATH directories. This includes checking relative to the# current directory, e.g. ./script# os.confstr() or CS_PATH is not available# bpo-35755: Don't use os.defpath if the PATH environment variable is# set to an empty string# PATH='' doesn't match, whereas PATH=':' looks in the current directory# The current directory takes precedence on Windows.# PATHEXT is necessary to check on Windows.# See if the given file matches any of the expected path extensions.# This will allow us to short circuit when given "python.exe".# If it does match, only test that one, otherwise we have to try# others.# On other platforms you don't have things like PATHEXT to tell you# what file suffixes are executable, so just pass on cmd as-is.b'Utility functions for copying and archiving files and directory trees. + +XXX The functions here don't copy the resource fork or other metadata on Mac. + +'u'Utility functions for copying and archiving files and directory trees. + +XXX The functions here don't copy the resource fork or other metadata on Mac. 
+ +'b'sendfile'u'sendfile'b'_fcopyfile'u'_fcopyfile'b'.COM;.EXE;.BAT;.CMD;.VBS;.JS;.WS;.MSC'u'.COM;.EXE;.BAT;.CMD;.VBS;.JS;.WS;.MSC'b'copyfileobj'u'copyfileobj'b'copyfile'u'copyfile'b'copymode'u'copymode'b'copystat'u'copystat'b'copy2'u'copy2'b'copytree'u'copytree'b'SpecialFileError'u'SpecialFileError'b'ExecError'u'ExecError'b'make_archive'u'make_archive'b'get_archive_formats'u'get_archive_formats'b'register_archive_format'u'register_archive_format'b'unregister_archive_format'u'unregister_archive_format'b'get_unpack_formats'u'get_unpack_formats'b'register_unpack_format'u'register_unpack_format'b'unregister_unpack_format'u'unregister_unpack_format'b'unpack_archive'u'unpack_archive'b'ignore_patterns'u'ignore_patterns'b'which'u'which'b'get_terminal_size'u'get_terminal_size'b'SameFileError'u'SameFileError'b'Raised when source and destination are the same file.'u'Raised when source and destination are the same file.'b'Raised when trying to do a kind of operation (e.g. copying) which is + not supported on a special file (e.g. a named pipe)'u'Raised when trying to do a kind of operation (e.g. copying) which is + not supported on a special file (e.g. a named pipe)'b'Raised when a command could not be executed'u'Raised when a command could not be executed'b'Raised when an archive cannot be read'u'Raised when an archive cannot be read'b'Raised when a registry operation with the archiving + and unpacking registries fails'u'Raised when a registry operation with the archiving + and unpacking registries fails'b'Raised as a signal to fallback on using raw read()/write() + file copy when fast-copy functions fail to do so. + 'u'Raised as a signal to fallback on using raw read()/write() + file copy when fast-copy functions fail to do so. + 'b'Copy a regular file content or metadata by using high-performance + fcopyfile(3) syscall (macOS). + 'u'Copy a regular file content or metadata by using high-performance + fcopyfile(3) syscall (macOS). + 'b'Copy data from one regular mmap-like fd to another by using + high-performance sendfile(2) syscall. + This should work on Linux >= 2.6.33 only. + 'u'Copy data from one regular mmap-like fd to another by using + high-performance sendfile(2) syscall. + This should work on Linux >= 2.6.33 only. + 'b'readinto()/memoryview() based variant of copyfileobj(). + *fsrc* must support readinto() method and both files must be + open in binary mode. + 'u'readinto()/memoryview() based variant of copyfileobj(). + *fsrc* must support readinto() method and both files must be + open in binary mode. + 'b'copy data from file-like object fsrc to file-like object fdst'u'copy data from file-like object fsrc to file-like object fdst'b'Copy data from src to dst in the most efficient way possible. + + If follow_symlinks is not set and src is a symbolic link, a new + symlink will be created instead of copying the file it points to. + + 'u'Copy data from src to dst in the most efficient way possible. + + If follow_symlinks is not set and src is a symbolic link, a new + symlink will be created instead of copying the file it points to. + + 'b'shutil.copyfile'u'shutil.copyfile'b'{!r} and {!r} are the same file'u'{!r} and {!r} are the same file'b'`%s` is a named pipe'u'`%s` is a named pipe'b'Copy mode bits from src to dst. + + If follow_symlinks is not set, symlinks aren't followed if and only + if both `src` and `dst` are symlinks. If `lchmod` isn't available + (e.g. Linux) this method does nothing. + + 'u'Copy mode bits from src to dst. 
+ + If follow_symlinks is not set, symlinks aren't followed if and only + if both `src` and `dst` are symlinks. If `lchmod` isn't available + (e.g. Linux) this method does nothing. + + 'b'shutil.copymode'u'shutil.copymode'b'lchmod'u'lchmod'b'listxattr'u'listxattr'b'Copy extended filesystem attributes from `src` to `dst`. + + Overwrite existing attributes. + + If `follow_symlinks` is false, symlinks won't be followed. + + 'u'Copy extended filesystem attributes from `src` to `dst`. + + Overwrite existing attributes. + + If `follow_symlinks` is false, symlinks won't be followed. + + 'b'Copy file metadata + + Copy the permission bits, last access time, last modification time, and + flags from `src` to `dst`. On Linux, copystat() also copies the "extended + attributes" where possible. The file contents, owner, and group are + unaffected. `src` and `dst` are path-like objects or path names given as + strings. + + If the optional flag `follow_symlinks` is not set, symlinks aren't + followed if and only if both `src` and `dst` are symlinks. + 'u'Copy file metadata + + Copy the permission bits, last access time, last modification time, and + flags from `src` to `dst`. On Linux, copystat() also copies the "extended + attributes" where possible. The file contents, owner, and group are + unaffected. `src` and `dst` are path-like objects or path names given as + strings. + + If the optional flag `follow_symlinks` is not set, symlinks aren't + followed if and only if both `src` and `dst` are symlinks. + 'b'shutil.copystat'u'shutil.copystat'b'st_flags'u'st_flags'b'EOPNOTSUPP'u'EOPNOTSUPP'b'ENOTSUP'u'ENOTSUP'b'Copy data and mode bits ("cp src dst"). Return the file's destination. + + The destination may be a directory. + + If follow_symlinks is false, symlinks won't be followed. This + resembles GNU's "cp -P src dst". + + If source and destination are the same file, a SameFileError will be + raised. + + 'u'Copy data and mode bits ("cp src dst"). Return the file's destination. + + The destination may be a directory. + + If follow_symlinks is false, symlinks won't be followed. This + resembles GNU's "cp -P src dst". + + If source and destination are the same file, a SameFileError will be + raised. + + 'b'Copy data and metadata. Return the file's destination. + + Metadata is copied with copystat(). Please see the copystat function + for more information. + + The destination may be a directory. + + If follow_symlinks is false, symlinks won't be followed. This + resembles GNU's "cp -P src dst". + 'u'Copy data and metadata. Return the file's destination. + + Metadata is copied with copystat(). Please see the copystat function + for more information. + + The destination may be a directory. + + If follow_symlinks is false, symlinks won't be followed. This + resembles GNU's "cp -P src dst". + 'b'Function that can be used as copytree() ignore parameter. + + Patterns is a sequence of glob-style patterns + that are used to exclude files'u'Function that can be used as copytree() ignore parameter. + + Patterns is a sequence of glob-style patterns + that are used to exclude files'b'winerror'u'winerror'b'Recursively copy a directory tree and return the destination directory. + + dirs_exist_ok dictates whether to raise an exception in case dst or any + missing parent directory already exists. + + If exception(s) occur, an Error is raised with a list of reasons. 
+ + If the optional symlinks flag is true, symbolic links in the + source tree result in symbolic links in the destination tree; if + it is false, the contents of the files pointed to by symbolic + links are copied. If the file pointed by the symlink doesn't + exist, an exception will be added in the list of errors raised in + an Error exception at the end of the copy process. + + You can set the optional ignore_dangling_symlinks flag to true if you + want to silence this exception. Notice that this has no effect on + platforms that don't support os.symlink. + + The optional ignore argument is a callable. If given, it + is called with the `src` parameter, which is the directory + being visited by copytree(), and `names` which is the list of + `src` contents, as returned by os.listdir(): + + callable(src, names) -> ignored_names + + Since copytree() is called recursively, the callable will be + called once for each directory that is copied. It returns a + list of names relative to the `src` directory that should + not be copied. + + The optional copy_function argument is a callable that will be used + to copy each file. It will be called with the source path and the + destination path as arguments. By default, copy2() is used, but any + function that supports the same signature (like copy()) can be used. + + 'u'Recursively copy a directory tree and return the destination directory. + + dirs_exist_ok dictates whether to raise an exception in case dst or any + missing parent directory already exists. + + If exception(s) occur, an Error is raised with a list of reasons. + + If the optional symlinks flag is true, symbolic links in the + source tree result in symbolic links in the destination tree; if + it is false, the contents of the files pointed to by symbolic + links are copied. If the file pointed by the symlink doesn't + exist, an exception will be added in the list of errors raised in + an Error exception at the end of the copy process. + + You can set the optional ignore_dangling_symlinks flag to true if you + want to silence this exception. Notice that this has no effect on + platforms that don't support os.symlink. + + The optional ignore argument is a callable. If given, it + is called with the `src` parameter, which is the directory + being visited by copytree(), and `names` which is the list of + `src` contents, as returned by os.listdir(): + + callable(src, names) -> ignored_names + + Since copytree() is called recursively, the callable will be + called once for each directory that is copied. It returns a + list of names relative to the `src` directory that should + not be copied. + + The optional copy_function argument is a callable that will be used + to copy each file. It will be called with the source path and the + destination path as arguments. By default, copy2() is used, but any + function that supports the same signature (like copy()) can be used. + + 'b'shutil.copytree'u'shutil.copytree'b'st_file_attributes'u'st_file_attributes'b'Cannot call rmtree on a symbolic link'u'Cannot call rmtree on a symbolic link'b'Recursively delete a directory tree. + + If ignore_errors is set, errors are ignored; otherwise, if onerror + is set, it is called to handle the error with arguments (func, + path, exc_info) where func is platform and implementation dependent; + path is the argument to that function that caused it to fail; and + exc_info is a tuple returned by sys.exc_info(). If ignore_errors + is false and onerror is None, an exception is raised. 
+ + 'u'Recursively delete a directory tree. + + If ignore_errors is set, errors are ignored; otherwise, if onerror + is set, it is called to handle the error with arguments (func, + path, exc_info) where func is platform and implementation dependent; + path is the argument to that function that caused it to fail; and + exc_info is a tuple returned by sys.exc_info(). If ignore_errors + is false and onerror is None, an exception is raised. + + 'b'shutil.rmtree'u'shutil.rmtree'b'Recursively move a file or directory to another location. This is + similar to the Unix "mv" command. Return the file or directory's + destination. + + If the destination is a directory or a symlink to a directory, the source + is moved inside the directory. The destination path must not already + exist. + + If the destination already exists but is not a directory, it may be + overwritten depending on os.rename() semantics. + + If the destination is on our current filesystem, then rename() is used. + Otherwise, src is copied to the destination and then removed. Symlinks are + recreated under the new name if os.rename() fails because of cross + filesystem renames. + + The optional `copy_function` argument is a callable that will be used + to copy the source or it will be delegated to `copytree`. + By default, copy2() is used, but any function that supports the same + signature (like copy()) can be used. + + A lot more could be done here... A look at a mv.c shows a lot of + the issues this implementation glosses over. + + 'u'Recursively move a file or directory to another location. This is + similar to the Unix "mv" command. Return the file or directory's + destination. + + If the destination is a directory or a symlink to a directory, the source + is moved inside the directory. The destination path must not already + exist. + + If the destination already exists but is not a directory, it may be + overwritten depending on os.rename() semantics. + + If the destination is on our current filesystem, then rename() is used. + Otherwise, src is copied to the destination and then removed. Symlinks are + recreated under the new name if os.rename() fails because of cross + filesystem renames. + + The optional `copy_function` argument is a callable that will be used + to copy the source or it will be delegated to `copytree`. + By default, copy2() is used, but any function that supports the same + signature (like copy()) can be used. + + A lot more could be done here... A look at a mv.c shows a lot of + the issues this implementation glosses over. + + 'b'shutil.move'u'shutil.move'b'Destination path '%s' already exists'u'Destination path '%s' already exists'b'Cannot move a directory '%s' into itself '%s'.'u'Cannot move a directory '%s' into itself '%s'.'b'Cannot move the non-empty directory '%s': Lacking write permission to '%s'.'u'Cannot move the non-empty directory '%s': Lacking write permission to '%s'.'b'Returns a gid, given a group name.'u'Returns a gid, given a group name.'b'Returns an uid, given a user name.'u'Returns an uid, given a user name.'b'Create a (possibly compressed) tar file from all the files under + 'base_dir'. + + 'compress' must be "gzip" (the default), "bzip2", "xz", or None. + + 'owner' and 'group' can be used to define an owner and a group for the + archive that is being built. If not provided, the current owner and group + will be used. + + The output tar file will be named 'base_name' + ".tar", possibly plus + the appropriate compression extension (".gz", ".bz2", or ".xz"). 
+ + Returns the output filename. + 'u'Create a (possibly compressed) tar file from all the files under + 'base_dir'. + + 'compress' must be "gzip" (the default), "bzip2", "xz", or None. + + 'owner' and 'group' can be used to define an owner and a group for the + archive that is being built. If not provided, the current owner and group + will be used. + + The output tar file will be named 'base_name' + ".tar", possibly plus + the appropriate compression extension (".gz", ".bz2", or ".xz"). + + Returns the output filename. + 'b'gz'u'gz'b'bad value for 'compress', or compression format not supported : {0}'u'bad value for 'compress', or compression format not supported : {0}'b'Creating tar archive'u'Creating tar archive'b'w|%s'u'w|%s'b'Create a zip file from all the files under 'base_dir'. + + The output zip file will be named 'base_name' + ".zip". Returns the + name of the output zip file. + 'u'Create a zip file from all the files under 'base_dir'. + + The output zip file will be named 'base_name' + ".zip". Returns the + name of the output zip file. + 'b'creating '%s' and adding '%s' to it'u'creating '%s' and adding '%s' to it'b'adding '%s''u'adding '%s''b'uncompressed tar file'u'uncompressed tar file'b'tar'u'tar'b'gzip'ed tar-file'u'gzip'ed tar-file'b'gztar'u'gztar'b'ZIP file'u'ZIP file'b'bzip2'ed tar-file'u'bzip2'ed tar-file'b'bztar'u'bztar'b'xz'ed tar-file'u'xz'ed tar-file'b'xztar'u'xztar'b'Returns a list of supported formats for archiving and unarchiving. + + Each element of the returned sequence is a tuple (name, description) + 'u'Returns a list of supported formats for archiving and unarchiving. + + Each element of the returned sequence is a tuple (name, description) + 'b'Registers an archive format. + + name is the name of the format. function is the callable that will be + used to create archives. If provided, extra_args is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_archive_formats() function. + 'u'Registers an archive format. + + name is the name of the format. function is the callable that will be + used to create archives. If provided, extra_args is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_archive_formats() function. + 'b'The %s object is not callable'u'The %s object is not callable'b'extra_args needs to be a sequence'u'extra_args needs to be a sequence'b'extra_args elements are : (arg_name, value)'u'extra_args elements are : (arg_name, value)'b'Create an archive file (eg. zip or tar). + + 'base_name' is the name of the file to create, minus any format-specific + extension; 'format' is the archive format: one of "zip", "tar", "gztar", + "bztar", or "xztar". Or any other registered format. + + 'root_dir' is a directory that will be the root directory of the + archive; ie. we typically chdir into 'root_dir' before creating the + archive. 'base_dir' is the directory where we start archiving from; + ie. 'base_dir' will be the common prefix of all files and + directories in the archive. 'root_dir' and 'base_dir' both default + to the current directory. Returns the name of the archive file. + + 'owner' and 'group' are used when creating a tar archive. By default, + uses the current owner and group. + 'u'Create an archive file (eg. zip or tar). 
+ + 'base_name' is the name of the file to create, minus any format-specific + extension; 'format' is the archive format: one of "zip", "tar", "gztar", + "bztar", or "xztar". Or any other registered format. + + 'root_dir' is a directory that will be the root directory of the + archive; ie. we typically chdir into 'root_dir' before creating the + archive. 'base_dir' is the directory where we start archiving from; + ie. 'base_dir' will be the common prefix of all files and + directories in the archive. 'root_dir' and 'base_dir' both default + to the current directory. Returns the name of the archive file. + + 'owner' and 'group' are used when creating a tar archive. By default, + uses the current owner and group. + 'b'shutil.make_archive'u'shutil.make_archive'b'changing into '%s''u'changing into '%s''b'dry_run'u'dry_run'b'logger'u'logger'b'unknown archive format '%s''u'unknown archive format '%s''b'owner'u'owner'b'changing back to '%s''u'changing back to '%s''b'Returns a list of supported formats for unpacking. + + Each element of the returned sequence is a tuple + (name, extensions, description) + 'u'Returns a list of supported formats for unpacking. + + Each element of the returned sequence is a tuple + (name, extensions, description) + 'b'Checks what gets registered as an unpacker.'u'Checks what gets registered as an unpacker.'b'%s is already registered for "%s"'u'%s is already registered for "%s"'b'The registered function must be a callable'u'The registered function must be a callable'b'Registers an unpack format. + + `name` is the name of the format. `extensions` is a list of extensions + corresponding to the format. + + `function` is the callable that will be + used to unpack archives. The callable will receive archives to unpack. + If it's unable to handle an archive, it needs to raise a ReadError + exception. + + If provided, `extra_args` is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_unpack_formats() function. + 'u'Registers an unpack format. + + `name` is the name of the format. `extensions` is a list of extensions + corresponding to the format. + + `function` is the callable that will be + used to unpack archives. The callable will receive archives to unpack. + If it's unable to handle an archive, it needs to raise a ReadError + exception. + + If provided, `extra_args` is a sequence of + (name, value) tuples that will be passed as arguments to the callable. + description can be provided to describe the format, and will be returned + by the get_unpack_formats() function. + 'b'Removes the pack format from the registry.'u'Removes the pack format from the registry.'b'Ensure that the parent directory of `path` exists'u'Ensure that the parent directory of `path` exists'b'Unpack zip `filename` to `extract_dir` + 'u'Unpack zip `filename` to `extract_dir` + 'b'%s is not a zip file'u'%s is not a zip file'b'Unpack tar/tar.gz/tar.bz2/tar.xz `filename` to `extract_dir` + 'u'Unpack tar/tar.gz/tar.bz2/tar.xz `filename` to `extract_dir` + 'b'%s is not a compressed or uncompressed tar file'u'%s is not a compressed or uncompressed tar file'b'Unpack an archive. + + `filename` is the name of the archive. + + `extract_dir` is the name of the target directory, where the archive + is unpacked. If not provided, the current working directory is used. + + `format` is the archive format: one of "zip", "tar", "gztar", "bztar", + or "xztar". Or any other registered format. 
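make_archive(), as documented above, combines base_name, format, root_dir and base_dir; a minimal sketch with hypothetical paths:

import shutil

archive_path = shutil.make_archive(
    "dist/myproject",     # base_name: output name minus the format extension
    "gztar",              # "zip", "tar", "gztar", "bztar", "xztar", or a registered format
    root_dir=".",         # directory that becomes the archive root
    base_dir="myproject", # common prefix of everything stored in the archive
)
print(archive_path)       # e.g. dist/myproject.tar.gz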
If not provided, + unpack_archive will use the filename extension and see if an unpacker + was registered for that extension. + + In case none is found, a ValueError is raised. + 'u'Unpack an archive. + + `filename` is the name of the archive. + + `extract_dir` is the name of the target directory, where the archive + is unpacked. If not provided, the current working directory is used. + + `format` is the archive format: one of "zip", "tar", "gztar", "bztar", + or "xztar". Or any other registered format. If not provided, + unpack_archive will use the filename extension and see if an unpacker + was registered for that extension. + + In case none is found, a ValueError is raised. + 'b'shutil.unpack_archive'u'shutil.unpack_archive'b'Unknown unpack format '{0}''u'Unknown unpack format '{0}''b'Unknown archive format '{0}''u'Unknown archive format '{0}''b'disk_usage'u'disk_usage'b'total used free'u'total used free'b'Total space in bytes'u'Total space in bytes'b'Used space in bytes'u'Used space in bytes'b'Free space in bytes'u'Free space in bytes'b'Return disk usage statistics about the given path. + + Returned value is a named tuple with attributes 'total', 'used' and + 'free', which are the amount of total, used and free space, in bytes. + 'u'Return disk usage statistics about the given path. + + Returned value is a named tuple with attributes 'total', 'used' and + 'free', which are the amount of total, used and free space, in bytes. + 'b'Return disk usage statistics about the given path. + + Returned values is a named tuple with attributes 'total', 'used' and + 'free', which are the amount of total, used and free space, in bytes. + 'u'Return disk usage statistics about the given path. + + Returned values is a named tuple with attributes 'total', 'used' and + 'free', which are the amount of total, used and free space, in bytes. + 'b'Change owner user and group of the given path. + + user and group can be the uid/gid or the user/group names, and in that case, + they are converted to their respective uid/gid. + 'u'Change owner user and group of the given path. + + user and group can be the uid/gid or the user/group names, and in that case, + they are converted to their respective uid/gid. + 'b'shutil.chown'u'shutil.chown'b'user and/or group must be set'u'user and/or group must be set'b'no such user: {!r}'u'no such user: {!r}'b'no such group: {!r}'u'no such group: {!r}'b'Get the size of the terminal window. + + For each of the two dimensions, the environment variable, COLUMNS + and LINES respectively, is checked. If the variable is defined and + the value is a positive integer, it is used. + + When COLUMNS or LINES is not defined, which is the common case, + the terminal connected to sys.__stdout__ is queried + by invoking os.get_terminal_size. + + If the terminal size cannot be successfully queried, either because + the system doesn't support querying, or because we are not + connected to a terminal, the value given in fallback parameter + is used. Fallback defaults to (80, 24) which is the default + size used by many terminal emulators. + + The value returned is a named tuple of type os.terminal_size. + 'u'Get the size of the terminal window. + + For each of the two dimensions, the environment variable, COLUMNS + and LINES respectively, is checked. If the variable is defined and + the value is a positive integer, it is used. + + When COLUMNS or LINES is not defined, which is the common case, + the terminal connected to sys.__stdout__ is queried + by invoking os.get_terminal_size. 
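unpack_archive(), disk_usage() and chown(), described above, fit together in a small sketch; the archive name is hypothetical and the chown() call is commented out because it needs a matching user and sufficient privileges:

import shutil

# Format is inferred from the .tar.gz extension when not given explicitly.
shutil.unpack_archive("dist/myproject.tar.gz", extract_dir="unpacked")

# disk_usage() returns a named tuple of total/used/free bytes.
usage = shutil.disk_usage("/")
print(f"total={usage.total} used={usage.used} free={usage.free}")

# chown() accepts user/group names or numeric ids (Unix only).
# shutil.chown("unpacked", user="www-data", group="www-data")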
+ + If the terminal size cannot be successfully queried, either because + the system doesn't support querying, or because we are not + connected to a terminal, the value given in fallback parameter + is used. Fallback defaults to (80, 24) which is the default + size used by many terminal emulators. + + The value returned is a named tuple of type os.terminal_size. + 'b'Given a command, mode, and a PATH string, return the path which + conforms to the given mode on the PATH, or None if there is no such + file. + + `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result + of os.environ.get("PATH"), or can be overridden with a custom search + path. + + 'u'Given a command, mode, and a PATH string, return the path which + conforms to the given mode on the PATH, or None if there is no such + file. + + `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result + of os.environ.get("PATH"), or can be overridden with a custom search + path. + + 'b'CS_PATH'u'CS_PATH'b'PATHEXT'u'PATHEXT'u'shutil'_wraps_IntEnumSignalsSIG_CTRL_HandlersSigmasks_int_to_enumenum_klassConvert a numeric value to an IntEnum member. + If it's not a known member, return the numeric value itself. + _enum_to_intConvert an IntEnum member to a numeric value. + If it's not an IntEnum member return the value itself. + signalnumhowsigs_setsigsetretsigb'Signals'u'Signals'b'SIG_'u'SIG_'b'CTRL_'u'CTRL_'b'Handlers'u'Handlers'b'SIG_DFL'u'SIG_DFL'b'SIG_IGN'u'SIG_IGN'b'Sigmasks'u'Sigmasks'b'SIG_BLOCK'u'SIG_BLOCK'b'SIG_UNBLOCK'u'SIG_UNBLOCK'b'SIG_SETMASK'u'SIG_SETMASK'b'Convert a numeric value to an IntEnum member. + If it's not a known member, return the numeric value itself. + 'u'Convert a numeric value to an IntEnum member. + If it's not a known member, return the numeric value itself. + 'b'Convert an IntEnum member to a numeric value. + If it's not an IntEnum member return the value itself. + 'u'Convert an IntEnum member to a numeric value. + If it's not an IntEnum member return the value itself. + 'b'sigpending'u'sigpending'b'sigwait'u'sigwait'b'valid_signals'u'valid_signals'_InterruptHandlerdefault_handlercalledoriginal_handlerunused_signumunused_frameexpected SIGINT signal handler to be signal.SIG_IGN, signal.SIG_DFL, or a callable object"expected SIGINT signal handler to be ""signal.SIG_IGN, signal.SIG_DFL, or a ""callable object"installed_handler_results_interrupt_handler# Pretend it's signal.default_int_handler instead.# Not quite the same thing as SIG_IGN, but the closest we# can make it: do nothing.# if we aren't the installed handler, then delegate immediately# to the default handlerb'expected SIGINT signal handler to be signal.SIG_IGN, signal.SIG_DFL, or a callable object'u'expected SIGINT signal handler to be signal.SIG_IGN, signal.SIG_DFL, or a callable object'u'unittest.signals'u'signals'This module provides socket operations and some related functions. +On Unix, it supports IP (Internet Protocol) and Unix domain sockets. +On other systems, it only supports IP. Functions specific for a +socket are available as methods of the socket object. 
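get_terminal_size() and which(), documented above, in a short sketch:

import shutil

# COLUMNS/LINES are consulted first, then the terminal on sys.__stdout__,
# then the fallback; the result is an os.terminal_size named tuple.
columns, lines = shutil.get_terminal_size(fallback=(80, 24))
print(f"{columns}x{lines}")

# which() searches PATH (or an explicit path string) for an executable that
# satisfies the requested mode, honouring PATHEXT on Windows.
print(shutil.which("python3"))
print(shutil.which("python3", path="/usr/bin:/usr/local/bin"))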
+ +Functions: + +socket() -- create a new socket object +socketpair() -- create a pair of new socket objects [*] +fromfd() -- create a socket object from an open file descriptor [*] +fromshare() -- create a socket object from data received from socket.share() [*] +gethostname() -- return the current hostname +gethostbyname() -- map a hostname to its IP number +gethostbyaddr() -- map an IP number or hostname to DNS info +getservbyname() -- map a service name and a protocol name to a port number +getprotobyname() -- map a protocol name (e.g. 'tcp') to a number +ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order +htons(), htonl() -- convert 16, 32 bit int from host to network byte order +inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format +inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89) +socket.getdefaulttimeout() -- get the default timeout value +socket.setdefaulttimeout() -- set the default timeout value +create_connection() -- connects to an address, with an optional timeout and + optional source address. + + [*] not available on all platforms! + +Special objects: + +SocketType -- type object for socket objects +error -- exception raised for I/O errors +has_ipv6 -- boolean value indicating if IPv6 is supported + +IntEnum constants: + +AF_INET, AF_UNIX -- socket domains (first argument to socket() call) +SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument) + +Integer constants: + +Many other constants may be defined; these may be used in calls to +the setsockopt() and getsockopt() methods. +has_dualstack_ipv6AddressFamilySocketKindAF_SOCK_MsgFlagMSG_AddressInfoAI__LOCALHOST_LOCALHOST_V6_intenum_converterConvert a numeric family value to an IntEnum member. + + If it's not a known member, return the numeric value itself. 
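The module-level helpers listed in the socket docstring above translate between names, numbers and packed addresses; the host name used here is only illustrative:

import socket

print(socket.gethostname())                    # this machine's hostname
print(socket.gethostbyname("www.python.org"))  # hostname -> dotted-quad IP
print(socket.getservbyname("http", "tcp"))     # service name -> port (80)
print(socket.getprotobyname("tcp"))            # protocol name -> number (6)

packed = socket.inet_aton("123.45.67.89")      # dotted quad -> 4 packed bytes
print(socket.inet_ntoa(packed))                # ...and back again
print(socket.ntohs(0x1234), socket.htons(0x1234))  # byte-order conversions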
+ _realsocketerrorTabSpecified event object handle is invalid.Insufficient memory available.One or more parameters are invalid.Overlapped operation aborted.995Overlapped I/O event object not in signaled state.996Overlapped operation will complete later.997The operation was interrupted.10004A bad file handle was passed.10009Permission denied.10013A fault occurred on the network??10014An invalid operation was attempted.10022Too many open files.10024The socket operation would block10035A blocking operation is already in progress.10036Operation already in progress.10037Socket operation on nonsocket.10038Destination address required.10039Message too long.10040Protocol wrong type for socket.10041Bad protocol option.10042Protocol not supported.10043Socket type not supported.10044Operation not supported.10045Protocol family not supported.10046Address family not supported by protocol family.10047The network address is in use.10048Cannot assign requested address.10049Network is down.10050Network is unreachable.10051Network dropped connection on reset.10052Software caused connection abort.10053The connection has been reset.10054No buffer space available.10055Socket is already connected.10056Socket is not connected.10057The network has been shut down.10058Too many references.10059The operation timed out.10060Connection refused.10061Cannot translate name.10062The name is too long.10063The host is down.10064The host is unreachable.10065Directory not empty.10066Too many processes.10067User quota exceeded.10068Disk quota exceeded.10069Stale file handle reference.10070Item is remote.10071Network subsystem is unavailable.10091Winsock.dll version out of range.10092Successful WSAStartup not yet performed.10093Graceful shutdown in progress.10101No more results from WSALookupServiceNext.10102Call has been canceled.10103Procedure call table is invalid.10104Service provider is invalid.10105Service provider failed to initialize.10106System call failure.10107Service not found.10108Class type not found.1010910110Call was canceled.10111Database query was refused.10112Host not found.11001Nonauthoritative host not found.11002This is a nonrecoverable error.11003Valid name, no data record requested type.QoS receivers.11005QoS senders.11006No QoS senders.11007QoS no receivers.11008QoS request confirmed.11009QoS admission error.11010QoS policy failure.11011QoS bad style.11012QoS bad object.11013QoS traffic control error.11014QoS generic error.11015QoS service type error.11016QoS flowspec error.11017Invalid QoS provider buffer.11018Invalid QoS filter style.1101911020Incorrect QoS filter count.11021Invalid QoS object length.11022Incorrect QoS flow count.11023Unrecognized QoS object.11024Invalid QoS policy object.11025Invalid QoS flow descriptor.11026Invalid QoS provider-specific flowspec.11027Invalid QoS provider-specific filterspec.11028Invalid QoS shape discard mode object.11029Invalid QoS shaping rate object.11030Reserved policy QoS element type.11031_GiveupOnSendfileA subclass of _socket.socket adding the makefile() method._io_refsWrap __repr__() to reveal the real class name and socket + address(es). + <%s.%s%s fd=%i, family=%s, type=%s, proto=%i [closed], laddr=%sraddr, raddr=%sdup() -> socket object + + Duplicate the socket. Return a new socket object connected to the same + system resource. The new socket is non-inheritable. + accept() -> (socket object, address info) + + Wait for an incoming connection. Return a new socket + representing the connection, and the address of the client. 
+ For IP sockets, the address info is a pair (hostaddr, port). + makefile(...) -> an I/O stream connected to the socket + + The arguments are as for io.open() after the filename, except the only + supported mode values are 'r' (default), 'w' and 'b'. + invalid mode %r (only r, w, b allowed)readingrawmodeSocketIOunbuffered streams must be binary_sendfile_use_sendfilesocknonon-blocking sockets are not supportedselector_selectos_sendfiletimed outos.sendfile() not available on this platform_sendfile_use_sendsock_sendsendfile(file[, offset[, count]]) -> sent + + Send a file until EOF is reached by using high-performance + os.sendfile() and return the total number of bytes which + were sent. + *file* must be a regular file object opened in binary mode. + If os.sendfile() is not available (e.g. Windows) or file is + not a regular file socket.send() will be used instead. + *offset* tells from where to start reading the file. + If specified, *count* is the total number of bytes to transmit + as opposed to sending the file until EOF is reached. + File position is updated on return or also in case of error in + which case file.tell() can be used to figure out the number of + bytes which were sent. + The socket must be of SOCK_STREAM type. + Non-blocking sockets are not supported. + _decref_socketios_real_close_ssdetach() -> file descriptor + + Close the socket object without closing the underlying file descriptor. + The object cannot be used after this call, but the file descriptor + can be reused for other purposes. The file descriptor is returned. + Read-only access to the address family for this socket. + Read-only access to the socket type. + get_handle_inheritableset_handle_inheritableGet the inheritable flag of the socketSet the inheritable flag of the socket fromfd(fd, family, type[, proto]) -> socket object + + Create a socket object from a duplicate of the given file + descriptor. The remaining arguments are the same as for socket(). + nfd fromshare(info) -> socket object + + Create a socket object from the bytes object returned by + socket.share(pid). + socketpair([family[, type[, proto]]]) -> (socket object, socket object) + + Create a pair of socket objects from the sockets returned by the platform + socketpair() function. + The arguments are the same as for socket() except the default family is + AF_UNIX if defined on the platform; otherwise, the default is AF_INET. + Only AF_INET and AF_INET6 socket address families are supported"Only AF_INET and AF_INET6 socket address families ""are supported"Only SOCK_STREAM socket type is supportedOnly protocol zero is supportedlsockssocksocketpair([family[, type[, proto]]]) -> (socket object, socket object) +Create a pair of socket objects from the sockets returned by the platform +socketpair() function. +The arguments are the same as for socket() except the default family is AF_UNIX +if defined on the platform; otherwise, the default is AF_INET. +_blocking_errnosRaw I/O implementation for stream sockets. + + This class supports the makefile() method on sockets. It provides + the raw I/O interface on top of a socket object. + invalid mode: %r_reading_writing_timeout_occurredRead up to len(b) bytes into the writable buffer *b* and return + the number of bytes read. If the socket is non-blocking and no bytes + are available, None is returned. + + If *b* is non-empty, a 0 return value indicates that the connection + was shutdown at the other end. 
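socket.sendfile(), whose docstring appears above, sends a regular binary-mode file over a connected SOCK_STREAM socket; the peer address and file name below are hypothetical:

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 9000))          # hypothetical peer
with sock, open("payload.bin", "rb") as f:
    # Uses os.sendfile() where available and plain send() otherwise; offset
    # and count default to sending the whole file from the current position.
    sent = sock.sendfile(f)
    print(f"sent {sent} bytes, file position now {f.tell()}")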
+ cannot read from timed out objectWrite the given bytes or bytearray object *b* to the socket + and return the number of bytes written. This can be less than + len(b) if not all data could be written. If the socket is + non-blocking and no bytes could be written None is returned. + True if the SocketIO is open for reading. + I/O operation on closed socket.True if the SocketIO is open for writing. + True if the SocketIO is open for seeking. + Return the file descriptor of the underlying socket. + Close the SocketIO object. This doesn't close the underlying + socket, except if all references to it have disappeared. + Get fully qualified domain name from name. + + An empty argument is interpreted as meaning the local host. + + First the hostname returned by gethostbyaddr() is checked, then + possibly existing aliases. In case no FQDN is available, hostname + from gethostname() is returned. + 0.0.0.0ipaddrsConnect to *address* and return the socket object. + + Convenience function. Connect to *address* (a 2-tuple ``(host, + port)``) and return the socket object. Passing the optional + *timeout* parameter will set the timeout on the socket instance + before attempting to connect. If no *timeout* is supplied, the + global default timeout setting returned by :func:`getdefaulttimeout` + is used. If *source_address* is set it must be a tuple of (host, port) + for the socket to bind as a source address before making the connection. + A host of '' or port 0 tells the OS to use the default. + Return True if the platform supports creating a SOCK_STREAM socket + which can handle both AF_INET and AF_INET6 (IPv4 / IPv6) connections. + dualstack_ipv6Convenience function which creates a SOCK_STREAM type socket + bound to *address* (a 2-tuple (host, port)) and return the socket + object. + + *family* should be either AF_INET or AF_INET6. + *backlog* is the queue size passed to socket.listen(). + *reuse_port* dictates whether to use the SO_REUSEPORT socket option. + *dualstack_ipv6*: if true and the platform supports it, it will + create an AF_INET6 socket able to accept both IPv4 or IPv6 + connections. When false it will explicitly disable this option on + platforms that enable it by default (e.g. Linux). + + >>> with create_server(('', 8000)) as server: + ... while True: + ... conn, addr = server.accept() + ... # handle new connection + SO_REUSEPORT not supported on this platformdualstack_ipv6 not supported on this platformdualstack_ipv6 requires AF_INET6 family%s (while attempting to bind on address %r)Resolve host and port into list of address info entries. + + Translate the host/port argument into a sequence of 5-tuples that contain + all the necessary arguments for creating a socket connected to that service. + host is a domain name, a string representation of an IPv4/v6 address or + None. port is a string service name such as 'http', a numeric port number or + None. By passing None as the value of host and port, you can pass NULL to + the underlying C API. + + The family, type and proto arguments can be optionally specified in order to + narrow the list of addresses returned. Passing zero as a value for each of + these arguments selects the full range of results. + addrlist# Wrapper module for _socket, providing some additional facilities# implemented in Python.# Set up the socket.AF_* socket.SOCK_* constants as members of IntEnums for# nicer string representations.# Note that _socket only knows about the integer values. 
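create_connection() and makefile(), both documented above, combine into a small client sketch; example.org is only an illustrative endpoint:

import socket

with socket.create_connection(("example.org", 80), timeout=5.0) as sock:
    # makefile() layers a buffered I/O stream over the connected socket.
    with sock.makefile("rwb") as stream:
        stream.write(b"HEAD / HTTP/1.0\r\nHost: example.org\r\n\r\n")
        stream.flush()
        print(stream.readline())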
The public interface# in this module understands the enums and translates them back from integers# where needed (e.g. .family property of a socket object).# WSA error codes# WSAEFAULT# For user code address family and type values are IntEnum members, but# for the underlying _socket.socket they're just integers. The# constructor of _socket.socket converts the given argument to an# integer automatically.# Issue #7995: if no default timeout is set and the listening# socket had a (non-zero) timeout, force the new socket in blocking# mode to override platform-specific socket flags inheritance.# XXX refactor to share code?# Truncate to 1GiB to avoid OverflowError, see bpo-38319.# poll/select have the advantage of not requiring any# extra file descriptor, contrarily to epoll/kqueue# (also, they require a single syscall).# localize variable access to minimize overhead# Block until the socket is ready to send some# data; avoids hogging CPU resources.# We can get here for different reasons, the main# one being 'file' is not a regular mmap(2)-like# file, in which case we'll fall back on using# plain send().# This function should not reference any globals. See issue #808164.# Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain.# We create a connected TCP socket. Note the trick with# setblocking(False) that prevents us from having to create a thread.# On IPv6, ignore flow_info and scope_id# One might wonder why not let FileIO do the job instead. There are two# main reasons why FileIO is not adapted:# - it wouldn't work under Windows (where you can't used read() and# write() on a socket handle)# - it wouldn't work with socket timeouts (FileIO would ignore the# timeout and consider the socket non-blocking)# XXX More docs# XXX what about EINTR?# Break explicitly a reference cycle# Note about Windows. We don't set SO_REUSEADDR because:# 1) It's unnecessary: bind() will succeed even in case of a# previous closed socket on the same address and still in# TIME_WAIT state.# 2) If set, another socket is free to bind() on the same# address, effectively preventing this one from accepting# connections. Also, it may set the process in a state where# it'll no longer respond to any signals or graceful kills.# See: msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx# Fail later on bind(), for platforms which may not# support this option.# We override this function since we want to translate the numeric family# and socket type values to enum constants.b'This module provides socket operations and some related functions. +On Unix, it supports IP (Internet Protocol) and Unix domain sockets. +On other systems, it only supports IP. Functions specific for a +socket are available as methods of the socket object. + +Functions: + +socket() -- create a new socket object +socketpair() -- create a pair of new socket objects [*] +fromfd() -- create a socket object from an open file descriptor [*] +fromshare() -- create a socket object from data received from socket.share() [*] +gethostname() -- return the current hostname +gethostbyname() -- map a hostname to its IP number +gethostbyaddr() -- map an IP number or hostname to DNS info +getservbyname() -- map a service name and a protocol name to a port number +getprotobyname() -- map a protocol name (e.g. 
'tcp') to a number +ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order +htons(), htonl() -- convert 16, 32 bit int from host to network byte order +inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format +inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89) +socket.getdefaulttimeout() -- get the default timeout value +socket.setdefaulttimeout() -- set the default timeout value +create_connection() -- connects to an address, with an optional timeout and + optional source address. + + [*] not available on all platforms! + +Special objects: + +SocketType -- type object for socket objects +error -- exception raised for I/O errors +has_ipv6 -- boolean value indicating if IPv6 is supported + +IntEnum constants: + +AF_INET, AF_UNIX -- socket domains (first argument to socket() call) +SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument) + +Integer constants: + +Many other constants may be defined; these may be used in calls to +the setsockopt() and getsockopt() methods. +'u'This module provides socket operations and some related functions. +On Unix, it supports IP (Internet Protocol) and Unix domain sockets. +On other systems, it only supports IP. Functions specific for a +socket are available as methods of the socket object. + +Functions: + +socket() -- create a new socket object +socketpair() -- create a pair of new socket objects [*] +fromfd() -- create a socket object from an open file descriptor [*] +fromshare() -- create a socket object from data received from socket.share() [*] +gethostname() -- return the current hostname +gethostbyname() -- map a hostname to its IP number +gethostbyaddr() -- map an IP number or hostname to DNS info +getservbyname() -- map a service name and a protocol name to a port number +getprotobyname() -- map a protocol name (e.g. 'tcp') to a number +ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order +htons(), htonl() -- convert 16, 32 bit int from host to network byte order +inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format +inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89) +socket.getdefaulttimeout() -- get the default timeout value +socket.setdefaulttimeout() -- set the default timeout value +create_connection() -- connects to an address, with an optional timeout and + optional source address. + + [*] not available on all platforms! + +Special objects: + +SocketType -- type object for socket objects +error -- exception raised for I/O errors +has_ipv6 -- boolean value indicating if IPv6 is supported + +IntEnum constants: + +AF_INET, AF_UNIX -- socket domains (first argument to socket() call) +SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument) + +Integer constants: + +Many other constants may be defined; these may be used in calls to +the setsockopt() and getsockopt() methods. +'b'EBADF'u'EBADF'b'EAGAIN'u'EAGAIN'b'EWOULDBLOCK'u'EWOULDBLOCK'b'getfqdn'u'getfqdn'b'create_connection'u'create_connection'b'create_server'u'create_server'b'has_dualstack_ipv6'u'has_dualstack_ipv6'b'AddressFamily'u'AddressFamily'b'SocketKind'u'SocketKind'b'AF_'u'AF_'b'SOCK_'u'SOCK_'b'MsgFlag'u'MsgFlag'b'MSG_'u'MSG_'b'AddressInfo'u'AddressInfo'b'AI_'u'AI_'b'Convert a numeric family value to an IntEnum member. + + If it's not a known member, return the numeric value itself. + 'u'Convert a numeric family value to an IntEnum member. + + If it's not a known member, return the numeric value itself. 
+ 'b'Specified event object handle is invalid.'u'Specified event object handle is invalid.'b'Insufficient memory available.'u'Insufficient memory available.'b'One or more parameters are invalid.'u'One or more parameters are invalid.'b'Overlapped operation aborted.'u'Overlapped operation aborted.'b'Overlapped I/O event object not in signaled state.'u'Overlapped I/O event object not in signaled state.'b'Overlapped operation will complete later.'u'Overlapped operation will complete later.'b'The operation was interrupted.'u'The operation was interrupted.'b'A bad file handle was passed.'u'A bad file handle was passed.'b'Permission denied.'u'Permission denied.'b'A fault occurred on the network??'u'A fault occurred on the network??'b'An invalid operation was attempted.'u'An invalid operation was attempted.'b'Too many open files.'u'Too many open files.'b'The socket operation would block'u'The socket operation would block'b'A blocking operation is already in progress.'u'A blocking operation is already in progress.'b'Operation already in progress.'u'Operation already in progress.'b'Socket operation on nonsocket.'u'Socket operation on nonsocket.'b'Destination address required.'u'Destination address required.'b'Message too long.'u'Message too long.'b'Protocol wrong type for socket.'u'Protocol wrong type for socket.'b'Bad protocol option.'u'Bad protocol option.'b'Protocol not supported.'u'Protocol not supported.'b'Socket type not supported.'u'Socket type not supported.'b'Operation not supported.'u'Operation not supported.'b'Protocol family not supported.'u'Protocol family not supported.'b'Address family not supported by protocol family.'u'Address family not supported by protocol family.'b'The network address is in use.'u'The network address is in use.'b'Cannot assign requested address.'u'Cannot assign requested address.'b'Network is down.'u'Network is down.'b'Network is unreachable.'u'Network is unreachable.'b'Network dropped connection on reset.'u'Network dropped connection on reset.'b'Software caused connection abort.'u'Software caused connection abort.'b'The connection has been reset.'u'The connection has been reset.'b'No buffer space available.'u'No buffer space available.'b'Socket is already connected.'u'Socket is already connected.'b'Socket is not connected.'u'Socket is not connected.'b'The network has been shut down.'u'The network has been shut down.'b'Too many references.'u'Too many references.'b'The operation timed out.'u'The operation timed out.'b'Connection refused.'b'Cannot translate name.'u'Cannot translate name.'b'The name is too long.'u'The name is too long.'b'The host is down.'u'The host is down.'b'The host is unreachable.'u'The host is unreachable.'b'Directory not empty.'u'Directory not empty.'b'Too many processes.'u'Too many processes.'b'User quota exceeded.'u'User quota exceeded.'b'Disk quota exceeded.'u'Disk quota exceeded.'b'Stale file handle reference.'u'Stale file handle reference.'b'Item is remote.'u'Item is remote.'b'Network subsystem is unavailable.'u'Network subsystem is unavailable.'b'Winsock.dll version out of range.'u'Winsock.dll version out of range.'b'Successful WSAStartup not yet performed.'u'Successful WSAStartup not yet performed.'b'Graceful shutdown in progress.'u'Graceful shutdown in progress.'b'No more results from WSALookupServiceNext.'u'No more results from WSALookupServiceNext.'b'Call has been canceled.'u'Call has been canceled.'b'Procedure call table is invalid.'u'Procedure call table is invalid.'b'Service provider is invalid.'u'Service provider is 
invalid.'b'Service provider failed to initialize.'u'Service provider failed to initialize.'b'System call failure.'u'System call failure.'b'Service not found.'u'Service not found.'b'Class type not found.'u'Class type not found.'b'Call was canceled.'u'Call was canceled.'b'Database query was refused.'u'Database query was refused.'b'Host not found.'u'Host not found.'b'Nonauthoritative host not found.'u'Nonauthoritative host not found.'b'This is a nonrecoverable error.'u'This is a nonrecoverable error.'b'Valid name, no data record requested type.'u'Valid name, no data record requested type.'b'QoS receivers.'u'QoS receivers.'b'QoS senders.'u'QoS senders.'b'No QoS senders.'u'No QoS senders.'b'QoS no receivers.'u'QoS no receivers.'b'QoS request confirmed.'u'QoS request confirmed.'b'QoS admission error.'u'QoS admission error.'b'QoS policy failure.'u'QoS policy failure.'b'QoS bad style.'u'QoS bad style.'b'QoS bad object.'u'QoS bad object.'b'QoS traffic control error.'u'QoS traffic control error.'b'QoS generic error.'u'QoS generic error.'b'QoS service type error.'u'QoS service type error.'b'QoS flowspec error.'u'QoS flowspec error.'b'Invalid QoS provider buffer.'u'Invalid QoS provider buffer.'b'Invalid QoS filter style.'u'Invalid QoS filter style.'b'Incorrect QoS filter count.'u'Incorrect QoS filter count.'b'Invalid QoS object length.'u'Invalid QoS object length.'b'Incorrect QoS flow count.'u'Incorrect QoS flow count.'b'Unrecognized QoS object.'u'Unrecognized QoS object.'b'Invalid QoS policy object.'u'Invalid QoS policy object.'b'Invalid QoS flow descriptor.'u'Invalid QoS flow descriptor.'b'Invalid QoS provider-specific flowspec.'u'Invalid QoS provider-specific flowspec.'b'Invalid QoS provider-specific filterspec.'u'Invalid QoS provider-specific filterspec.'b'Invalid QoS shape discard mode object.'u'Invalid QoS shape discard mode object.'b'Invalid QoS shaping rate object.'u'Invalid QoS shaping rate object.'b'Reserved policy QoS element type.'u'Reserved policy QoS element type.'b'errorTab'u'errorTab'b'A subclass of _socket.socket adding the makefile() method.'u'A subclass of _socket.socket adding the makefile() method.'b'_io_refs'u'_io_refs'b'_closed'u'_closed'b'Wrap __repr__() to reveal the real class name and socket + address(es). + 'u'Wrap __repr__() to reveal the real class name and socket + address(es). + 'b'<%s.%s%s fd=%i, family=%s, type=%s, proto=%i'u'<%s.%s%s fd=%i, family=%s, type=%s, proto=%i'b' [closed]'u' [closed]'b', laddr=%s'u', laddr=%s'b', raddr=%s'u', raddr=%s'b'dup() -> socket object + + Duplicate the socket. Return a new socket object connected to the same + system resource. The new socket is non-inheritable. + 'u'dup() -> socket object + + Duplicate the socket. Return a new socket object connected to the same + system resource. The new socket is non-inheritable. + 'b'accept() -> (socket object, address info) + + Wait for an incoming connection. Return a new socket + representing the connection, and the address of the client. + For IP sockets, the address info is a pair (hostaddr, port). + 'u'accept() -> (socket object, address info) + + Wait for an incoming connection. Return a new socket + representing the connection, and the address of the client. + For IP sockets, the address info is a pair (hostaddr, port). + 'b'makefile(...) -> an I/O stream connected to the socket + + The arguments are as for io.open() after the filename, except the only + supported mode values are 'r' (default), 'w' and 'b'. + 'u'makefile(...) 
-> an I/O stream connected to the socket + + The arguments are as for io.open() after the filename, except the only + supported mode values are 'r' (default), 'w' and 'b'. + 'b'invalid mode %r (only r, w, b allowed)'u'invalid mode %r (only r, w, b allowed)'b'unbuffered streams must be binary'u'unbuffered streams must be binary'b'non-blocking sockets are not supported'u'non-blocking sockets are not supported'b'timed out'u'timed out'b'os.sendfile() not available on this platform'u'os.sendfile() not available on this platform'b'sendfile(file[, offset[, count]]) -> sent + + Send a file until EOF is reached by using high-performance + os.sendfile() and return the total number of bytes which + were sent. + *file* must be a regular file object opened in binary mode. + If os.sendfile() is not available (e.g. Windows) or file is + not a regular file socket.send() will be used instead. + *offset* tells from where to start reading the file. + If specified, *count* is the total number of bytes to transmit + as opposed to sending the file until EOF is reached. + File position is updated on return or also in case of error in + which case file.tell() can be used to figure out the number of + bytes which were sent. + The socket must be of SOCK_STREAM type. + Non-blocking sockets are not supported. + 'u'sendfile(file[, offset[, count]]) -> sent + + Send a file until EOF is reached by using high-performance + os.sendfile() and return the total number of bytes which + were sent. + *file* must be a regular file object opened in binary mode. + If os.sendfile() is not available (e.g. Windows) or file is + not a regular file socket.send() will be used instead. + *offset* tells from where to start reading the file. + If specified, *count* is the total number of bytes to transmit + as opposed to sending the file until EOF is reached. + File position is updated on return or also in case of error in + which case file.tell() can be used to figure out the number of + bytes which were sent. + The socket must be of SOCK_STREAM type. + Non-blocking sockets are not supported. + 'b'detach() -> file descriptor + + Close the socket object without closing the underlying file descriptor. + The object cannot be used after this call, but the file descriptor + can be reused for other purposes. The file descriptor is returned. + 'u'detach() -> file descriptor + + Close the socket object without closing the underlying file descriptor. + The object cannot be used after this call, but the file descriptor + can be reused for other purposes. The file descriptor is returned. + 'b'Read-only access to the address family for this socket. + 'u'Read-only access to the address family for this socket. + 'b'Read-only access to the socket type. + 'u'Read-only access to the socket type. + 'b'Get the inheritable flag of the socket'u'Get the inheritable flag of the socket'b'Set the inheritable flag of the socket'u'Set the inheritable flag of the socket'b' fromfd(fd, family, type[, proto]) -> socket object + + Create a socket object from a duplicate of the given file + descriptor. The remaining arguments are the same as for socket(). + 'u' fromfd(fd, family, type[, proto]) -> socket object + + Create a socket object from a duplicate of the given file + descriptor. The remaining arguments are the same as for socket(). + 'b' fromshare(info) -> socket object + + Create a socket object from the bytes object returned by + socket.share(pid). 
+ 'u' fromshare(info) -> socket object + + Create a socket object from the bytes object returned by + socket.share(pid). + 'b'fromshare'u'fromshare'b'socketpair'u'socketpair'b'socketpair([family[, type[, proto]]]) -> (socket object, socket object) + + Create a pair of socket objects from the sockets returned by the platform + socketpair() function. + The arguments are the same as for socket() except the default family is + AF_UNIX if defined on the platform; otherwise, the default is AF_INET. + 'u'socketpair([family[, type[, proto]]]) -> (socket object, socket object) + + Create a pair of socket objects from the sockets returned by the platform + socketpair() function. + The arguments are the same as for socket() except the default family is + AF_UNIX if defined on the platform; otherwise, the default is AF_INET. + 'b'Only AF_INET and AF_INET6 socket address families are supported'u'Only AF_INET and AF_INET6 socket address families are supported'b'Only SOCK_STREAM socket type is supported'u'Only SOCK_STREAM socket type is supported'b'Only protocol zero is supported'u'Only protocol zero is supported'b'socketpair([family[, type[, proto]]]) -> (socket object, socket object) +Create a pair of socket objects from the sockets returned by the platform +socketpair() function. +The arguments are the same as for socket() except the default family is AF_UNIX +if defined on the platform; otherwise, the default is AF_INET. +'u'socketpair([family[, type[, proto]]]) -> (socket object, socket object) +Create a pair of socket objects from the sockets returned by the platform +socketpair() function. +The arguments are the same as for socket() except the default family is AF_UNIX +if defined on the platform; otherwise, the default is AF_INET. +'b'Raw I/O implementation for stream sockets. + + This class supports the makefile() method on sockets. It provides + the raw I/O interface on top of a socket object. + 'u'Raw I/O implementation for stream sockets. + + This class supports the makefile() method on sockets. It provides + the raw I/O interface on top of a socket object. + 'b'invalid mode: %r'u'invalid mode: %r'b'Read up to len(b) bytes into the writable buffer *b* and return + the number of bytes read. If the socket is non-blocking and no bytes + are available, None is returned. + + If *b* is non-empty, a 0 return value indicates that the connection + was shutdown at the other end. + 'u'Read up to len(b) bytes into the writable buffer *b* and return + the number of bytes read. If the socket is non-blocking and no bytes + are available, None is returned. + + If *b* is non-empty, a 0 return value indicates that the connection + was shutdown at the other end. + 'b'cannot read from timed out object'u'cannot read from timed out object'b'Write the given bytes or bytearray object *b* to the socket + and return the number of bytes written. This can be less than + len(b) if not all data could be written. If the socket is + non-blocking and no bytes could be written None is returned. + 'u'Write the given bytes or bytearray object *b* to the socket + and return the number of bytes written. This can be less than + len(b) if not all data could be written. If the socket is + non-blocking and no bytes could be written None is returned. + 'b'True if the SocketIO is open for reading. + 'u'True if the SocketIO is open for reading. + 'b'I/O operation on closed socket.'u'I/O operation on closed socket.'b'True if the SocketIO is open for writing. + 'u'True if the SocketIO is open for writing. 
+ 'b'True if the SocketIO is open for seeking. + 'u'True if the SocketIO is open for seeking. + 'b'Return the file descriptor of the underlying socket. + 'u'Return the file descriptor of the underlying socket. + 'b'Close the SocketIO object. This doesn't close the underlying + socket, except if all references to it have disappeared. + 'u'Close the SocketIO object. This doesn't close the underlying + socket, except if all references to it have disappeared. + 'b'Get fully qualified domain name from name. + + An empty argument is interpreted as meaning the local host. + + First the hostname returned by gethostbyaddr() is checked, then + possibly existing aliases. In case no FQDN is available, hostname + from gethostname() is returned. + 'u'Get fully qualified domain name from name. + + An empty argument is interpreted as meaning the local host. + + First the hostname returned by gethostbyaddr() is checked, then + possibly existing aliases. In case no FQDN is available, hostname + from gethostname() is returned. + 'b'0.0.0.0'u'0.0.0.0'b'Connect to *address* and return the socket object. + + Convenience function. Connect to *address* (a 2-tuple ``(host, + port)``) and return the socket object. Passing the optional + *timeout* parameter will set the timeout on the socket instance + before attempting to connect. If no *timeout* is supplied, the + global default timeout setting returned by :func:`getdefaulttimeout` + is used. If *source_address* is set it must be a tuple of (host, port) + for the socket to bind as a source address before making the connection. + A host of '' or port 0 tells the OS to use the default. + 'u'Connect to *address* and return the socket object. + + Convenience function. Connect to *address* (a 2-tuple ``(host, + port)``) and return the socket object. Passing the optional + *timeout* parameter will set the timeout on the socket instance + before attempting to connect. If no *timeout* is supplied, the + global default timeout setting returned by :func:`getdefaulttimeout` + is used. If *source_address* is set it must be a tuple of (host, port) + for the socket to bind as a source address before making the connection. + A host of '' or port 0 tells the OS to use the default. + 'b'Return True if the platform supports creating a SOCK_STREAM socket + which can handle both AF_INET and AF_INET6 (IPv4 / IPv6) connections. + 'u'Return True if the platform supports creating a SOCK_STREAM socket + which can handle both AF_INET and AF_INET6 (IPv4 / IPv6) connections. + 'b'IPV6_V6ONLY'u'IPV6_V6ONLY'b'Convenience function which creates a SOCK_STREAM type socket + bound to *address* (a 2-tuple (host, port)) and return the socket + object. + + *family* should be either AF_INET or AF_INET6. + *backlog* is the queue size passed to socket.listen(). + *reuse_port* dictates whether to use the SO_REUSEPORT socket option. + *dualstack_ipv6*: if true and the platform supports it, it will + create an AF_INET6 socket able to accept both IPv4 or IPv6 + connections. When false it will explicitly disable this option on + platforms that enable it by default (e.g. Linux). + + >>> with create_server(('', 8000)) as server: + ... while True: + ... conn, addr = server.accept() + ... # handle new connection + 'u'Convenience function which creates a SOCK_STREAM type socket + bound to *address* (a 2-tuple (host, port)) and return the socket + object. + + *family* should be either AF_INET or AF_INET6. + *backlog* is the queue size passed to socket.listen(). 
+ *reuse_port* dictates whether to use the SO_REUSEPORT socket option. + *dualstack_ipv6*: if true and the platform supports it, it will + create an AF_INET6 socket able to accept both IPv4 or IPv6 + connections. When false it will explicitly disable this option on + platforms that enable it by default (e.g. Linux). + + >>> with create_server(('', 8000)) as server: + ... while True: + ... conn, addr = server.accept() + ... # handle new connection + 'b'SO_REUSEPORT not supported on this platform'u'SO_REUSEPORT not supported on this platform'b'dualstack_ipv6 not supported on this platform'u'dualstack_ipv6 not supported on this platform'b'dualstack_ipv6 requires AF_INET6 family'u'dualstack_ipv6 requires AF_INET6 family'b'%s (while attempting to bind on address %r)'u'%s (while attempting to bind on address %r)'b'Resolve host and port into list of address info entries. + + Translate the host/port argument into a sequence of 5-tuples that contain + all the necessary arguments for creating a socket connected to that service. + host is a domain name, a string representation of an IPv4/v6 address or + None. port is a string service name such as 'http', a numeric port number or + None. By passing None as the value of host and port, you can pass NULL to + the underlying C API. + + The family, type and proto arguments can be optionally specified in order to + narrow the list of addresses returned. Passing zero as a value for each of + these arguments selects the full range of results. + 'u'Resolve host and port into list of address info entries. + + Translate the host/port argument into a sequence of 5-tuples that contain + all the necessary arguments for creating a socket connected to that service. + host is a domain name, a string representation of an IPv4/v6 address or + None. port is a string service name such as 'http', a numeric port number or + None. By passing None as the value of host and port, you can pass NULL to + the underlying C API. + + The family, type and proto arguments can be optionally specified in order to + narrow the list of addresses returned. Passing zero as a value for each of + these arguments selects the full range of results. + 'Generic socket server classes. + +This module tries to capture the various aspects of defining a server: + +For socket-based servers: + +- address family: + - AF_INET{,6}: IP (Internet Protocol) sockets (default) + - AF_UNIX: Unix domain sockets + - others, e.g. AF_DECNET are conceivable (see +- socket type: + - SOCK_STREAM (reliable stream, e.g. TCP) + - SOCK_DGRAM (datagrams, e.g. UDP) + +For request-based servers (including socket-based): + +- client address verification before further looking at the request + (This is actually a hook for any processing that needs to look + at the request before anything else, e.g. logging) +- how to handle multiple requests: + - synchronous (one request is handled at a time) + - forking (each request is handled by a new process) + - threading (each request is handled by a new thread) + +The classes in this module favor the server type that is simplest to +write: a synchronous TCP/IP server. This is bad class design, but +saves some typing. (There's also the issue that a deep class hierarchy +slows down method lookups.) 
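getaddrinfo() and create_server(), documented above, can be sketched as follows; port 8000 matches the docstring example and is otherwise arbitrary:

import socket

# Resolve host/port into (family, type, proto, canonname, sockaddr) tuples.
for family, kind, proto, _, sockaddr in socket.getaddrinfo(
        "localhost", 8000, type=socket.SOCK_STREAM):
    print(family, kind, proto, sockaddr)

# Bind a listening SOCK_STREAM socket, using dual-stack IPv6 when available.
if socket.has_dualstack_ipv6():
    server = socket.create_server(("", 8000), family=socket.AF_INET6,
                                  backlog=5, dualstack_ipv6=True)
else:
    server = socket.create_server(("", 8000), backlog=5)
server.close()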
+ +There are five classes in an inheritance diagram, four of which represent +synchronous servers of four types: + + +------------+ + | BaseServer | + +------------+ + | + v + +-----------+ +------------------+ + | TCPServer |------->| UnixStreamServer | + +-----------+ +------------------+ + | + v + +-----------+ +--------------------+ + | UDPServer |------->| UnixDatagramServer | + +-----------+ +--------------------+ + +Note that UnixDatagramServer derives from UDPServer, not from +UnixStreamServer -- the only difference between an IP and a Unix +stream server is the address family, which is simply repeated in both +unix server classes. + +Forking and threading versions of each type of server can be created +using the ForkingMixIn and ThreadingMixIn mix-in classes. For +instance, a threading UDP server class is created as follows: + + class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass + +The Mix-in class must come first, since it overrides a method defined +in UDPServer! Setting the various member variables also changes +the behavior of the underlying server mechanism. + +To implement a service, you must derive a class from +BaseRequestHandler and redefine its handle() method. You can then run +various versions of the service by combining one of the server classes +with your request handler class. + +The request handler class must be different for datagram or stream +services. This can be hidden by using the request handler +subclasses StreamRequestHandler or DatagramRequestHandler. + +Of course, you still have to use your head! + +For instance, it makes no sense to use a forking server if the service +contains state in memory that can be modified by requests (since the +modifications in the child process would never reach the initial state +kept in the parent process and passed to each child). In this case, +you can use a threading server, but you will probably have to use +locks to avoid two requests that come in nearly simultaneous to apply +conflicting changes to the server state. + +On the other hand, if you are building e.g. an HTTP server, where all +data is stored externally (e.g. in the file system), a synchronous +class will essentially render the service "deaf" while one request is +being handled -- which may be for a very long time if a client is slow +to read all the data it has requested. Here a threading or forking +server is appropriate. + +In some cases, it may be appropriate to process part of a request +synchronously, but to finish processing in a forked child depending on +the request data. This can be implemented by using a synchronous +server and doing an explicit fork in the request handler class +handle() method. + +Another approach to handling multiple simultaneous requests in an +environment that supports neither threads nor fork (or where these are +too expensive or inappropriate for the service) is to maintain an +explicit table of partially finished requests and to use a selector to +decide which request to work on next (or whether to handle a new +incoming request). This is particularly important for stream services +where each client can potentially be connected for a long time (if +threads or subprocesses cannot be used). + +Future work: +- Standard classes for Sun RPC (which uses either UDP or TCP) +- Standard mix-in classes to implement various authentication + and encryption schemes + +XXX Open problems: +- What to do with out-of-band data? + +BaseServer: +- split generic "request" functionality out into BaseServer class. 
+ Copyright (C) 2000 Luke Kenneth Casson Leighton + + example: read entries from a SQL database (requires overriding + get_request() to return a table entry from the database). + entry is processed by a RequestHandlerClass. + +0.4BaseServerUDPServerThreadingUDPServerThreadingTCPServerBaseRequestHandlerDatagramRequestHandlerForkingUDPServerForkingTCPServerForkingMixInUnixStreamServerUnixDatagramServerThreadingUnixStreamServerThreadingUnixDatagramServer_ServerSelectorBase class for server classes. + + Methods for the caller: + + - __init__(server_address, RequestHandlerClass) + - serve_forever(poll_interval=0.5) + - shutdown() + - handle_request() # if you do not use serve_forever() + - fileno() -> int # for selector + + Methods that may be overridden: + + - server_bind() + - server_activate() + - get_request() -> request, client_address + - handle_timeout() + - verify_request(request, client_address) + - server_close() + - process_request(request, client_address) + - shutdown_request(request) + - close_request(request) + - service_actions() + - handle_error() + + Methods for derived classes: + + - finish_request(request, client_address) + + Class variables that may be overridden by derived classes or + instances: + + - timeout + - address_family + - socket_type + - allow_reuse_address + + Instance variables: + + - RequestHandlerClass + - socket + + RequestHandlerClassConstructor. May be extended, do not override.__is_shut_down__shutdown_requestCalled by constructor to activate the server. + + May be overridden. + + poll_intervalHandle one request at a time until shutdown. + + Polls for shutdown every poll_interval seconds. Ignores + self.timeout. If you need to do periodic tasks, do them in + another thread. + _handle_request_noblockservice_actionsStops the serve_forever loop. + + Blocks until the loop has finished. This must be called while + serve_forever() is running in another thread, or it will + deadlock. + Called by the serve_forever() loop. + + May be overridden by a subclass / Mixin to implement any code that + needs to be run during the loop. + Handle one request, possibly blocking. + + Respects self.timeout. + handle_timeoutHandle one request, without blocking. + + I assume that selector.select() has returned that the socket is + readable before this function was called, so there should be no risk of + blocking in get_request(). + get_requestverify_requestshutdown_requestCalled if no new request arrives within self.timeout. + + Overridden by ForkingMixIn. + Verify the request. May be overridden. + + Return True if we should proceed with this request. + + Call finish_request. + + Overridden by ForkingMixIn and ThreadingMixIn. + + finish_requestCalled to clean-up the server. + + May be overridden. + + Finish one request by instantiating RequestHandlerClass.Called to shutdown and close an individual request.close_requestCalled to clean up an individual request.Handle an error gracefully. May be overridden. + + The default is to print a traceback and continue. + + Exception happened during processing of request fromBase class for various socket-based server classes. + + Defaults to synchronous IP stream (i.e., TCP). 
+ + Methods for the caller: + + - __init__(server_address, RequestHandlerClass, bind_and_activate=True) + - serve_forever(poll_interval=0.5) + - shutdown() + - handle_request() # if you don't use serve_forever() + - fileno() -> int # for selector + + Methods that may be overridden: + + - server_bind() + - server_activate() + - get_request() -> request, client_address + - handle_timeout() + - verify_request(request, client_address) + - process_request(request, client_address) + - shutdown_request(request) + - close_request(request) + - handle_error() + + Methods for derived classes: + + - finish_request(request, client_address) + + Class variables that may be overridden by derived classes or + instances: + + - timeout + - address_family + - socket_type + - request_queue_size (only for stream sockets) + - allow_reuse_address + + Instance variables: + + - server_address + - RequestHandlerClass + - socket + + socket_typerequest_queue_sizebind_and_activateCalled by constructor to bind the socket. + + May be overridden. + + Return socket file number. + + Interface required by selector. + + Get the request and client address from the socket. + + May be overridden. + + UDP server class.max_packet_sizeclient_addrMix-in class to handle each request in a new process.max_childrenblock_on_closecollect_childrenblockingInternal routine to wait for children that have exited.Wait for zombies after self.timeout seconds of inactivity. + + May be extended, do not override. + Collect the zombie child processes regularly in the ForkingMixIn. + + service_actions is called in the BaseServer's serve_forever loop. + Fork a new subprocess to process the request._Threads + Joinable list of all non-daemon threads. + reap_NoThreads + Degenerate version of _Threads. + Mix-in class to handle each request in a new thread._threadsprocess_request_threadSame as in BaseServer but as a thread. + + In addition, exception handling is done here. + + Start a new thread to process the request.Base class for request handler classes. + + This class is instantiated for each request to be handled. The + constructor sets the instance variables request, client_address + and server, and then calls the handle() method. To implement a + specific service, all you need to do is to derive a class which + defines a handle() method. + + The handle() method can find the request as self.request, the + client address as self.client_address, and the server (in case it + needs access to per-server information) as self.server. Since a + separate instance is created for each request, the handle() method + can define other arbitrary instance variables. + + Define self.rfile and self.wfile for stream sockets.wbufsizedisable_nagle_algorithm_SocketWriterSimple writable BufferedIOBase implementation for a socket + + Does not hold data in a buffer, avoiding any need to call flush().Define self.rfile and self.wfile for datagram sockets.packet# Author of the BaseServer patch: Luke Kenneth Casson Leighton# poll/select have the advantage of not requiring any extra file descriptor,# contrarily to epoll/kqueue (also, they require a single syscall).# XXX: Consider using another file descriptor or connecting to the# socket to wake this up instead of polling. Polling reduces our# responsiveness to a shutdown request and wastes cpu at all other# times.# bpo-35017: shutdown() called during select(), exit immediately.# The distinction between handling, getting, processing and finishing a# request is fairly arbitrary. Remember:# - handle_request() is the top-level call. 
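The UDPServer and DatagramRequestHandler docstrings in the pool describe the datagram variant of the same pattern. A minimal sketch (the address and the upper-casing behaviour are illustrative):

    import socketserver

    class UpperHandler(socketserver.DatagramRequestHandler):
        def handle(self):
            # For datagram handlers, rfile holds the received packet and the
            # contents of wfile are sent back to self.client_address on return.
            data = self.rfile.read()
            self.wfile.write(data.upper())

    if __name__ == "__main__":
        with socketserver.UDPServer(("127.0.0.1", 9999), UpperHandler) as server:
            server.handle_request()   # serve a single datagram, then exit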
It calls selector.select(),# get_request(), verify_request() and process_request()# - get_request() is different for stream or datagram sockets# - process_request() is the place that may fork a new process or create a# new thread to finish the request# - finish_request() instantiates the request handler class; this# constructor will handle the request all by itself# Support people who used socket.settimeout() to escape# handle_request before self.timeout was available.# Wait until a request arrives or the timeout expires - the loop is# necessary to accommodate early wakeups due to EINTR.#explicitly shutdown. socket.close() merely releases#the socket and waits for GC to perform the actual close.#some platforms may raise ENOTCONN here# No need to call listen() for UDP.# No need to shutdown anything.# No need to close anything.# If true, server_close() waits until all child processes complete.# If we're above the max number of children, wait and reap them until# we go back below threshold. Note that we use waitpid(-1) below to be# able to collect children in size() syscalls instead# of size(): the downside is that this might reap children# which we didn't spawn, which is why we only resort to this when we're# above max_children.# we don't have any children, we're done# Now reap all defunct children.# if the child hasn't exited yet, pid will be 0 and ignored by# discard() below# someone else reaped it# Parent process# Child process.# This must never return, hence os._exit()!# Decides how threads will act upon termination of the# main process# If true, server_close() waits until all non-daemonic threads terminate.# Threads object# used by server_close() to wait for all threads completion.# The following two classes make it possible to use the same service# class for stream or datagram servers.# Each class sets up these instance variables:# - rfile: a file object from which receives the request is read# - wfile: a file object to which the reply is written# When the handle() method returns, wfile is flushed properly# Default buffer sizes for rfile, wfile.# We default rfile to buffered because otherwise it could be# really slow for large data (a getc() call per byte); we make# wfile unbuffered because (a) often after a write() we want to# read and we need to flush the line; (b) big writes to unbuffered# files are typically optimized by stdio even when big reads# aren't.# A timeout to apply to the request socket, if not None.# Disable nagle algorithm for this socket, if True.# Use only when wbufsize != 0, to avoid small packets.# A final socket error may have occurred here, such as# the local error ECONNABORTED.b'Generic socket server classes. + +This module tries to capture the various aspects of defining a server: + +For socket-based servers: + +- address family: + - AF_INET{,6}: IP (Internet Protocol) sockets (default) + - AF_UNIX: Unix domain sockets + - others, e.g. AF_DECNET are conceivable (see +- socket type: + - SOCK_STREAM (reliable stream, e.g. TCP) + - SOCK_DGRAM (datagrams, e.g. UDP) + +For request-based servers (including socket-based): + +- client address verification before further looking at the request + (This is actually a hook for any processing that needs to look + at the request before anything else, e.g. 
logging) +- how to handle multiple requests: + - synchronous (one request is handled at a time) + - forking (each request is handled by a new process) + - threading (each request is handled by a new thread) + +The classes in this module favor the server type that is simplest to +write: a synchronous TCP/IP server. This is bad class design, but +saves some typing. (There's also the issue that a deep class hierarchy +slows down method lookups.) + +There are five classes in an inheritance diagram, four of which represent +synchronous servers of four types: + + +------------+ + | BaseServer | + +------------+ + | + v + +-----------+ +------------------+ + | TCPServer |------->| UnixStreamServer | + +-----------+ +------------------+ + | + v + +-----------+ +--------------------+ + | UDPServer |------->| UnixDatagramServer | + +-----------+ +--------------------+ + +Note that UnixDatagramServer derives from UDPServer, not from +UnixStreamServer -- the only difference between an IP and a Unix +stream server is the address family, which is simply repeated in both +unix server classes. + +Forking and threading versions of each type of server can be created +using the ForkingMixIn and ThreadingMixIn mix-in classes. For +instance, a threading UDP server class is created as follows: + + class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass + +The Mix-in class must come first, since it overrides a method defined +in UDPServer! Setting the various member variables also changes +the behavior of the underlying server mechanism. + +To implement a service, you must derive a class from +BaseRequestHandler and redefine its handle() method. You can then run +various versions of the service by combining one of the server classes +with your request handler class. + +The request handler class must be different for datagram or stream +services. This can be hidden by using the request handler +subclasses StreamRequestHandler or DatagramRequestHandler. + +Of course, you still have to use your head! + +For instance, it makes no sense to use a forking server if the service +contains state in memory that can be modified by requests (since the +modifications in the child process would never reach the initial state +kept in the parent process and passed to each child). In this case, +you can use a threading server, but you will probably have to use +locks to avoid two requests that come in nearly simultaneous to apply +conflicting changes to the server state. + +On the other hand, if you are building e.g. an HTTP server, where all +data is stored externally (e.g. in the file system), a synchronous +class will essentially render the service "deaf" while one request is +being handled -- which may be for a very long time if a client is slow +to read all the data it has requested. Here a threading or forking +server is appropriate. + +In some cases, it may be appropriate to process part of a request +synchronously, but to finish processing in a forked child depending on +the request data. This can be implemented by using a synchronous +server and doing an explicit fork in the request handler class +handle() method. + +Another approach to handling multiple simultaneous requests in an +environment that supports neither threads nor fork (or where these are +too expensive or inappropriate for the service) is to maintain an +explicit table of partially finished requests and to use a selector to +decide which request to work on next (or whether to handle a new +incoming request). 
This is particularly important for stream services +where each client can potentially be connected for a long time (if +threads or subprocesses cannot be used). + +Future work: +- Standard classes for Sun RPC (which uses either UDP or TCP) +- Standard mix-in classes to implement various authentication + and encryption schemes + +XXX Open problems: +- What to do with out-of-band data? + +BaseServer: +- split generic "request" functionality out into BaseServer class. + Copyright (C) 2000 Luke Kenneth Casson Leighton + + example: read entries from a SQL database (requires overriding + get_request() to return a table entry from the database). + entry is processed by a RequestHandlerClass. + +'u'Generic socket server classes. + +This module tries to capture the various aspects of defining a server: + +For socket-based servers: + +- address family: + - AF_INET{,6}: IP (Internet Protocol) sockets (default) + - AF_UNIX: Unix domain sockets + - others, e.g. AF_DECNET are conceivable (see +- socket type: + - SOCK_STREAM (reliable stream, e.g. TCP) + - SOCK_DGRAM (datagrams, e.g. UDP) + +For request-based servers (including socket-based): + +- client address verification before further looking at the request + (This is actually a hook for any processing that needs to look + at the request before anything else, e.g. logging) +- how to handle multiple requests: + - synchronous (one request is handled at a time) + - forking (each request is handled by a new process) + - threading (each request is handled by a new thread) + +The classes in this module favor the server type that is simplest to +write: a synchronous TCP/IP server. This is bad class design, but +saves some typing. (There's also the issue that a deep class hierarchy +slows down method lookups.) + +There are five classes in an inheritance diagram, four of which represent +synchronous servers of four types: + + +------------+ + | BaseServer | + +------------+ + | + v + +-----------+ +------------------+ + | TCPServer |------->| UnixStreamServer | + +-----------+ +------------------+ + | + v + +-----------+ +--------------------+ + | UDPServer |------->| UnixDatagramServer | + +-----------+ +--------------------+ + +Note that UnixDatagramServer derives from UDPServer, not from +UnixStreamServer -- the only difference between an IP and a Unix +stream server is the address family, which is simply repeated in both +unix server classes. + +Forking and threading versions of each type of server can be created +using the ForkingMixIn and ThreadingMixIn mix-in classes. For +instance, a threading UDP server class is created as follows: + + class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass + +The Mix-in class must come first, since it overrides a method defined +in UDPServer! Setting the various member variables also changes +the behavior of the underlying server mechanism. + +To implement a service, you must derive a class from +BaseRequestHandler and redefine its handle() method. You can then run +various versions of the service by combining one of the server classes +with your request handler class. + +The request handler class must be different for datagram or stream +services. This can be hidden by using the request handler +subclasses StreamRequestHandler or DatagramRequestHandler. + +Of course, you still have to use your head! 
+ +For instance, it makes no sense to use a forking server if the service +contains state in memory that can be modified by requests (since the +modifications in the child process would never reach the initial state +kept in the parent process and passed to each child). In this case, +you can use a threading server, but you will probably have to use +locks to avoid two requests that come in nearly simultaneous to apply +conflicting changes to the server state. + +On the other hand, if you are building e.g. an HTTP server, where all +data is stored externally (e.g. in the file system), a synchronous +class will essentially render the service "deaf" while one request is +being handled -- which may be for a very long time if a client is slow +to read all the data it has requested. Here a threading or forking +server is appropriate. + +In some cases, it may be appropriate to process part of a request +synchronously, but to finish processing in a forked child depending on +the request data. This can be implemented by using a synchronous +server and doing an explicit fork in the request handler class +handle() method. + +Another approach to handling multiple simultaneous requests in an +environment that supports neither threads nor fork (or where these are +too expensive or inappropriate for the service) is to maintain an +explicit table of partially finished requests and to use a selector to +decide which request to work on next (or whether to handle a new +incoming request). This is particularly important for stream services +where each client can potentially be connected for a long time (if +threads or subprocesses cannot be used). + +Future work: +- Standard classes for Sun RPC (which uses either UDP or TCP) +- Standard mix-in classes to implement various authentication + and encryption schemes + +XXX Open problems: +- What to do with out-of-band data? + +BaseServer: +- split generic "request" functionality out into BaseServer class. + Copyright (C) 2000 Luke Kenneth Casson Leighton + + example: read entries from a SQL database (requires overriding + get_request() to return a table entry from the database). + entry is processed by a RequestHandlerClass. + +'b'0.4'u'0.4'b'BaseServer'u'BaseServer'b'TCPServer'u'TCPServer'b'UDPServer'u'UDPServer'b'ThreadingUDPServer'u'ThreadingUDPServer'b'ThreadingTCPServer'u'ThreadingTCPServer'b'BaseRequestHandler'u'BaseRequestHandler'b'StreamRequestHandler'u'StreamRequestHandler'b'DatagramRequestHandler'u'DatagramRequestHandler'b'ThreadingMixIn'u'ThreadingMixIn'b'ForkingUDPServer'u'ForkingUDPServer'b'ForkingTCPServer'u'ForkingTCPServer'b'ForkingMixIn'u'ForkingMixIn'b'UnixStreamServer'u'UnixStreamServer'b'UnixDatagramServer'u'UnixDatagramServer'b'ThreadingUnixStreamServer'u'ThreadingUnixStreamServer'b'ThreadingUnixDatagramServer'u'ThreadingUnixDatagramServer'b'Base class for server classes. 
+ + Methods for the caller: + + - __init__(server_address, RequestHandlerClass) + - serve_forever(poll_interval=0.5) + - shutdown() + - handle_request() # if you do not use serve_forever() + - fileno() -> int # for selector + + Methods that may be overridden: + + - server_bind() + - server_activate() + - get_request() -> request, client_address + - handle_timeout() + - verify_request(request, client_address) + - server_close() + - process_request(request, client_address) + - shutdown_request(request) + - close_request(request) + - service_actions() + - handle_error() + + Methods for derived classes: + + - finish_request(request, client_address) + + Class variables that may be overridden by derived classes or + instances: + + - timeout + - address_family + - socket_type + - allow_reuse_address + + Instance variables: + + - RequestHandlerClass + - socket + + 'u'Base class for server classes. + + Methods for the caller: + + - __init__(server_address, RequestHandlerClass) + - serve_forever(poll_interval=0.5) + - shutdown() + - handle_request() # if you do not use serve_forever() + - fileno() -> int # for selector + + Methods that may be overridden: + + - server_bind() + - server_activate() + - get_request() -> request, client_address + - handle_timeout() + - verify_request(request, client_address) + - server_close() + - process_request(request, client_address) + - shutdown_request(request) + - close_request(request) + - service_actions() + - handle_error() + + Methods for derived classes: + + - finish_request(request, client_address) + + Class variables that may be overridden by derived classes or + instances: + + - timeout + - address_family + - socket_type + - allow_reuse_address + + Instance variables: + + - RequestHandlerClass + - socket + + 'b'Constructor. May be extended, do not override.'u'Constructor. May be extended, do not override.'b'Called by constructor to activate the server. + + May be overridden. + + 'u'Called by constructor to activate the server. + + May be overridden. + + 'b'Handle one request at a time until shutdown. + + Polls for shutdown every poll_interval seconds. Ignores + self.timeout. If you need to do periodic tasks, do them in + another thread. + 'u'Handle one request at a time until shutdown. + + Polls for shutdown every poll_interval seconds. Ignores + self.timeout. If you need to do periodic tasks, do them in + another thread. + 'b'Stops the serve_forever loop. + + Blocks until the loop has finished. This must be called while + serve_forever() is running in another thread, or it will + deadlock. + 'u'Stops the serve_forever loop. + + Blocks until the loop has finished. This must be called while + serve_forever() is running in another thread, or it will + deadlock. + 'b'Called by the serve_forever() loop. + + May be overridden by a subclass / Mixin to implement any code that + needs to be run during the loop. + 'u'Called by the serve_forever() loop. + + May be overridden by a subclass / Mixin to implement any code that + needs to be run during the loop. + 'b'Handle one request, possibly blocking. + + Respects self.timeout. + 'u'Handle one request, possibly blocking. + + Respects self.timeout. + 'b'Handle one request, without blocking. + + I assume that selector.select() has returned that the socket is + readable before this function was called, so there should be no risk of + blocking in get_request(). + 'u'Handle one request, without blocking. 
+ + I assume that selector.select() has returned that the socket is + readable before this function was called, so there should be no risk of + blocking in get_request(). + 'b'Called if no new request arrives within self.timeout. + + Overridden by ForkingMixIn. + 'u'Called if no new request arrives within self.timeout. + + Overridden by ForkingMixIn. + 'b'Verify the request. May be overridden. + + Return True if we should proceed with this request. + + 'u'Verify the request. May be overridden. + + Return True if we should proceed with this request. + + 'b'Call finish_request. + + Overridden by ForkingMixIn and ThreadingMixIn. + + 'u'Call finish_request. + + Overridden by ForkingMixIn and ThreadingMixIn. + + 'b'Called to clean-up the server. + + May be overridden. + + 'u'Called to clean-up the server. + + May be overridden. + + 'b'Finish one request by instantiating RequestHandlerClass.'u'Finish one request by instantiating RequestHandlerClass.'b'Called to shutdown and close an individual request.'u'Called to shutdown and close an individual request.'b'Called to clean up an individual request.'u'Called to clean up an individual request.'b'Handle an error gracefully. May be overridden. + + The default is to print a traceback and continue. + + 'u'Handle an error gracefully. May be overridden. + + The default is to print a traceback and continue. + + 'b'Exception happened during processing of request from'u'Exception happened during processing of request from'b'Base class for various socket-based server classes. + + Defaults to synchronous IP stream (i.e., TCP). + + Methods for the caller: + + - __init__(server_address, RequestHandlerClass, bind_and_activate=True) + - serve_forever(poll_interval=0.5) + - shutdown() + - handle_request() # if you don't use serve_forever() + - fileno() -> int # for selector + + Methods that may be overridden: + + - server_bind() + - server_activate() + - get_request() -> request, client_address + - handle_timeout() + - verify_request(request, client_address) + - process_request(request, client_address) + - shutdown_request(request) + - close_request(request) + - handle_error() + + Methods for derived classes: + + - finish_request(request, client_address) + + Class variables that may be overridden by derived classes or + instances: + + - timeout + - address_family + - socket_type + - request_queue_size (only for stream sockets) + - allow_reuse_address + + Instance variables: + + - server_address + - RequestHandlerClass + - socket + + 'u'Base class for various socket-based server classes. + + Defaults to synchronous IP stream (i.e., TCP). 
+ + Methods for the caller: + + - __init__(server_address, RequestHandlerClass, bind_and_activate=True) + - serve_forever(poll_interval=0.5) + - shutdown() + - handle_request() # if you don't use serve_forever() + - fileno() -> int # for selector + + Methods that may be overridden: + + - server_bind() + - server_activate() + - get_request() -> request, client_address + - handle_timeout() + - verify_request(request, client_address) + - process_request(request, client_address) + - shutdown_request(request) + - close_request(request) + - handle_error() + + Methods for derived classes: + + - finish_request(request, client_address) + + Class variables that may be overridden by derived classes or + instances: + + - timeout + - address_family + - socket_type + - request_queue_size (only for stream sockets) + - allow_reuse_address + + Instance variables: + + - server_address + - RequestHandlerClass + - socket + + 'b'Called by constructor to bind the socket. + + May be overridden. + + 'u'Called by constructor to bind the socket. + + May be overridden. + + 'b'Return socket file number. + + Interface required by selector. + + 'u'Return socket file number. + + Interface required by selector. + + 'b'Get the request and client address from the socket. + + May be overridden. + + 'u'Get the request and client address from the socket. + + May be overridden. + + 'b'UDP server class.'u'UDP server class.'b'Mix-in class to handle each request in a new process.'u'Mix-in class to handle each request in a new process.'b'Internal routine to wait for children that have exited.'u'Internal routine to wait for children that have exited.'b'Wait for zombies after self.timeout seconds of inactivity. + + May be extended, do not override. + 'u'Wait for zombies after self.timeout seconds of inactivity. + + May be extended, do not override. + 'b'Collect the zombie child processes regularly in the ForkingMixIn. + + service_actions is called in the BaseServer's serve_forever loop. + 'u'Collect the zombie child processes regularly in the ForkingMixIn. + + service_actions is called in the BaseServer's serve_forever loop. + 'b'Fork a new subprocess to process the request.'u'Fork a new subprocess to process the request.'b' + Joinable list of all non-daemon threads. + 'u' + Joinable list of all non-daemon threads. + 'b' + Degenerate version of _Threads. + 'u' + Degenerate version of _Threads. + 'b'Mix-in class to handle each request in a new thread.'u'Mix-in class to handle each request in a new thread.'b'Same as in BaseServer but as a thread. + + In addition, exception handling is done here. + + 'u'Same as in BaseServer but as a thread. + + In addition, exception handling is done here. + + 'b'Start a new thread to process the request.'u'Start a new thread to process the request.'b'_threads'u'_threads'b'Base class for request handler classes. + + This class is instantiated for each request to be handled. The + constructor sets the instance variables request, client_address + and server, and then calls the handle() method. To implement a + specific service, all you need to do is to derive a class which + defines a handle() method. + + The handle() method can find the request as self.request, the + client address as self.client_address, and the server (in case it + needs access to per-server information) as self.server. Since a + separate instance is created for each request, the handle() method + can define other arbitrary instance variables. + + 'u'Base class for request handler classes. 
+ + This class is instantiated for each request to be handled. The + constructor sets the instance variables request, client_address + and server, and then calls the handle() method. To implement a + specific service, all you need to do is to derive a class which + defines a handle() method. + + The handle() method can find the request as self.request, the + client address as self.client_address, and the server (in case it + needs access to per-server information) as self.server. Since a + separate instance is created for each request, the handle() method + can define other arbitrary instance variables. + + 'b'Define self.rfile and self.wfile for stream sockets.'u'Define self.rfile and self.wfile for stream sockets.'b'Simple writable BufferedIOBase implementation for a socket + + Does not hold data in a buffer, avoiding any need to call flush().'u'Simple writable BufferedIOBase implementation for a socket + + Does not hold data in a buffer, avoiding any need to call flush().'b'Define self.rfile and self.wfile for datagram sockets.'u'Define self.rfile and self.wfile for datagram sockets.'distutils.spawn + +Provides the 'spawn()' function, a front-end to various platform- +specific functions for launching another program in a sub-process. +Also provides the 'find_executable()' to search the path for a given +executable name. +Run another program, specified as a command list 'cmd', in a new process. + + 'cmd' is just the argument list for the new process, ie. + cmd[0] is the program to run and cmd[1:] are the rest of its arguments. + There is no way to run a program with a name different from that of its + executable. + + If 'search_path' is true (the default), the system's executable + search path will be used to find the program; otherwise, cmd[0] + must be the exact path to the executable. If 'dry_run' is true, + the command will not actually be run. + + Raise DistutilsExecError if running the program fails in any way; just + return on success. + _spawn_posix_spawn_ntdon't know how to spawn programs on platform '%s'_nt_quote_argsQuote command-line arguments for DOS/Windows conventions. + + Just wraps every argument which contains blanks in double quotes, and + returns a new argument list. + command %r failed: %scommand %r failed with exit status %d_cfg_target_cfg_target_splitexec_fncur_target$MACOSX_DEPLOYMENT_TARGET mismatch: now "%s" but "%s" during configure'$MACOSX_DEPLOYMENT_TARGET mismatch: ''now "%s" but "%s" during configure'my_msgunable to execute %r: %s +unable to execute %r for unknown reasonscommand %r terminated by signal %dexit_statusunknown error executing %r: termination status %d# cmd is documented as a list, but just in case some code passes a tuple# in, protect our %-formatting code against horrible death# XXX this doesn't seem very robust to me -- but if the Windows guys# say it'll work, I guess I'll have to accept it. (What if an arg# contains quotes? What other magic characters, other than spaces,# have to be escaped? Is there an escaping mechanism other than# quoting?)# either we find one or it stays the same# spawn for NT requires a full path to the .exe# this seems to happen when the command isn't found# and this reflects the command running but failing# ensure that the deployment target of build process is not less# than that used when the interpreter was built. This ensures# extension modules are built with correct compatibility values# in the child# in the parent# Loop until the child either exits or is terminated by a signal# (ie. 
keep waiting if it's merely stopped)# hey, it succeeded!b'distutils.spawn + +Provides the 'spawn()' function, a front-end to various platform- +specific functions for launching another program in a sub-process. +Also provides the 'find_executable()' to search the path for a given +executable name. +'u'distutils.spawn + +Provides the 'spawn()' function, a front-end to various platform- +specific functions for launching another program in a sub-process. +Also provides the 'find_executable()' to search the path for a given +executable name. +'b'Run another program, specified as a command list 'cmd', in a new process. + + 'cmd' is just the argument list for the new process, ie. + cmd[0] is the program to run and cmd[1:] are the rest of its arguments. + There is no way to run a program with a name different from that of its + executable. + + If 'search_path' is true (the default), the system's executable + search path will be used to find the program; otherwise, cmd[0] + must be the exact path to the executable. If 'dry_run' is true, + the command will not actually be run. + + Raise DistutilsExecError if running the program fails in any way; just + return on success. + 'u'Run another program, specified as a command list 'cmd', in a new process. + + 'cmd' is just the argument list for the new process, ie. + cmd[0] is the program to run and cmd[1:] are the rest of its arguments. + There is no way to run a program with a name different from that of its + executable. + + If 'search_path' is true (the default), the system's executable + search path will be used to find the program; otherwise, cmd[0] + must be the exact path to the executable. If 'dry_run' is true, + the command will not actually be run. + + Raise DistutilsExecError if running the program fails in any way; just + return on success. + 'b'don't know how to spawn programs on platform '%s''u'don't know how to spawn programs on platform '%s''b'Quote command-line arguments for DOS/Windows conventions. + + Just wraps every argument which contains blanks in double quotes, and + returns a new argument list. + 'u'Quote command-line arguments for DOS/Windows conventions. + + Just wraps every argument which contains blanks in double quotes, and + returns a new argument list. + 'b'command %r failed: %s'u'command %r failed: %s'b'command %r failed with exit status %d'u'command %r failed with exit status %d'b'$MACOSX_DEPLOYMENT_TARGET mismatch: now "%s" but "%s" during configure'u'$MACOSX_DEPLOYMENT_TARGET mismatch: now "%s" but "%s" during configure'b'unable to execute %r: %s +'u'unable to execute %r: %s +'b'unable to execute %r for unknown reasons'u'unable to execute %r for unknown reasons'b'command %r terminated by signal %d'u'command %r terminated by signal %d'b'unknown error executing %r: termination status %d'u'unknown error executing %r: termination status %d'u'distutils.spawn'get_command_lineWINEXEWINSERVICEpythonservice.exepython.exe_python_exeis_forking + Return whether commandline indicates we are forking + --multiprocessing-fork + Run code for process object if this in not the main process + spawn_main + Returns prefix of command line used for spawning a child process + from multiprocessing.spawn import spawn_main; spawn_main(%s)pipe_handleparent_pidtracker_fd + Run code specified by data received over pipe + Not forkingSYNCHRONIZEnew_handleopen_osfhandlefrom_parentpreparation_data_check_not_importing_main + An attempt has been made to start a new process before the + current process has finished its bootstrapping phase. 
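The distutils.spawn docstrings above describe spawn(), which runs a command list in a subprocess, and find_executable(), which searches the PATH. A minimal sketch of the two together; note that distutils was removed in Python 3.12, where shutil.which() and subprocess.run() are the usual replacements:

    from distutils.spawn import find_executable, spawn

    # find_executable() searches the PATH much like shutil.which() does.
    git = find_executable("git")
    if git is not None:
        # spawn() raises DistutilsExecError if the command fails in any way;
        # dry_run=1 would only log the command instead of running it.
        spawn([git, "--version"], dry_run=0)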
+ + This probably means that you are not using fork to start your + child processes and you have forgotten to use the proper idiom + in the main module: + + if __name__ == '__main__': + freeze_support() + ... + + The "freeze_support()" line can be omitted if the program + is not going to be frozen to produce an executable. + Return info about parent needed by child to unpickle process object + _log_to_stderr_loggerlog_levelsys_argvorig_dirstart_methodmain_modulemain_mod_nameinit_main_from_nameinit_main_from_pathold_main_modules + Try to get current process ready to unpickle process object + _fixup_main_from_name_fixup_main_from_pathcurrent_mainmain_contentipython + Set sys.modules['__main__'] to module at main_path + # Code used to start processes when using the spawn or forkserver# start methods.# multiprocessing/spawn.py# _python_exe is the assumed path to the python executable.# People embedding Python want to modify it.# Figure out whether to initialise main in the subprocess as a module# or through direct execution (or to leave it alone entirely)# Prepare current process# Multiprocessing module helpers to fix up the main module in# spawned subprocesses# __main__.py files for packages, directories, zip archives, etc, run# their "main only" code unconditionally, so we don't even try to# populate anything in __main__, nor do we make any changes to# __main__ attributes# If this process was forked, __main__ may already be populated# Otherwise, __main__ may contain some non-main code where we need to# support unpickling it properly. We rerun it as __mp_main__ and make# the normal __main__ an alias to that# Unfortunately, the main ipython launch script historically had no# "if __name__ == '__main__'" guard, so we work around that# by treating it like a __main__.py file# See https://github.com/ipython/ipython/issues/4698# Otherwise, if __file__ already has the setting we expect,# there's nothing more to do# If the parent process has sent a path through rather than a module# name we assume it is an executable script that may contain# non-main code that needs to be executedb'_main'u'_main'b'freeze_support'u'freeze_support'b'set_executable'u'set_executable'b'get_executable'u'get_executable'b'get_preparation_data'u'get_preparation_data'b'get_command_line'u'get_command_line'b'import_main_path'u'import_main_path'b'pythonservice.exe'u'pythonservice.exe'b'python.exe'u'python.exe'b' + Return whether commandline indicates we are forking + 'u' + Return whether commandline indicates we are forking + 'b'--multiprocessing-fork'u'--multiprocessing-fork'b' + Run code for process object if this in not the main process + 'u' + Run code for process object if this in not the main process + 'b' + Returns prefix of command line used for spawning a child process + 'u' + Returns prefix of command line used for spawning a child process + 'b'from multiprocessing.spawn import spawn_main; spawn_main(%s)'u'from multiprocessing.spawn import spawn_main; spawn_main(%s)'b' + Run code specified by data received over pipe + 'u' + Run code specified by data received over pipe + 'b'Not forking'u'Not forking'b'_inheriting'u'_inheriting'b' + An attempt has been made to start a new process before the + current process has finished its bootstrapping phase. + + This probably means that you are not using fork to start your + child processes and you have forgotten to use the proper idiom + in the main module: + + if __name__ == '__main__': + freeze_support() + ... 
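The bootstrapping-phase message reproduced above is what multiprocessing reports when a child started with the spawn method re-imports a main module that launches processes at import time. A minimal sketch of the idiom it asks for (the worker function and queue payload are illustrative):

    import multiprocessing as mp

    def worker(q):
        q.put("hello from child")

    if __name__ == "__main__":
        # The guard keeps the child, which re-imports this module under the
        # spawn/forkserver start methods, from launching more children.
        mp.freeze_support()            # no-op unless frozen into an executable
        mp.set_start_method("spawn")
        q = mp.Queue()
        p = mp.Process(target=worker, args=(q,))
        p.start()
        print(q.get())
        p.join()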
+ + The "freeze_support()" line can be omitted if the program + is not going to be frozen to produce an executable.'u' + An attempt has been made to start a new process before the + current process has finished its bootstrapping phase. + + This probably means that you are not using fork to start your + child processes and you have forgotten to use the proper idiom + in the main module: + + if __name__ == '__main__': + freeze_support() + ... + + The "freeze_support()" line can be omitted if the program + is not going to be frozen to produce an executable.'b' + Return info about parent needed by child to unpickle process object + 'u' + Return info about parent needed by child to unpickle process object + 'b'log_level'u'log_level'b'init_main_from_name'u'init_main_from_name'b'init_main_from_path'u'init_main_from_path'b' + Try to get current process ready to unpickle process object + 'u' + Try to get current process ready to unpickle process object + 'b'log_to_stderr'u'log_to_stderr'b'sys_argv'u'sys_argv'b'dir'u'dir'b'orig_dir'u'orig_dir'b'start_method'u'start_method'b'ipython'u'ipython'b' + Set sys.modules['__main__'] to module at main_path + 'u' + Set sys.modules['__main__'] to module at main_path + 'u'multiprocessing.spawn'Internal support module for sreSRE module mismatchLITERALNOT_LITERAL_LITERAL_CODESREPEATMIN_REPEATMAX_REPEAT_REPEATING_CODES_SUCCESS_CODESASSERTASSERT_NOT_ASSERT_CODESANYIN_UNIT_CODES0x690x1310x733830x17f0xb50x3bc8370x3450x3b981260x1fbe9120x39081470x1fd39440x3b081630x1fe30x3b29760x3d00x3b50x3f50x3b80x3d10x3ba10080x3f00x3c00x3d60x3c110090x3f10x3c20x3c30x3c69810x3d577770x1e6178350x1e9b642610xfb05642620xfb06_equivalences_ignorecase_fixes_combine_flagsTYPE_FLAGSadd_flagsdel_flagsLITERAL_CODESREPEATING_CODESSUCCESS_CODESASSERT_CODESiscasedtolowerfixesavOP_LOCALE_IGNOREOP_IGNOREOP_UNICODE_IGNOREIN_UNI_IGNORENEGATE_optimize_charsethascasedIN_LOC_IGNOREIN_IGNORE_compile_charsetANY_ALLinternal: unsupported template operator %r_simpleREPEAT_ONEMIN_REPEAT_ONEMAX_UNTILMIN_UNTILgetwidthlook-behind requires fixed-width patternCALLATAT_MULTILINEAT_LOCALEAT_UNICODEtailappendJUMPCATEGORYCH_LOCALECH_UNICODEGROUPREFGROUPREF_LOC_IGNOREGROUPREF_IGNOREGROUPREF_UNI_IGNOREGROUPREF_EXISTSskipyesskipnointernal: unsupported operand type %rRANGERANGE_UNI_IGNORECHARSETBIGCHARSETinternal: unsupported set operator %rfixupcharmap652800xff00runs_mk_bitmap_bytes_to_codes_CODEBITSMAXCODE_BITS_TRANS_generate_overlap_table + Generate an overlap table for the following prefix. + An overlap table is a table of the same size as the prefix which + informs about the potential self-overlap for each index in the prefix: + - if overlap[i] == 0, prefix[i:] can't overlap prefix[0:...] + - if overlap[i] == k with 0 < k <= i, prefix[i-k+1:i+1] overlaps with + prefix[0:k] + _get_iscased_get_literal_prefixprefixappendprefix_skipflags1prefix_skip1got_all_get_charset_prefixcharsetappend_compile_infoSRE_INFO_PREFIXSRE_INFO_LITERALSRE_INFO_CHARSET_code_hex_code%#0*xdis_print_(to %d)%*d%s print_2OPCODESLITERAL_IGNORENOT_LITERAL_IGNORELITERAL_UNI_IGNORENOT_LITERAL_UNI_IGNORELITERAL_LOC_IGNORENOT_LITERAL_LOC_IGNORE%#02x (%r)ATCODESAT_CHCODESCATEGORY_%#02x %#02x (%r-%r)prefix_len prefix_skip prefix%#02x(%r) overlapgroupindexindexgroup# convert template to internal format# Copyright (c) 1997-2001 by Secret Labs AB. 
All rights reserved.# See the sre.py file for information on usage and redistribution.# Sets of lowercase characters which have the same uppercase.# LATIN SMALL LETTER I, LATIN SMALL LETTER DOTLESS I# iı# LATIN SMALL LETTER S, LATIN SMALL LETTER LONG S# sſ# MICRO SIGN, GREEK SMALL LETTER MU# µμ# COMBINING GREEK YPOGEGRAMMENI, GREEK SMALL LETTER IOTA, GREEK PROSGEGRAMMENI# \u0345ιι# GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS, GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA# ΐΐ# GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS, GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA# ΰΰ# GREEK SMALL LETTER BETA, GREEK BETA SYMBOL# βϐ# GREEK SMALL LETTER EPSILON, GREEK LUNATE EPSILON SYMBOL# εϵ# GREEK SMALL LETTER THETA, GREEK THETA SYMBOL# θϑ# GREEK SMALL LETTER KAPPA, GREEK KAPPA SYMBOL# κϰ# GREEK SMALL LETTER PI, GREEK PI SYMBOL# πϖ# GREEK SMALL LETTER RHO, GREEK RHO SYMBOL# ρϱ# GREEK SMALL LETTER FINAL SIGMA, GREEK SMALL LETTER SIGMA# ςσ# GREEK SMALL LETTER PHI, GREEK PHI SYMBOL# φϕ# LATIN SMALL LETTER S WITH DOT ABOVE, LATIN SMALL LETTER LONG S WITH DOT ABOVE# ṡẛ# LATIN SMALL LIGATURE LONG S T, LATIN SMALL LIGATURE ST# ſtst# Maps the lowercase code to lowercase codes which have the same uppercase.# internal: compile a (sub)pattern# ascii# _compile_info(code, p, _combine_flags(flags, add_flags, del_flags))# look ahead# look behind# _compile_info(code, av, flags)# end of branch# compile charset subprogram# internal: optimize character set# character set contains non-UCS1 character codes# Character set contains non-BMP character codes.# There are only two ranges of cased non-BMP characters:# 10400-1044F (Deseret) and 118A0-118DF (Warang Citi),# and for both ranges RANGE_UNI_IGNORE works.# compress character map# use literal/range# if the case was changed or new representation is more compact# else original character set is good enough# use bitmap# To represent a big charset, first a bitmap of all characters in the# set is constructed. Then, this bitmap is sliced into chunks of 256# characters, duplicate chunks are eliminated, and each chunk is# given a number. In the compiled expression, the charset is# represented by a 32-bit word sequence, consisting of one word for# the number of different chunks, a sequence of 256 bytes (64 words)# of chunk numbers indexed by their original chunk position, and a# sequence of 256-bit chunks (8 words each).# Compression is normally good: in a typical charset, large ranges of# Unicode will be either completely excluded (e.g. if only cyrillic# letters are to be matched), or completely included (e.g. if large# subranges of Kanji match). These ranges will be represented by# chunks of all one-bits or all zero-bits.# Matching can be also done efficiently: the more significant byte of# the Unicode character is an index into the chunk number, and the# less significant byte is a bit index in the chunk (just like the# CHARSET matching).# should be hashable# Convert block indices to word array# check if this subpattern is a "simple" operator# look for literal prefix# internal: compile an info block. 
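The comments above outline how sre_compile stores a large character set: a bitmap is sliced into 256-codepoint chunks, duplicate chunks are stored once, and a per-block index maps each block to its chunk. The sketch below mimics that layout for membership tests only; it is a simplified illustration, not the engine's actual BIGCHARSET encoding.

    def build_bigcharset(codepoints):
        # One 32-byte bitmap per 256-codepoint block (BMP only), deduplicated.
        chunks, index, seen = [], [], {}
        for block in range(256):
            bits = bytearray(32)
            for cp in range(block * 256, (block + 1) * 256):
                if cp in codepoints:
                    bits[(cp & 0xFF) >> 3] |= 1 << (cp & 7)
            key = bytes(bits)
            if key not in seen:                 # duplicate chunks stored once
                seen[key] = len(chunks)
                chunks.append(key)
            index.append(seen[key])
        return index, chunks

    def in_bigcharset(index, chunks, cp):
        chunk = chunks[index[cp >> 8]]
        return bool(chunk[(cp & 0xFF) >> 3] & (1 << (cp & 7)))

    idx, data = build_bigcharset({0x41, 0x410, 0x4E2D})    # 'A', 'А', '中'
    assert in_bigcharset(idx, data, 0x410)
    assert not in_bigcharset(idx, data, 0x42)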
in the current version,# this contains min/max pattern width, and an optional literal# prefix or a character map# look for a literal prefix# not used# if no prefix, look for charset prefix## if prefix:## print("*** PREFIX", prefix, prefix_skip)## if charset:## print("*** CHARSET", charset)# add an info block# literal flag# pattern length# add literal prefix# length# skip# generate overlap table# compile info block# compile the pattern# internal: convert pattern list to internal format# map in either directionb'Internal support module for sre'u'Internal support module for sre'b'SRE module mismatch'u'SRE module mismatch'b'internal: unsupported template operator %r'u'internal: unsupported template operator %r'b'look-behind requires fixed-width pattern'u'look-behind requires fixed-width pattern'b'internal: unsupported operand type %r'u'internal: unsupported operand type %r'b'internal: unsupported set operator %r'u'internal: unsupported set operator %r'b' + Generate an overlap table for the following prefix. + An overlap table is a table of the same size as the prefix which + informs about the potential self-overlap for each index in the prefix: + - if overlap[i] == 0, prefix[i:] can't overlap prefix[0:...] + - if overlap[i] == k with 0 < k <= i, prefix[i-k+1:i+1] overlaps with + prefix[0:k] + 'u' + Generate an overlap table for the following prefix. + An overlap table is a table of the same size as the prefix which + informs about the potential self-overlap for each index in the prefix: + - if overlap[i] == 0, prefix[i:] can't overlap prefix[0:...] + - if overlap[i] == k with 0 < k <= i, prefix[i-k+1:i+1] overlaps with + prefix[0:k] + 'b'%#0*x'u'%#0*x'b'(to %d)'u'(to %d)'b'%*d%s 'u'%*d%s 'b'%#02x (%r)'u'%#02x (%r)'b'AT_'u'AT_'b'CATEGORY_'u'CATEGORY_'b'%#02x %#02x (%r-%r)'u'%#02x %#02x (%r-%r)'b'branch'u'branch'b'MAXREPEAT'u'MAXREPEAT'b' prefix_skip'u' prefix_skip'b' prefix'u' prefix'b'%#02x'u'%#02x'b'(%r)'u'(%r)'b' overlap'u' overlap'u'sre_compile'Exception raised for invalid regular expressions. 
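The _generate_overlap_table docstring quoted in the pool defines the overlap table computed for a literal prefix: overlap[i] == k means prefix[i-k+1:i+1] equals prefix[0:k]. That is the classic KMP failure function; below is a sketch satisfying that definition, an illustrative reimplementation rather than the module's own code:

    def generate_overlap_table(prefix):
        # table[i] = length of the longest proper suffix of prefix[:i+1]
        # that is also a prefix of the whole string.
        table = [0] * len(prefix)
        for i in range(1, len(prefix)):
            k = table[i - 1]
            while k > 0 and prefix[i] != prefix[k]:
                k = table[k - 1]
            if prefix[i] == prefix[k]:
                k += 1
            table[i] = k
        return table

    assert generate_overlap_table("abcabcab") == [0, 0, 0, 1, 2, 3, 4, 5]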
+ + Attributes: + + msg: The unformatted error message + pattern: The regular expression pattern + pos: The index in the pattern where compilation failed (may be None) + lineno: The line corresponding to pos (may be None) + colno: The column corresponding to pos (may be None) + %s at position %dcolno%s (line %d, column %d)_NamedIntConstant_makecodes + FAILURE SUCCESS + + ANY ANY_ALL + ASSERT ASSERT_NOT + AT + BRANCH + CALL + CATEGORY + CHARSET BIGCHARSET + GROUPREF GROUPREF_EXISTS + IN + INFO + JUMP + LITERAL + MARK + MAX_UNTIL + MIN_UNTIL + NOT_LITERAL + NEGATE + RANGE + REPEAT + REPEAT_ONE + SUBPATTERN + MIN_REPEAT_ONE + + GROUPREF_IGNORE + IN_IGNORE + LITERAL_IGNORE + NOT_LITERAL_IGNORE + + GROUPREF_LOC_IGNORE + IN_LOC_IGNORE + LITERAL_LOC_IGNORE + NOT_LITERAL_LOC_IGNORE + + GROUPREF_UNI_IGNORE + IN_UNI_IGNORE + LITERAL_UNI_IGNORE + NOT_LITERAL_UNI_IGNORE + RANGE_UNI_IGNORE + + MIN_REPEAT MAX_REPEAT + + AT_BEGINNING AT_BEGINNING_LINE AT_BEGINNING_STRING + AT_BOUNDARY AT_NON_BOUNDARY + AT_END AT_END_LINE AT_END_STRING + + AT_LOC_BOUNDARY AT_LOC_NON_BOUNDARY + + AT_UNI_BOUNDARY AT_UNI_NON_BOUNDARY + + CATEGORY_DIGIT CATEGORY_NOT_DIGIT + CATEGORY_SPACE CATEGORY_NOT_SPACE + CATEGORY_WORD CATEGORY_NOT_WORD + CATEGORY_LINEBREAK CATEGORY_NOT_LINEBREAK + + CATEGORY_LOC_WORD CATEGORY_LOC_NOT_WORD + + CATEGORY_UNI_DIGIT CATEGORY_UNI_NOT_DIGIT + CATEGORY_UNI_SPACE CATEGORY_UNI_NOT_SPACE + CATEGORY_UNI_WORD CATEGORY_UNI_NOT_WORD + CATEGORY_UNI_LINEBREAK CATEGORY_UNI_NOT_LINEBREAK +AT_BEGINNING_LINEAT_BEGINNINGAT_END_LINEAT_ENDAT_LOC_BOUNDARYAT_BOUNDARYAT_LOC_NON_BOUNDARYAT_NON_BOUNDARYAT_UNI_BOUNDARYAT_UNI_NON_BOUNDARYCATEGORY_DIGITCATEGORY_NOT_DIGITCATEGORY_SPACECATEGORY_NOT_SPACECATEGORY_LOC_WORDCATEGORY_WORDCATEGORY_LOC_NOT_WORDCATEGORY_NOT_WORDCATEGORY_LINEBREAKCATEGORY_NOT_LINEBREAKCATEGORY_UNI_DIGITCATEGORY_UNI_NOT_DIGITCATEGORY_UNI_SPACECATEGORY_UNI_NOT_SPACECATEGORY_UNI_WORDCATEGORY_UNI_NOT_WORDCATEGORY_UNI_LINEBREAKCATEGORY_UNI_NOT_LINEBREAK#define %s_%s %d +sre_constants.h/* + * Secret Labs' Regular Expression Engine + * + * regular expression matching engine + * + * NOTE: This file is generated by sre_constants.py. If you need + * to change anything in here, edit sre_constants.py and run it. + * + * Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved. + * + * See the _sre.c file for information on usage and redistribution. + */ + +#define SRE_MAGIC %d +SRE_OPSRE#define SRE_FLAG_TEMPLATE %d +#define SRE_FLAG_IGNORECASE %d +#define SRE_FLAG_LOCALE %d +#define SRE_FLAG_MULTILINE %d +#define SRE_FLAG_DOTALL %d +#define SRE_FLAG_UNICODE %d +#define SRE_FLAG_VERBOSE %d +#define SRE_FLAG_DEBUG %d +#define SRE_FLAG_ASCII %d +#define SRE_INFO_PREFIX %d +#define SRE_INFO_LITERAL %d +#define SRE_INFO_CHARSET %d +# various symbols used by the regular expression engine.# run this script to update the _sre include files!# update when constants are added or removed# SRE standard exception (access as sre.error)# should this really be here?# operators# failure=0 success=1 (just because it looks better that way :-)# remove MIN_REPEAT and MAX_REPEAT# positions# categories# replacement operations for "ignore case" mode# flags# template mode (disable backtracking)# case insensitive# honour system locale# treat target as multiline string# treat target as a single string# use unicode "locale"# use ascii "locale"# flags for INFO primitive# has prefix# entire pattern is literal (given by prefix)# pattern starts with character from given setb'Exception raised for invalid regular expressions. 
+[Flattened dump of machine-generated CodeQL database string-pool data omitted. These pool pages intern source strings from the extracted Python standard library (docstrings, error messages, format strings, and comments from modules such as sre_constants, sre_parse, ssl, and asyncio's sslproto). They are produced by the CodeQL extractor, carry no hand-written changes, and are not intended for line-by-line review.]
After all buffered data is flushed, the + protocol's connection_lost() method will (eventually) called + with None as its argument. + 'u'Close the transport. + + Buffered data will be flushed asynchronously. No more data + will be received. After all buffered data is flushed, the + protocol's connection_lost() method will (eventually) called + with None as its argument. + 'b'SSL transport has not been initialized yet'u'SSL transport has not been initialized yet'b'Pause the receiving end. + + No data will be passed to the protocol's data_received() + method until resume_reading() is called. + 'u'Pause the receiving end. + + No data will be passed to the protocol's data_received() + method until resume_reading() is called. + 'b'Resume the receiving end. + + Data received will once again be passed to the protocol's + data_received() method. + 'u'Resume the receiving end. + + Data received will once again be passed to the protocol's + data_received() method. + 'b'Set the high- and low-water limits for write flow control. + + These two values control when to call the protocol's + pause_writing() and resume_writing() methods. If specified, + the low-water limit must be less than or equal to the + high-water limit. Neither value can be negative. + + The defaults are implementation-specific. If only the + high-water limit is given, the low-water limit defaults to an + implementation-specific value less than or equal to the + high-water limit. Setting high to zero forces low to zero as + well, and causes pause_writing() to be called whenever the + buffer becomes non-empty. Setting low to zero causes + resume_writing() to be called only once the buffer is empty. + Use of zero for either limit is generally sub-optimal as it + reduces opportunities for doing I/O and computation + concurrently. + 'u'Set the high- and low-water limits for write flow control. + + These two values control when to call the protocol's + pause_writing() and resume_writing() methods. If specified, + the low-water limit must be less than or equal to the + high-water limit. Neither value can be negative. + + The defaults are implementation-specific. If only the + high-water limit is given, the low-water limit defaults to an + implementation-specific value less than or equal to the + high-water limit. Setting high to zero forces low to zero as + well, and causes pause_writing() to be called whenever the + buffer becomes non-empty. Setting low to zero causes + resume_writing() to be called only once the buffer is empty. + Use of zero for either limit is generally sub-optimal as it + reduces opportunities for doing I/O and computation + concurrently. + 'b'Return the current size of the write buffer.'u'Return the current size of the write buffer.'b'Write some data bytes to the transport. + + This does not block; it buffers the data and arranges for it + to be sent out asynchronously. + 'u'Write some data bytes to the transport. + + This does not block; it buffers the data and arranges for it + to be sent out asynchronously. + 'b'data: expecting a bytes-like instance, got 'u'data: expecting a bytes-like instance, got 'b'Return True if this transport supports write_eof(), False if not.'u'Return True if this transport supports write_eof(), False if not.'b'Close the transport immediately. + + Buffered data will be lost. No more data will be received. + The protocol's connection_lost() method will (eventually) be + called with None as its argument. + 'u'Close the transport immediately. + + Buffered data will be lost. 
No more data will be received. + The protocol's connection_lost() method will (eventually) be + called with None as its argument. + 'b'SSL protocol. + + Implementation of SSL on top of a socket using incoming and outgoing + buffers which are ssl.MemoryBIO objects. + 'u'SSL protocol. + + Implementation of SSL on top of a socket using incoming and outgoing + buffers which are ssl.MemoryBIO objects. + 'b'stdlib ssl module not available'u'stdlib ssl module not available'b'ssl_handshake_timeout should be a positive number, got 'u'ssl_handshake_timeout should be a positive number, got 'b'Called when the low-level connection is made. + + Start the SSL handshake. + 'u'Called when the low-level connection is made. + + Start the SSL handshake. + 'b'Called when the low-level connection is lost or closed. + + The argument is an exception object or None (the latter + meaning a regular EOF is received or the connection was + aborted or closed). + 'u'Called when the low-level connection is lost or closed. + + The argument is an exception object or None (the latter + meaning a regular EOF is received or the connection was + aborted or closed). + 'b'_handshake_timeout_handle'u'_handshake_timeout_handle'b'Called when the low-level transport's buffer goes over + the high-water mark. + 'u'Called when the low-level transport's buffer goes over + the high-water mark. + 'b'Called when the low-level transport's buffer drains below + the low-water mark. + 'u'Called when the low-level transport's buffer drains below + the low-water mark. + 'b'Called when some SSL data is received. + + The argument is a bytes object. + 'u'Called when some SSL data is received. + + The argument is a bytes object. + 'b'SSL error in data received'u'SSL error in data received'b'application protocol failed to receive SSL data'u'application protocol failed to receive SSL data'b'Called when the other end of the low-level stream + is half-closed. + + If this returns a false value (including None), the transport + will close itself. If it returns a true value, closing the + transport is up to the protocol. + 'u'Called when the other end of the low-level stream + is half-closed. + + If this returns a false value (including None), the transport + will close itself. If it returns a true value, closing the + transport is up to the protocol. + 'b'returning true from eof_received() has no effect when using ssl'u'returning true from eof_received() has no effect when using ssl'b'%r starts SSL handshake'u'%r starts SSL handshake'b'SSL handshake is taking longer than 'u'SSL handshake is taking longer than 'b' seconds: aborting the connection'u' seconds: aborting the connection'b'SSL handshake failed on verifying the certificate'u'SSL handshake failed on verifying the certificate'b'SSL handshake failed'u'SSL handshake failed'b'%r: SSL handshake took %.1f ms'u'%r: SSL handshake took %.1f ms'b'Fatal error on SSL transport'u'Fatal error on SSL transport'u'asyncio.sslproto'u'sslproto'Support for running coroutines in parallel with staggered start times.exceptions_modOptionalAnycoro_fnsRun coroutines with staggered start times and take the first to finish. + + This method takes an iterable of coroutine functions. The first one is + started immediately. From then on, whenever the immediately preceding one + fails (raises an exception), or when *delay* seconds has passed, the next + coroutine is started. This continues until one of the coroutines complete + successfully, in which case all others are cancelled, or until all + coroutines fail. 
+ + The coroutines provided should be well-behaved in the following way: + + * They should only ``return`` if completed successfully. + + * They should always raise an exception if they did not complete + successfully. In particular, if they handle cancellation, they should + probably reraise, like this:: + + try: + # do work + except asyncio.CancelledError: + # undo partially completed work + raise + + Args: + coro_fns: an iterable of coroutine functions, i.e. callables that + return a coroutine object when called. Use ``functools.partial`` or + lambdas to pass arguments. + + delay: amount of time, in seconds, between starting coroutines. If + ``None``, the coroutines will run sequentially. + + loop: the event loop to use. + + Returns: + tuple *(winner_result, winner_index, exceptions)* where + + - *winner_result*: the result of the winning coroutine, or ``None`` + if no coroutines won. + + - *winner_index*: the index of the winning coroutine in + ``coro_fns``, or ``None`` if no coroutines won. If the winning + coroutine may return None on success, *winner_index* can be used + to definitively determine whether any coroutine won. + + - *exceptions*: list of exceptions returned by the coroutines. + ``len(exceptions)`` is equal to the number of coroutines actually + started, and the order is the same as in ``coro_fns``. The winning + coroutine's entry is ``None``. + + enum_coro_fnswinner_resultwinner_indexrunning_tasksrun_one_coroprevious_failedthis_indexcoro_fnthis_failednext_taskfirst_taskdone_count# TODO: when we have aiter() and anext(), allow async iterables in coro_fns.# Wait for the previous task to finish, or for delay seconds# Use asyncio.wait_for() instead of asyncio.wait() here, so# that if we get cancelled at this point, Event.wait() is also# cancelled, otherwise there will be a "Task destroyed but it is# pending" later.# Get the next coroutine to run# Start task that will run the next coroutine# Prepare place to put this coroutine's exceptions if not won# Kickstart the next coroutine# Store winner's results# Cancel all other tasks. We take care to not cancel the current# task as well. If we do so, then since there is no `await` after# here and CancelledError are usually thrown at one, we will# encounter a curious corner case where the current task will end# up as done() == True, cancelled() == False, exception() ==# asyncio.CancelledError. This behavior is specified in# https://bugs.python.org/issue30048# Wait for a growing list of tasks to all finish: poor man's version of# curio's TaskGroup or trio's nursery# If run_one_coro raises an unhandled exception, it's probably a# programming error, and I want to see it.# Make sure no tasks are left running if we leave this functionb'Support for running coroutines in parallel with staggered start times.'u'Support for running coroutines in parallel with staggered start times.'b'staggered_race'u'staggered_race'b'Run coroutines with staggered start times and take the first to finish. + + This method takes an iterable of coroutine functions. The first one is + started immediately. From then on, whenever the immediately preceding one + fails (raises an exception), or when *delay* seconds has passed, the next + coroutine is started. This continues until one of the coroutines complete + successfully, in which case all others are cancelled, or until all + coroutines fail. + + The coroutines provided should be well-behaved in the following way: + + * They should only ``return`` if completed successfully. 
+ + * They should always raise an exception if they did not complete + successfully. In particular, if they handle cancellation, they should + probably reraise, like this:: + + try: + # do work + except asyncio.CancelledError: + # undo partially completed work + raise + + Args: + coro_fns: an iterable of coroutine functions, i.e. callables that + return a coroutine object when called. Use ``functools.partial`` or + lambdas to pass arguments. + + delay: amount of time, in seconds, between starting coroutines. If + ``None``, the coroutines will run sequentially. + + loop: the event loop to use. + + Returns: + tuple *(winner_result, winner_index, exceptions)* where + + - *winner_result*: the result of the winning coroutine, or ``None`` + if no coroutines won. + + - *winner_index*: the index of the winning coroutine in + ``coro_fns``, or ``None`` if no coroutines won. If the winning + coroutine may return None on success, *winner_index* can be used + to definitively determine whether any coroutine won. + + - *exceptions*: list of exceptions returned by the coroutines. + ``len(exceptions)`` is equal to the number of coroutines actually + started, and the order is the same as in ``coro_fns``. The winning + coroutine's entry is ``None``. + + 'u'Run coroutines with staggered start times and take the first to finish. + + This method takes an iterable of coroutine functions. The first one is + started immediately. From then on, whenever the immediately preceding one + fails (raises an exception), or when *delay* seconds has passed, the next + coroutine is started. This continues until one of the coroutines complete + successfully, in which case all others are cancelled, or until all + coroutines fail. + + The coroutines provided should be well-behaved in the following way: + + * They should only ``return`` if completed successfully. + + * They should always raise an exception if they did not complete + successfully. In particular, if they handle cancellation, they should + probably reraise, like this:: + + try: + # do work + except asyncio.CancelledError: + # undo partially completed work + raise + + Args: + coro_fns: an iterable of coroutine functions, i.e. callables that + return a coroutine object when called. Use ``functools.partial`` or + lambdas to pass arguments. + + delay: amount of time, in seconds, between starting coroutines. If + ``None``, the coroutines will run sequentially. + + loop: the event loop to use. + + Returns: + tuple *(winner_result, winner_index, exceptions)* where + + - *winner_result*: the result of the winning coroutine, or ``None`` + if no coroutines won. + + - *winner_index*: the index of the winning coroutine in + ``coro_fns``, or ``None`` if no coroutines won. If the winning + coroutine may return None on success, *winner_index* can be used + to definitively determine whether any coroutine won. + + - *exceptions*: list of exceptions returned by the coroutines. + ``len(exceptions)`` is equal to the number of coroutines actually + started, and the order is the same as in ``coro_fns``. The winning + coroutine's entry is ``None``. + + 'u'asyncio.staggered'u'staggered'Constants/functions for interpreting results of os.stat() and os.lstat(). + +Suggested usage: from stat import * +Return the portion of the file's mode that can be set by + os.chmod(). + 0o7777Return the portion of the file's mode that describes the + file type. 
+ 0o0200000o0600000o0100000o1200000o140000Return True if mode is from a directory.Return True if mode is from a character special device file.Return True if mode is from a block special device file.Return True if mode is from a regular file.Return True if mode is from a FIFO (named pipe).Return True if mode is from a symbolic link.Return True if mode is from a socket.Return True if mode is from a door.Return True if mode is from an event port.Return True if mode is from a whiteout.0o40000o20000o10000o04000o02000o01000o07000o00700o00400o00200o00100o00070o00040o00020o00010x000000010x000000020x000000040x000000080x000000100x000000200x000080000x000100000x000200000x000400000x001000000x00200000_filemode_tableConvert a file's mode to a string of the form '-rwxrwxrwx'.FILE_ATTRIBUTE_ARCHIVEFILE_ATTRIBUTE_COMPRESSEDFILE_ATTRIBUTE_DEVICEFILE_ATTRIBUTE_DIRECTORYFILE_ATTRIBUTE_ENCRYPTEDFILE_ATTRIBUTE_HIDDENFILE_ATTRIBUTE_INTEGRITY_STREAMFILE_ATTRIBUTE_NORMALFILE_ATTRIBUTE_NOT_CONTENT_INDEXEDFILE_ATTRIBUTE_NO_SCRUB_DATAFILE_ATTRIBUTE_OFFLINEFILE_ATTRIBUTE_READONLYFILE_ATTRIBUTE_SPARSE_FILEFILE_ATTRIBUTE_SYSTEMFILE_ATTRIBUTE_TEMPORARYFILE_ATTRIBUTE_VIRTUAL# Indices for stat struct members in the tuple returned by os.stat()# Extract bits from the mode# Constants used as S_IFMT() for various file types# (not all are implemented on all systems)# directory# character device# block device# regular file# fifo (named pipe)# symbolic link# socket file# Fallbacks for uncommon platform-specific constants# Functions to test for each file type# Names for permission bits# set UID bit# set GID bit# file locking enforcement# sticky bit# Unix V7 synonym for S_IRUSR# Unix V7 synonym for S_IWUSR# Unix V7 synonym for S_IXUSR# mask for owner permissions# read by owner# write by owner# execute by owner# mask for group permissions# read by group# write by group# execute by group# mask for others (not in group) permissions# read by others# write by others# execute by others# Names for file flags# do not dump file# file may not be changed# file may only be appended to# directory is opaque when viewed through a union stack# file may not be renamed or deleted# OS X: file is hfs-compressed# OS X: file should not be displayed# file may be archived# file is a snapshot file# Must appear before IFREG and IFDIR as IFSOCK == IFREG | IFDIR# Windows FILE_ATTRIBUTE constants for interpreting os.stat()'s# "st_file_attributes" memberb'Constants/functions for interpreting results of os.stat() and os.lstat(). + +Suggested usage: from stat import * +'u'Constants/functions for interpreting results of os.stat() and os.lstat(). + +Suggested usage: from stat import * +'b'Return the portion of the file's mode that can be set by + os.chmod(). + 'u'Return the portion of the file's mode that can be set by + os.chmod(). + 'b'Return the portion of the file's mode that describes the + file type. + 'u'Return the portion of the file's mode that describes the + file type. 
+ 'b'Return True if mode is from a directory.'u'Return True if mode is from a directory.'b'Return True if mode is from a character special device file.'u'Return True if mode is from a character special device file.'b'Return True if mode is from a block special device file.'u'Return True if mode is from a block special device file.'b'Return True if mode is from a regular file.'u'Return True if mode is from a regular file.'b'Return True if mode is from a FIFO (named pipe).'u'Return True if mode is from a FIFO (named pipe).'b'Return True if mode is from a symbolic link.'u'Return True if mode is from a symbolic link.'b'Return True if mode is from a socket.'u'Return True if mode is from a socket.'b'Return True if mode is from a door.'u'Return True if mode is from a door.'b'Return True if mode is from an event port.'u'Return True if mode is from an event port.'b'Return True if mode is from a whiteout.'u'Return True if mode is from a whiteout.'b'Convert a file's mode to a string of the form '-rwxrwxrwx'.'u'Convert a file's mode to a string of the form '-rwxrwxrwx'.'StreamReaderProtocolopen_connectionopen_unix_connectionstart_unix_server_DEFAULT_LIMITA wrapper for create_connection() returning a (reader, writer) pair. + + The reader returned is a StreamReader instance; the writer is a + StreamWriter instance. + + The arguments are all the usual arguments to create_connection() + except protocol_factory; most common are positional host and port, + with various optional keyword arguments following. + + Additional optional keyword arguments are loop (to set the event loop + instance to use) and limit (to set the buffer limit passed to the + StreamReader). + + (If you want to customize the StreamReader and/or + StreamReaderProtocol classes, just copy the code -- there's + really nothing special here except some convenience.) + client_connected_cbStart a socket server, call back for each client connected. + + The first parameter, `client_connected_cb`, takes two parameters: + client_reader, client_writer. client_reader is a StreamReader + object, while client_writer is a StreamWriter object. This + parameter can either be a plain callback function or a coroutine; + if it is a coroutine, it will be automatically converted into a + Task. + + The rest of the arguments are all the usual arguments to + loop.create_server() except protocol_factory; most common are + positional host and port, with various optional keyword arguments + following. The return value is the same as loop.create_server(). + + Additional optional keyword arguments are loop (to set the event loop + instance to use) and limit (to set the buffer limit passed to the + StreamReader). + + The return value is the same as loop.create_server(), i.e. a + Server object which can be used to stop the service. + Similar to `open_connection` but works with UNIX Domain Sockets.Similar to `start_server` but works with UNIX Domain Sockets.FlowControlMixinReusable flow control logic for StreamWriter.drain(). + + This implements the protocol methods pause_writing(), + resume_writing() and connection_lost(). If the subclass overrides + these it must call the super methods. + + StreamWriter.drain() must wait for _drain_helper() coroutine. + _drain_waiter_connection_lost%r pauses writing%r resumes writing_drain_helperConnection lost_get_close_waiterHelper class to adapt between Protocol and StreamReader. 
+ + (This is a helper class instead of making StreamReader itself a + Protocol subclass, because the StreamReader has other potential + uses, and to prevent the user of the StreamReader to accidentally + call inappropriate methods of the protocol.) + stream_reader_stream_reader_wr_strong_reader_reject_connection_stream_writer_client_connected_cb_over_ssl_stream_readerAn open stream was garbage collected prior to establishing network connection; call "stream.close()" explicitly.'An open stream was garbage collected prior to ''establishing network connection; ''call "stream.close()" explicitly.'set_transportfeed_dataWraps a Transport. + + This exposes write(), writelines(), [can_]write_eof(), + get_extra_info() and close(). It adds drain() which returns an + optional Future on which you can wait for flow control. It also + adds a transport property which references the Transport + directly. + _complete_futtransport=reader=Flush the write buffer. + + The intended use is to write + + w.write(data) + await w.drain() + Limit cannot be <= 0_limit byteslimit=waiter=pausedWakeup read*() functions waiting for data or EOF.Transport already set_maybe_resume_transportat_eofReturn True if the buffer is empty and 'feed_eof' was called.feed_data after feed_eof_wait_for_dataWait until feed_data() or feed_eof() is called. + + If stream was paused, automatically resume it. + () called while another coroutine is already waiting for incoming data'() called while another coroutine is ''already waiting for incoming data'_wait_for_data after EOFRead chunk of data from the stream until newline (b' +') is found. + + On success, return chunk that ends with newline. If only partial + line can be read due to EOF, return incomplete line without + terminating newline. When EOF was reached while no bytes read, empty + bytes object is returned. + + If limit is reached, ValueError will be raised. In that case, if + newline was found, complete line including newline will be removed + from internal buffer. Else, internal buffer will be cleared. Limit is + compared against part of the line without newline. + + If stream was paused, this function will automatically resume it if + needed. + seplenreaduntilRead data from the stream until ``separator`` is found. + + On success, the data and separator will be removed from the + internal buffer (consumed). Returned data will include the + separator at the end. + + Configured stream limit is used to check result. Limit sets the + maximal length of data that can be returned, not counting the + separator. + + If an EOF occurs and the complete separator is still not found, + an IncompleteReadError exception will be raised, and the internal + buffer will be reset. The IncompleteReadError.partial attribute + may contain the separator partially. + + If the data cannot be read because of over limit, a + LimitOverrunError exception will be raised, and the data + will be left in the internal buffer, so it can be read again. + Separator should be at least one-byte stringisepSeparator is not found, and chunk exceed the limitSeparator is found, but chunk is longer than limitRead up to `n` bytes from the stream. + + If n is not provided, or set to -1, read until EOF and return all read + bytes. If the EOF was received and the internal buffer is empty, return + an empty bytes object. + + If n is zero, return empty bytes object immediately. + + If n is positive, this function try to read `n` bytes, and may return + less or equal bytes than requested, but at least one byte. 
If EOF was + received before any byte is read, this function returns empty byte + object. + + Returned value is not limited with limit, configured at stream + creation. + + If stream was paused, this function will automatically resume it if + needed. + blocksreadexactlyRead exactly `n` bytes. + + Raise an IncompleteReadError if EOF is reached before `n` bytes can be + read. The IncompleteReadError.partial attribute of the exception will + contain the partial read bytes. + + if n is zero, return empty bytes object. + + Returned value is not limited with limit, configured at stream + creation. + + If stream was paused, this function will automatically resume it if + needed. + readexactly size can not be less than zeroincomplete# 64 KiB# UNIX Domain Sockets are supported on this platform# Wake up the writer if currently paused.# This is a stream created by the `create_server()` function.# Keep a strong reference to the reader until a connection# is established.# Prevent a warning in SSLProtocol.eof_received:# "returning true from eof_received()# has no effect when using ssl"# Prevent reports about unhandled exceptions.# Better than self._closed._log_traceback = False hack# drain() expects that the reader has an exception() method# Wait for protocol.connection_lost() call# Raise connection closing error if any,# ConnectionResetError otherwise# Yield to the event loop so connection_lost() may be# called. Without this, _drain_helper() would return# immediately, and code that calls# write(...); await drain()# in a loop would never call connection_lost(), so it# would not see an error when the socket is closed.# The line length limit is a security feature;# it also doubles as half the buffer limit.# Whether we're done.# A future used by _wait_for_data()# The transport can't be paused.# We'll just have to buffer all data.# Forget the transport so we don't keep trying.# StreamReader uses a future to link the protocol feed_data() method# to a read coroutine. Running two read coroutines at the same time# would have an unexpected behaviour. It would not possible to know# which coroutine would get the next data.# Waiting for data while paused will make deadlock, so prevent it.# This is essential for readexactly(n) for case when n > self._limit.# Consume whole buffer except last bytes, which length is# one less than seplen. Let's check corner cases with# separator='SEPARATOR':# * we have received almost complete separator (without last# byte). i.e buffer='some textSEPARATO'. In this case we# can safely consume len(separator) - 1 bytes.# * last byte of buffer is first byte of separator, i.e.# buffer='abcdefghijklmnopqrS'. We may safely consume# everything except that last byte, but this require to# analyze bytes of buffer that match partial separator.# This is slow and/or require FSM. For this case our# implementation is not optimal, since require rescanning# of data that is known to not belong to separator. In# real world, separator will not be so long to notice# performance problems. Even when reading MIME-encoded# messages :)# `offset` is the number of bytes from the beginning of the buffer# where there is no occurrence of `separator`.# Loop until we find `separator` in the buffer, exceed the buffer size,# or an EOF has happened.# Check if we now have enough data in the buffer for `separator` to# fit.# `separator` is in the buffer. 
`isep` will be used later# to retrieve the data.# see upper comment for explanation.# Complete message (with full separator) may be present in buffer# even when EOF flag is set. This may happen when the last chunk# adds data which makes separator be found. That's why we check for# EOF *ater* inspecting the buffer.# _wait_for_data() will resume reading if stream was paused.# This used to just loop creating a new waiter hoping to# collect everything in self._buffer, but that would# deadlock if the subprocess sends more than self.limit# bytes. So just call self.read(self._limit) until EOF.# This will work right even if buffer is less than n bytesb'StreamReaderProtocol'u'StreamReaderProtocol'b'open_connection'u'open_connection'b'start_server'u'start_server'b'open_unix_connection'u'open_unix_connection'b'start_unix_server'u'start_unix_server'b'A wrapper for create_connection() returning a (reader, writer) pair. + + The reader returned is a StreamReader instance; the writer is a + StreamWriter instance. + + The arguments are all the usual arguments to create_connection() + except protocol_factory; most common are positional host and port, + with various optional keyword arguments following. + + Additional optional keyword arguments are loop (to set the event loop + instance to use) and limit (to set the buffer limit passed to the + StreamReader). + + (If you want to customize the StreamReader and/or + StreamReaderProtocol classes, just copy the code -- there's + really nothing special here except some convenience.) + 'u'A wrapper for create_connection() returning a (reader, writer) pair. + + The reader returned is a StreamReader instance; the writer is a + StreamWriter instance. + + The arguments are all the usual arguments to create_connection() + except protocol_factory; most common are positional host and port, + with various optional keyword arguments following. + + Additional optional keyword arguments are loop (to set the event loop + instance to use) and limit (to set the buffer limit passed to the + StreamReader). + + (If you want to customize the StreamReader and/or + StreamReaderProtocol classes, just copy the code -- there's + really nothing special here except some convenience.) + 'b'Start a socket server, call back for each client connected. + + The first parameter, `client_connected_cb`, takes two parameters: + client_reader, client_writer. client_reader is a StreamReader + object, while client_writer is a StreamWriter object. This + parameter can either be a plain callback function or a coroutine; + if it is a coroutine, it will be automatically converted into a + Task. + + The rest of the arguments are all the usual arguments to + loop.create_server() except protocol_factory; most common are + positional host and port, with various optional keyword arguments + following. The return value is the same as loop.create_server(). + + Additional optional keyword arguments are loop (to set the event loop + instance to use) and limit (to set the buffer limit passed to the + StreamReader). + + The return value is the same as loop.create_server(), i.e. a + Server object which can be used to stop the service. + 'u'Start a socket server, call back for each client connected. + + The first parameter, `client_connected_cb`, takes two parameters: + client_reader, client_writer. client_reader is a StreamReader + object, while client_writer is a StreamWriter object. 
This + parameter can either be a plain callback function or a coroutine; + if it is a coroutine, it will be automatically converted into a + Task. + + The rest of the arguments are all the usual arguments to + loop.create_server() except protocol_factory; most common are + positional host and port, with various optional keyword arguments + following. The return value is the same as loop.create_server(). + + Additional optional keyword arguments are loop (to set the event loop + instance to use) and limit (to set the buffer limit passed to the + StreamReader). + + The return value is the same as loop.create_server(), i.e. a + Server object which can be used to stop the service. + 'b'Similar to `open_connection` but works with UNIX Domain Sockets.'u'Similar to `open_connection` but works with UNIX Domain Sockets.'b'Similar to `start_server` but works with UNIX Domain Sockets.'u'Similar to `start_server` but works with UNIX Domain Sockets.'b'Reusable flow control logic for StreamWriter.drain(). + + This implements the protocol methods pause_writing(), + resume_writing() and connection_lost(). If the subclass overrides + these it must call the super methods. + + StreamWriter.drain() must wait for _drain_helper() coroutine. + 'u'Reusable flow control logic for StreamWriter.drain(). + + This implements the protocol methods pause_writing(), + resume_writing() and connection_lost(). If the subclass overrides + these it must call the super methods. + + StreamWriter.drain() must wait for _drain_helper() coroutine. + 'b'%r pauses writing'u'%r pauses writing'b'%r resumes writing'u'%r resumes writing'b'Connection lost'u'Connection lost'b'Helper class to adapt between Protocol and StreamReader. + + (This is a helper class instead of making StreamReader itself a + Protocol subclass, because the StreamReader has other potential + uses, and to prevent the user of the StreamReader to accidentally + call inappropriate methods of the protocol.) + 'u'Helper class to adapt between Protocol and StreamReader. + + (This is a helper class instead of making StreamReader itself a + Protocol subclass, because the StreamReader has other potential + uses, and to prevent the user of the StreamReader to accidentally + call inappropriate methods of the protocol.) + 'b'An open stream was garbage collected prior to establishing network connection; call "stream.close()" explicitly.'u'An open stream was garbage collected prior to establishing network connection; call "stream.close()" explicitly.'b'sslcontext'u'sslcontext'b'Wraps a Transport. + + This exposes write(), writelines(), [can_]write_eof(), + get_extra_info() and close(). It adds drain() which returns an + optional Future on which you can wait for flow control. It also + adds a transport property which references the Transport + directly. + 'u'Wraps a Transport. + + This exposes write(), writelines(), [can_]write_eof(), + get_extra_info() and close(). It adds drain() which returns an + optional Future on which you can wait for flow control. It also + adds a transport property which references the Transport + directly. + 'b'transport='u'transport='b'reader='u'reader='b'Flush the write buffer. + + The intended use is to write + + w.write(data) + await w.drain() + 'u'Flush the write buffer. 
+ + The intended use is to write + + w.write(data) + await w.drain() + 'b'Limit cannot be <= 0'u'Limit cannot be <= 0'b' bytes'u' bytes'b'eof'u'eof'b'limit='u'limit='b'waiter='u'waiter='b'paused'u'paused'b'Wakeup read*() functions waiting for data or EOF.'u'Wakeup read*() functions waiting for data or EOF.'b'Transport already set'u'Transport already set'b'Return True if the buffer is empty and 'feed_eof' was called.'u'Return True if the buffer is empty and 'feed_eof' was called.'b'feed_data after feed_eof'u'feed_data after feed_eof'b'Wait until feed_data() or feed_eof() is called. + + If stream was paused, automatically resume it. + 'u'Wait until feed_data() or feed_eof() is called. + + If stream was paused, automatically resume it. + 'b'() called while another coroutine is already waiting for incoming data'u'() called while another coroutine is already waiting for incoming data'b'_wait_for_data after EOF'u'_wait_for_data after EOF'b'Read chunk of data from the stream until newline (b' +') is found. + + On success, return chunk that ends with newline. If only partial + line can be read due to EOF, return incomplete line without + terminating newline. When EOF was reached while no bytes read, empty + bytes object is returned. + + If limit is reached, ValueError will be raised. In that case, if + newline was found, complete line including newline will be removed + from internal buffer. Else, internal buffer will be cleared. Limit is + compared against part of the line without newline. + + If stream was paused, this function will automatically resume it if + needed. + 'u'Read chunk of data from the stream until newline (b' +') is found. + + On success, return chunk that ends with newline. If only partial + line can be read due to EOF, return incomplete line without + terminating newline. When EOF was reached while no bytes read, empty + bytes object is returned. + + If limit is reached, ValueError will be raised. In that case, if + newline was found, complete line including newline will be removed + from internal buffer. Else, internal buffer will be cleared. Limit is + compared against part of the line without newline. + + If stream was paused, this function will automatically resume it if + needed. + 'b'Read data from the stream until ``separator`` is found. + + On success, the data and separator will be removed from the + internal buffer (consumed). Returned data will include the + separator at the end. + + Configured stream limit is used to check result. Limit sets the + maximal length of data that can be returned, not counting the + separator. + + If an EOF occurs and the complete separator is still not found, + an IncompleteReadError exception will be raised, and the internal + buffer will be reset. The IncompleteReadError.partial attribute + may contain the separator partially. + + If the data cannot be read because of over limit, a + LimitOverrunError exception will be raised, and the data + will be left in the internal buffer, so it can be read again. + 'u'Read data from the stream until ``separator`` is found. + + On success, the data and separator will be removed from the + internal buffer (consumed). Returned data will include the + separator at the end. + + Configured stream limit is used to check result. Limit sets the + maximal length of data that can be returned, not counting the + separator. + + If an EOF occurs and the complete separator is still not found, + an IncompleteReadError exception will be raised, and the internal + buffer will be reset. 
The IncompleteReadError.partial attribute + may contain the separator partially. + + If the data cannot be read because of over limit, a + LimitOverrunError exception will be raised, and the data + will be left in the internal buffer, so it can be read again. + 'b'Separator should be at least one-byte string'u'Separator should be at least one-byte string'b'Separator is not found, and chunk exceed the limit'u'Separator is not found, and chunk exceed the limit'b'readuntil'u'readuntil'b'Separator is found, but chunk is longer than limit'u'Separator is found, but chunk is longer than limit'b'Read up to `n` bytes from the stream. + + If n is not provided, or set to -1, read until EOF and return all read + bytes. If the EOF was received and the internal buffer is empty, return + an empty bytes object. + + If n is zero, return empty bytes object immediately. + + If n is positive, this function try to read `n` bytes, and may return + less or equal bytes than requested, but at least one byte. If EOF was + received before any byte is read, this function returns empty byte + object. + + Returned value is not limited with limit, configured at stream + creation. + + If stream was paused, this function will automatically resume it if + needed. + 'u'Read up to `n` bytes from the stream. + + If n is not provided, or set to -1, read until EOF and return all read + bytes. If the EOF was received and the internal buffer is empty, return + an empty bytes object. + + If n is zero, return empty bytes object immediately. + + If n is positive, this function try to read `n` bytes, and may return + less or equal bytes than requested, but at least one byte. If EOF was + received before any byte is read, this function returns empty byte + object. + + Returned value is not limited with limit, configured at stream + creation. + + If stream was paused, this function will automatically resume it if + needed. + 'b'Read exactly `n` bytes. + + Raise an IncompleteReadError if EOF is reached before `n` bytes can be + read. The IncompleteReadError.partial attribute of the exception will + contain the partial read bytes. + + if n is zero, return empty bytes object. + + Returned value is not limited with limit, configured at stream + creation. + + If stream was paused, this function will automatically resume it if + needed. + 'u'Read exactly `n` bytes. + + Raise an IncompleteReadError if EOF is reached before `n` bytes can be + read. The IncompleteReadError.partial attribute of the exception will + contain the partial read bytes. + + if n is zero, return empty bytes object. + + Returned value is not limited with limit, configured at stream + creation. + + If stream was paused, this function will automatically resume it if + needed. + 'b'readexactly size can not be less than zero'u'readexactly size can not be less than zero'b'readexactly'u'readexactly'u'asyncio.streams'u'streams'A collection of string constants. 
+ +Public module variables: + +whitespace -- a string containing all ASCII whitespace +ascii_lowercase -- a string containing all ASCII lowercase letters +ascii_uppercase -- a string containing all ASCII uppercase letters +ascii_letters -- a string containing all ASCII letters +digits -- a string containing all ASCII decimal digits +hexdigits -- a string containing all ASCII hexadecimal digits +octdigits -- a string containing all ASCII octal digits +punctuation -- a string containing all ASCII punctuation characters +printable -- a string containing all ASCII characters considered printable + +ascii_lowercaseascii_uppercasecapwordsoctdigitsprintableabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefABCDEFcapwords(s [,sep]) -> string + + Split the argument into words using split, capitalize each + word using capitalize, and join the capitalized words using + join. If the optional second argument sep is absent or None, + runs of whitespace characters are replaced by a single space + and leading and trailing whitespace are removed, otherwise + sep is used to split and join the words. + + _ChainMap_sentinel_dict_TemplateMetaclass + %(delim)s(?: + (?P%(delim)s) | # Escape sequence of two delimiters + (?P%(id)s) | # delimiter and a Python identifier + {(?P%(bid)s)} | # delimiter and a braced identifier + (?P) # Other ill-formed delimiter exprs + ) + delimiteridpatternbraceidpatternbidA string class for supporting $-substitutions.(?a:[_a-z][_a-z0-9]*)_invalidInvalid placeholder in string: line %d, col %dUnrecognized named group in patternsafe_substitutevformatused_args_vformatcheck_unused_argsrecursion_depthauto_arg_indexMax string recursion exceededliteral_textcannot switch from manual field specification to automatic field numbering'cannot switch from manual field ''specification to automatic field ''numbering'get_fieldarg_usedconvert_fieldformat_fieldget_valueUnknown conversion specifier {0!s}is_attr# Some strings for ctype-style character classification# Functions which aren't available as string methods.# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".##################################################################### r'[a-z]' matches to non-ASCII letters when used with IGNORECASE, but# without the ASCII flag. We can't add re.ASCII to flags because of# backward compatibility. So we use the ?a local flag and [a-z] pattern.# See https://bugs.python.org/issue31672# Search for $$, $identifier, ${identifier}, and any bare $'s# Helper function for .sub()# Check the most common path first.# the Formatter class# see PEP 3101 for details and purpose of this class# The hard parts are reused from the C implementation. 
[Binary string-pool payload omitted: the remaining database files added by this diff contain the CodeQL Python extractor's interned string data, stored as paired b''/u'' literals. The recoverable content is cached source text, docstrings, and comments from the Python standard-library modules string, struct, subprocess, asyncio.subprocess, unittest.suite, and sysconfig; it is not reproduced here.]
We use a dict to get an error# if some suspicious happens.# fall through to standard osname-release-machine representationb'Access to Python's configuration information.'u'Access to Python's configuration information.'b'get_config_h_filename'u'get_config_h_filename'b'get_config_var'u'get_config_var'b'get_config_vars'u'get_config_vars'b'get_makefile_filename'u'get_makefile_filename'b'get_path'u'get_path'b'get_path_names'u'get_path_names'b'get_paths'u'get_paths'b'get_platform'u'get_platform'b'get_python_version'u'get_python_version'b'get_scheme_names'u'get_scheme_names'b'parse_config_h'u'parse_config_h'b'{installed_base}/lib/python{py_version_short}'u'{installed_base}/lib/python{py_version_short}'b'{platbase}/lib/python{py_version_short}'u'{platbase}/lib/python{py_version_short}'b'platstdlib'u'platstdlib'b'{base}/lib/python{py_version_short}/site-packages'u'{base}/lib/python{py_version_short}/site-packages'b'purelib'u'purelib'b'{platbase}/lib/python{py_version_short}/site-packages'u'{platbase}/lib/python{py_version_short}/site-packages'b'platlib'u'platlib'b'{installed_base}/include/python{py_version_short}{abiflags}'u'{installed_base}/include/python{py_version_short}{abiflags}'b'{installed_platbase}/include/python{py_version_short}{abiflags}'u'{installed_platbase}/include/python{py_version_short}{abiflags}'b'platinclude'u'platinclude'b'{base}/bin'u'{base}/bin'b'scripts'u'scripts'b'{base}'u'{base}'b'posix_prefix'u'posix_prefix'b'{installed_base}/lib/python'u'{installed_base}/lib/python'b'{base}/lib/python'u'{base}/lib/python'b'{installed_base}/include/python'u'{installed_base}/include/python'b'posix_home'u'posix_home'b'{installed_base}/Lib'u'{installed_base}/Lib'b'{base}/Lib'u'{base}/Lib'b'{base}/Lib/site-packages'u'{base}/Lib/site-packages'b'{installed_base}/Include'u'{installed_base}/Include'b'{base}/Scripts'u'{base}/Scripts'b'{userbase}/Python{py_version_nodot}'u'{userbase}/Python{py_version_nodot}'b'{userbase}/Python{py_version_nodot}/site-packages'u'{userbase}/Python{py_version_nodot}/site-packages'b'{userbase}/Python{py_version_nodot}/Include'u'{userbase}/Python{py_version_nodot}/Include'b'{userbase}/Python{py_version_nodot}/Scripts'u'{userbase}/Python{py_version_nodot}/Scripts'b'{userbase}'u'{userbase}'b'nt_user'u'nt_user'b'{userbase}/lib/python{py_version_short}'u'{userbase}/lib/python{py_version_short}'b'{userbase}/lib/python{py_version_short}/site-packages'u'{userbase}/lib/python{py_version_short}/site-packages'b'{userbase}/include/python{py_version_short}'u'{userbase}/include/python{py_version_short}'b'{userbase}/bin'u'{userbase}/bin'b'posix_user'u'posix_user'b'{userbase}/lib/python'u'{userbase}/lib/python'b'{userbase}/lib/python/site-packages'u'{userbase}/lib/python/site-packages'b'{userbase}/include'u'{userbase}/include'b'osx_framework_user'u'osx_framework_user'b'%d%d'u'%d%d'b'\pcbuild\win32'u'\pcbuild\win32'b'\pcbuild\amd64'u'\pcbuild\amd64'b'_PYTHON_PROJECT_BASE'u'_PYTHON_PROJECT_BASE'b'Setup'u'Setup'b'Setup.local'u'Setup.local'b'_home'u'_home'b'PCbuild'u'PCbuild'b'{srcdir}/Include'u'{srcdir}/Include'b'{projectbase}/.'u'{projectbase}/.'b'PYTHONUSERBASE'u'PYTHONUSERBASE'b'APPDATA'u'APPDATA'b'Python'u'Python'b'Library'u'Library'b'.local'u'.local'b'Parse a Makefile-style file. + + A dictionary containing name/value pairs is returned. If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. + 'u'Parse a Makefile-style file. + + A dictionary containing name/value pairs is returned. 
If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. + 'b'([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)'u'([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)'b'\$\(([A-Za-z][A-Za-z0-9_]*)\)'u'\$\(([A-Za-z][A-Za-z0-9_]*)\)'b'\${([A-Za-z][A-Za-z0-9_]*)}'u'\${([A-Za-z][A-Za-z0-9_]*)}'b'$$'u'$$'b'PY_'u'PY_'b'Return the path of the Makefile.'u'Return the path of the Makefile.'b'Makefile'u'Makefile'b'abiflags'u'abiflags'b'config-%s%s'u'config-%s%s'b'_multiarch'u'_multiarch'b'_PYTHON_SYSCONFIGDATA_NAME'u'_PYTHON_SYSCONFIGDATA_NAME'b'_sysconfigdata_{abi}_{platform}_{multiarch}'u'_sysconfigdata_{abi}_{platform}_{multiarch}'b'Generate the Python module containing build-time variables.'u'Generate the Python module containing build-time variables.'b'invalid Python installation: unable to open %s'u'invalid Python installation: unable to open %s'b'strerror'u'strerror'b'build/lib.%s-%s'u'build/lib.%s-%s'b'gettotalrefcount'u'gettotalrefcount'b'-pydebug'u'-pydebug'b'# system configuration generated and used by the sysconfig module +'u'# system configuration generated and used by the sysconfig module +'b'build_time_vars = 'u'build_time_vars = 'b'pybuilddir.txt'u'pybuilddir.txt'b'Initialize the module as appropriate for POSIX systems.'u'Initialize the module as appropriate for POSIX systems.'b'build_time_vars'u'build_time_vars'b'Initialize the module as appropriate for NT'u'Initialize the module as appropriate for NT'b'LIBDEST'u'LIBDEST'b'BINLIBDEST'u'BINLIBDEST'b'INCLUDEPY'u'INCLUDEPY'b'EXT_SUFFIX'u'EXT_SUFFIX'b'EXE'u'EXE'b'BINDIR'u'BINDIR'b'Parse a config.h-style file. + + A dictionary containing name/value pairs is returned. If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. + 'u'Parse a config.h-style file. + + A dictionary containing name/value pairs is returned. If an + optional dictionary is passed in as the second argument, it is + used instead of a new dictionary. + 'b'#define ([A-Z][A-Za-z0-9_]+) (.*) +'u'#define ([A-Z][A-Za-z0-9_]+) (.*) +'b'/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/ +'u'/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/ +'b'Return the path of pyconfig.h.'u'Return the path of pyconfig.h.'b'PC'u'PC'b'pyconfig.h'u'pyconfig.h'b'Return a tuple containing the schemes names.'u'Return a tuple containing the schemes names.'b'Return a tuple containing the paths names.'u'Return a tuple containing the paths names.'b'Return a mapping containing an install scheme. + + ``scheme`` is the install scheme name. If not provided, it will + return the default scheme for the current platform. + 'u'Return a mapping containing an install scheme. + + ``scheme`` is the install scheme name. If not provided, it will + return the default scheme for the current platform. + 'b'Return a path corresponding to the scheme. + + ``scheme`` is the install scheme name. + 'u'Return a path corresponding to the scheme. + + ``scheme`` is the install scheme name. + 'b'With no arguments, return a dictionary of all configuration + variables relevant for the current platform. + + On Unix, this means every variable defined in Python's installed Makefile; + On Windows it's a much smaller set. + + With arguments, return a list of values that result from looking up + each argument in the configuration variable dictionary. + 'u'With no arguments, return a dictionary of all configuration + variables relevant for the current platform. + + On Unix, this means every variable defined in Python's installed Makefile; + On Windows it's a much smaller set. 
+ + With arguments, return a list of values that result from looking up + each argument in the configuration variable dictionary. + 'b'prefix'u'prefix'b'exec_prefix'u'exec_prefix'b'py_version'u'py_version'b'py_version_short'u'py_version_short'b'py_version_nodot'u'py_version_nodot'b'installed_base'u'installed_base'b'installed_platbase'u'installed_platbase'b'platbase'u'platbase'b'projectbase'u'projectbase'b'SO'u'SO'b'userbase'u'userbase'b'srcdir'u'srcdir'b'Return the value of a single variable using the dictionary returned by + 'get_config_vars()'. + + Equivalent to get_config_vars().get(name) + 'u'Return the value of a single variable using the dictionary returned by + 'get_config_vars()'. + + Equivalent to get_config_vars().get(name) + 'b'SO is deprecated, use EXT_SUFFIX'u'SO is deprecated, use EXT_SUFFIX'b'Return a string that identifies the current platform. + + This is used mainly to distinguish platform-specific build directories and + platform-specific built distributions. Typically includes the OS name and + version and the architecture (as supplied by 'os.uname()'), although the + exact information included depends on the OS; on Linux, the kernel version + isn't particularly important. + + Examples of returned values: + linux-i586 + linux-alpha (?) + solaris-2.6-sun4u + + Windows will return one of: + win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) + win32 (all others - specifically, sys.platform is returned) + + For other non-POSIX platforms, currently just returns 'sys.platform'. + + 'u'Return a string that identifies the current platform. + + This is used mainly to distinguish platform-specific build directories and + platform-specific built distributions. Typically includes the OS name and + version and the architecture (as supplied by 'os.uname()'), although the + exact information included depends on the OS; on Linux, the kernel version + isn't particularly important. + + Examples of returned values: + linux-i586 + linux-alpha (?) + solaris-2.6-sun4u + + Windows will return one of: + win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) + win32 (all others - specifically, sys.platform is returned) + + For other non-POSIX platforms, currently just returns 'sys.platform'. + + 'b'amd64'u'amd64'b'win-amd64'u'win-amd64'b'(arm)'u'(arm)'b'win-arm32'u'win-arm32'b'(arm64)'u'(arm64)'b'win-arm64'u'win-arm64'b'_PYTHON_HOST_PLATFORM'u'_PYTHON_HOST_PLATFORM'b'%s-%s'u'%s-%s'b'sunos'u'sunos'b'solaris'u'solaris'b'%d.%s'u'%d.%s'b'.%s'u'.%s'b'%s-%s.%s'u'%s-%s.%s'b'[\d.]+'u'[\d.]+'b'%s-%s-%s'u'%s-%s-%s'b' %s = "%s"'u' %s = "%s"'b'Display all information sysconfig detains.'u'Display all information sysconfig detains.'b'--generate-posix-vars'u'--generate-posix-vars'b'Platform: "%s"'u'Platform: "%s"'b'Python version: "%s"'u'Python version: "%s"'b'Current installation scheme: "%s"'u'Current installation scheme: "%s"'b'Paths'u'Paths'b'Variables'u'Variables'u'sysconfig'Provide access to Python's configuration information. The specific +configuration variables available depend heavily on the platform and +configuration. The values may be retrieved using +get_config_var(name), and the list of variables is available via +get_config_vars().keys(). Additional convenience functions are also +available. + +Written by: Fred L. Drake, Jr. +Email: +get_host_platformPREFIXEXEC_PREFIXBASE_PREFIXBASE_EXEC_PREFIXproject_base_python_buildbuild_flagsReturn a string containing the major and minor Python version, + leaving off the patchlevel. 
Sample return values could be '1.5' + or '2.2'. + get_python_incplat_specificReturn the directory containing installed Python header files. + + If 'plat_specific' is false (the default), this is the path to the + non-platform-specific header files, i.e. Python.h and so on; + otherwise, this is the path to platform-specific header files + (namely pyconfig.h). + + If 'prefix' is supplied, use it instead of sys.base_prefix or + sys.base_exec_prefix -- i.e., ignore 'plat_specific'. + Includeincdirpython_dirI don't know where Python installs its C header files on platform '%s'"I don't know where Python installs its C header files ""on platform '%s'"get_python_libstandard_libReturn the directory containing the Python library (standard or + site additions). + + If 'plat_specific' is true, return the directory containing + platform-specific modules, i.e. any module from a non-pure-Python + module distribution; otherwise, return the platform-shared library + directory. If 'standard_lib' is true, return the directory + containing standard Python library modules; otherwise, return the + directory for site-specific modules. + + If 'prefix' is supplied, use it instead of sys.base_prefix or + sys.base_exec_prefix -- i.e., ignore 'plat_specific'. + libpythonLibI don't know where Python installs its library on platform '%s'"I don't know where Python installs its library "Do any platform-specific customization of a CCompiler instance. + + Mainly needed on Unix, so we can plug in the information that + varies across Unices and is stored in Python's Makefile. + CUSTOMIZED_OSX_COMPILERCCSHAREDSHLIB_SUFFIXARARFLAGScxxccsharedldsharedshlib_suffixar_flagsnewccCPPcpp -Earchivercc_cmdpreprocessorcompiler_cxxlinker_solinker_exeReturn full pathname of installed pyconfig.h file.Return full pathname of installed Makefile from the Python build.config-{}{}config_fileparse_makefiledistutils.text_fileTextFilestrip_commentsskip_blanksjoin_linesexpand_makefile_varsExpand Makefile-style variables -- "${foo}" or "$(foo)" -- in + 'string' according to 'vars' (a dictionary mapping variable names to + values). Variables not present in 'vars' are silently expanded to the + empty string. The variable values in 'vars' should not contain further + variable expansions; if 'vars' is the output of 'parse_makefile()', + you're fine. Returns a variable-expanded version of 's'. + beg_init_ntWith no arguments, return a dictionary of all configuration + variables relevant for the current platform. Generally this includes + everything needed to build extensions and install both pure modules and + extensions. On Unix, this means every variable defined in Python's + installed Makefile; on Windows it's a much smaller set. + + With arguments, return a list of values that result from looking up + each argument in the configuration variable dictionary. + _init_Return the value of a single variable using the dictionary + returned by 'get_config_vars()'. Equivalent to + get_config_vars().get(name) + # These are needed in a couple of spots, so just compute them once.# Path to the base directory of the project. On Windows the binary may# live in project/PCbuild/win32 or project/PCbuild/amd64.# python_build: (Boolean) if true, we're either building Python or# building an extension with an un-installed Python, so we use# different (hard-wired) directories.# Calculate the build qualifier flags if they are defined. 
Adding the flags# to the include and lib directories only makes sense for an installation, not# an in-source build.# It's not a configure-based build, so the sys module doesn't have# this attribute, which is fine.# Assume the executable is in the build directory. The# pyconfig.h file should be in the same directory. Since# the build directory may not be the source directory, we# must use "srcdir" from the makefile to find the "Include"# Include both the include and PC dir to ensure we can find# pyconfig.h# Perform first-time customization of compiler-related# config vars on OS X now that we know we need a compiler.# This is primarily to support Pythons from binary# installers. The kind and paths to build tools on# the user system may vary significantly from the system# that Python itself was built on. Also the user OS# version and build tools may not support the same set# of CPU architectures for universal builds.# Use get_config_var() to ensure _config_vars is initialized.# On OS X, if CC is overridden, use that as the default# command for LDSHARED as well# not always# bogus variable reference; just drop it since we can't deal# This algorithm does multiple expansion, so if vars['foo'] contains# "${bar}", it will expand ${foo} to ${bar}, and then expand# ${bar}... and so forth. This is fine as long as 'vars' comes from# 'parse_makefile()', which takes care of such expansions eagerly,# according to make's variable expansion semantics.# _sysconfigdata is generated at build time, see the sysconfig module# XXX hmmm.. a normal install puts include files here# Convert srcdir into an absolute path if it appears necessary.# Normally it is relative to the build directory. However, during# testing, for example, we might be running a non-installed python# from a different directory.# srcdir is relative and we are not in the same directory# as the executable. Assume executable is in the build# directory and make srcdir absolute.b'Provide access to Python's configuration information. The specific +configuration variables available depend heavily on the platform and +configuration. The values may be retrieved using +get_config_var(name), and the list of variables is available via +get_config_vars().keys(). Additional convenience functions are also +available. + +Written by: Fred L. Drake, Jr. +Email: +'u'Provide access to Python's configuration information. The specific +configuration variables available depend heavily on the platform and +configuration. The values may be retrieved using +get_config_var(name), and the list of variables is available via +get_config_vars().keys(). Additional convenience functions are also +available. + +Written by: Fred L. Drake, Jr. +Email: +'b'Return a string containing the major and minor Python version, + leaving off the patchlevel. Sample return values could be '1.5' + or '2.2'. + 'u'Return a string containing the major and minor Python version, + leaving off the patchlevel. Sample return values could be '1.5' + or '2.2'. + 'b'Return the directory containing installed Python header files. + + If 'plat_specific' is false (the default), this is the path to the + non-platform-specific header files, i.e. Python.h and so on; + otherwise, this is the path to platform-specific header files + (namely pyconfig.h). + + If 'prefix' is supplied, use it instead of sys.base_prefix or + sys.base_exec_prefix -- i.e., ignore 'plat_specific'. + 'u'Return the directory containing installed Python header files. 
+ + If 'plat_specific' is false (the default), this is the path to the + non-platform-specific header files, i.e. Python.h and so on; + otherwise, this is the path to platform-specific header files + (namely pyconfig.h). + + If 'prefix' is supplied, use it instead of sys.base_prefix or + sys.base_exec_prefix -- i.e., ignore 'plat_specific'. + 'b'Include'u'Include'b'I don't know where Python installs its C header files on platform '%s''u'I don't know where Python installs its C header files on platform '%s''b'Return the directory containing the Python library (standard or + site additions). + + If 'plat_specific' is true, return the directory containing + platform-specific modules, i.e. any module from a non-pure-Python + module distribution; otherwise, return the platform-shared library + directory. If 'standard_lib' is true, return the directory + containing standard Python library modules; otherwise, return the + directory for site-specific modules. + + If 'prefix' is supplied, use it instead of sys.base_prefix or + sys.base_exec_prefix -- i.e., ignore 'plat_specific'. + 'u'Return the directory containing the Python library (standard or + site additions). + + If 'plat_specific' is true, return the directory containing + platform-specific modules, i.e. any module from a non-pure-Python + module distribution; otherwise, return the platform-shared library + directory. If 'standard_lib' is true, return the directory + containing standard Python library modules; otherwise, return the + directory for site-specific modules. + + If 'prefix' is supplied, use it instead of sys.base_prefix or + sys.base_exec_prefix -- i.e., ignore 'plat_specific'. + 'b'Lib'u'Lib'b'I don't know where Python installs its library on platform '%s''u'I don't know where Python installs its library on platform '%s''b'Do any platform-specific customization of a CCompiler instance. + + Mainly needed on Unix, so we can plug in the information that + varies across Unices and is stored in Python's Makefile. + 'u'Do any platform-specific customization of a CCompiler instance. + + Mainly needed on Unix, so we can plug in the information that + varies across Unices and is stored in Python's Makefile. + 'b'CUSTOMIZED_OSX_COMPILER'u'CUSTOMIZED_OSX_COMPILER'b'CCSHARED'u'CCSHARED'b'SHLIB_SUFFIX'u'SHLIB_SUFFIX'b'AR'u'AR'b'ARFLAGS'u'ARFLAGS'b'CPP'u'CPP'b' -E'u' -E'b'Return full pathname of installed pyconfig.h file.'u'Return full pathname of installed pyconfig.h file.'b'Return full pathname of installed Makefile from the Python build.'u'Return full pathname of installed Makefile from the Python build.'b'config-{}{}'u'config-{}{}'b'Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in + 'string' according to 'vars' (a dictionary mapping variable names to + values). Variables not present in 'vars' are silently expanded to the + empty string. The variable values in 'vars' should not contain further + variable expansions; if 'vars' is the output of 'parse_makefile()', + you're fine. Returns a variable-expanded version of 's'. + 'u'Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in + 'string' according to 'vars' (a dictionary mapping variable names to + values). Variables not present in 'vars' are silently expanded to the + empty string. The variable values in 'vars' should not contain further + variable expansions; if 'vars' is the output of 'parse_makefile()', + you're fine. Returns a variable-expanded version of 's'. 
+ 'b'With no arguments, return a dictionary of all configuration + variables relevant for the current platform. Generally this includes + everything needed to build extensions and install both pure modules and + extensions. On Unix, this means every variable defined in Python's + installed Makefile; on Windows it's a much smaller set. + + With arguments, return a list of values that result from looking up + each argument in the configuration variable dictionary. + 'u'With no arguments, return a dictionary of all configuration + variables relevant for the current platform. Generally this includes + everything needed to build extensions and install both pure modules and + extensions. On Unix, this means every variable defined in Python's + installed Makefile; on Windows it's a much smaller set. + + With arguments, return a list of values that result from looking up + each argument in the configuration variable dictionary. + 'b'_init_'u'_init_'b'Return the value of a single variable using the dictionary + returned by 'get_config_vars()'. Equivalent to + get_config_vars().get(name) + 'u'Return the value of a single variable using the dictionary + returned by 'get_config_vars()'. Equivalent to + get_config_vars().get(name) + 'u'distutils.sysconfig'Read from and write to tar format archives. +0.9.0Lars Gustäbel (lars@gustaebel.de)Gustavo Niemeyer, Niels Gustäbel, Richard Townsend.bltn_opensymlink_exceptionTarFileTarInfois_tarfileCompressionErrorStreamErrorExtractErrorHeaderErrorUSTAR_FORMATGNU_FORMATPAX_FORMATDEFAULT_FORMATNULBLOCKSIZERECORDSIZEustar GNU_MAGICustar00POSIX_MAGICLENGTH_NAMELENGTH_LINKLENGTH_PREFIXREGTYPEAREGTYPELNKTYPESYMTYPECHRTYPEBLKTYPEDIRTYPEFIFOTYPECONTTYPEGNUTYPE_LONGNAMEGNUTYPE_LONGLINKGNUTYPE_SPARSEXHDTYPEXGLTYPESOLARIS_XHDTYPESUPPORTED_TYPESREGULAR_TYPESGNU_TYPESlinkpathPAX_FIELDSPAX_NAME_FIELDSatimePAX_NUMBER_FIELDSstnConvert a string to a null-terminated bytes object. + ntsConvert a null-terminated bytes object to a string. + ntiConvert a number field to a python number. + InvalidHeaderErrorinvalid headeritnConvert a python number to a number field. + %0*ooverflow in number fieldcalc_chksumsCalculate the checksum for a member's header by summing up all + characters except for the chksum field which is treated as if + it was filled with spaces. According to the GNU tar sources, + some tars (Sun and NeXT) calculate chksum with signed char, + which will be different if there are chars in the buffer with + the high bit set. So we calculate two checksums, unsigned and + signed. + 148B8x356Bunsigned_chksum148b8x356bsigned_chksumCopy length bytes from fileobj src to fileobj dst. + If length is None, copy the entire content. + unexpected end of data_safe_printBase exception.General exception for extract errors.Exception for unreadable tar archives.Exception for unavailable compression methods.Exception for unsupported operations on stream-like TarFiles.Base exception for header errors.EmptyHeaderErrorException for empty headers.TruncatedHeaderErrorException for truncated headers.EOFHeaderErrorException for end of file headers.Exception for invalid headers.SubsequentHeaderErrorException for missing and invalid extended headers._LowLevelFileLow-level file object. Supports reading and writing. + It is used instead of a regular file object for streaming + access. + O_BINARY_StreamClass that serves as an adapter between TarFile and + a stream-like object. The stream-like object only + needs to have a read() or write() method and is accessed + blockwise. 
Use of gzip or bzip2 compression is possible. + A stream-like object could be for example: sys.stdin, + sys.stdout, a socket, a tape device etc. + + _Stream is intended to be used only internally. + comptypeConstruct a _Stream object. + _extfileobj_StreamProxygetcomptypezlib module is not available_init_read_gz_init_write_gzbz2 module is not availabledbufcmplzma module is not availableunknown compression type %rInitialize for writing with gzip compression. + __write‹ÿWrite string s to the stream. + Write string s to the stream if a whole new block + is ready to be written. + Close the _Stream object. No operation should be + done on it afterwards. + 0xffffFFFFInitialize for reading a gzip compressed fileobj. + __readnot a gzip fileunsupported compression methodxlenReturn the stream's file pointer position. + Set the stream's file pointer to pos. Negative seeking + is forbidden. + seeking backwards is not allowedReturn the next size number of bytes from the stream.Return size bytes from the stream. + invalid compressed dataReturn size bytes from stream. If internal buffer is empty, + read another block from the stream. + Small proxy class that enables transparent compression + detection for the Stream interface (mode 'r|*'). + ‹BZh1AY&SY]€ý7zXZ_FileInFileA thin wrapper around an existing file object that + provides a part of its data as an individual file + object. + blockinfomap_indexlastposrealposReturn the current file position. + Seek to a position in the file. + Invalid argumentRead data from the file. + ExFileObjectoffset_datasparseInformational class which holds the details about an + archive member given by a tar header block. + TarInfo objects are returned by TarFile.getmember(), + TarFile.getmembers() and TarFile.gettarinfo() and are + usually created internally. + Name of the archive member.Permission bits.User ID of the user who originally stored this member.Group ID of the user who originally stored this member.Size in bytes.Time of last modification.Header checksum.chksumFile type. type is usually one of these constants: REGTYPE, AREGTYPE, LNKTYPE, SYMTYPE, DIRTYPE, FIFOTYPE, CONTTYPE, CHRTYPE, BLKTYPE, GNUTYPE_SPARSE.'File type. type is usually one of these constants: ''REGTYPE, AREGTYPE, LNKTYPE, SYMTYPE, DIRTYPE, FIFOTYPE, ''CONTTYPE, CHRTYPE, BLKTYPE, GNUTYPE_SPARSE.'Name of the target file name, which is only present in TarInfo objects of type LNKTYPE and SYMTYPE.'Name of the target file name, which is only present ''in TarInfo objects of type LNKTYPE and SYMTYPE.'User name.Group name.Device major number.devmajorDevice minor number.devminorThe tar header starts here.The file's data starts here.A dictionary containing key-value pairs of an associated pax extended header.'A dictionary containing key-value pairs of an ''associated pax extended header.'pax_headersSparse member information._sparse_structs_link_targetConstruct a TarInfo object. name is the optional name + of the member. + 4200o644In pax headers, "name" is called "path".In pax headers, "linkname" is called "linkpath".get_infoReturn the TarInfo's attributes as a dictionary. + tobufReturn a tar header as a string of 512 byte blocks. + create_ustar_headercreate_gnu_headercreate_pax_headerinvalid formatReturn the object as a ustar header block. + linkname is too long_posix_split_name_create_headerReturn the object as a GNU header block sequence. + _create_gnu_long_headerReturn the object as a ustar header block. 
If it cannot be + represented this way, prepend a pax extended header sequence + with supplement information. + hname_create_pax_generic_headercreate_pax_global_headerReturn the object as a pax global header block sequence. + Split a name longer than 100 chars into a prefix + and a name part. + name is too longReturn a header block. info is a dictionary with file + information, format must be one of the *_FORMAT constants. + %ds364%06o357_create_payloadReturn the string payload filled with zero bytes + up to the next 512 byte border. + Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence + for name. + ././@LongLinkReturn a POSIX.1-2008 extended or global header sequence + that contains a list of keyword, value pairs. The values + must be strings. + 21 hdrcharset=BINARY +././@PaxHeaderfrombufConstruct a TarInfo object from a 512 byte bytes object. + empty headertruncated headerend of file headerbad checksum265297329337345386structs482isextended483495origsizefromtarfileReturn the next TarInfo object from TarFile object + tarfile. + _proc_memberChoose the right processing method depending on + the type and call it. + _proc_gnulong_proc_sparse_proc_pax_proc_builtinProcess a builtin type or an unknown type which + will be treated as a regular file. + isreg_block_apply_pax_infoProcess the blocks that hold a GNU longname + or longlink member. + missing or bad subsequent headerProcess a GNU sparse header plus extra headers. + Process an extended or global header as described in + POSIX.1-2008. + \d+ hdrcharset=([^\n]+)\nhdrcharset(\d+) ([^=]+)=_decode_pax_fieldGNU.sparse.map_proc_gnusparse_01GNU.sparse.size_proc_gnusparse_00GNU.sparse.majorGNU.sparse.minor_proc_gnusparse_10Process a GNU tar extended sparse header, version 0.0. + offsets\d+ GNU.sparse.offset=(\d+)\n\d+ GNU.sparse.numbytes=(\d+)\nProcess a GNU tar extended sparse header, version 0.1. + Process a GNU tar extended sparse header, version 1.0. + Replace fields with supplemental information from a previous + pax extended or global header. + GNU.sparse.nameGNU.sparse.realsizefallback_encodingfallback_errorsDecode a single field from a pax record. + Round up a byte count by BLOCKSIZE and return it, + e.g. _block(834) => 1024. + Return True if the Tarinfo object is a regular file.Return True if it is a directory.issymReturn True if it is a symbolic link.islnkReturn True if it is a hard link.ischrReturn True if it is a character device.isblkReturn True if it is a block device.isfifoReturn True if it is a FIFO.issparseisdevReturn True if it is one of character device, block device or FIFO.The TarFile Class provides an interface to tar archives. + dereferenceignore_zeroserrorlevelfileobjectcopybufsizeOpen an (uncompressed) tar archive `name'. `mode' is either 'r' to + read from an existing archive, 'a' to append data to an existing + file or 'w' to create a new file overwriting an existing one. `mode' + defaults to 'r'. + If `fileobj' is given, it is used for reading or writing data. If it + can be determined, `mode' is overridden by `fileobj's mode. + `fileobj' is not closed, when TarFile is closed. + r+bmodesmode must be 'r', 'a', 'w' or 'x'_loadedinodesfirstmemberOpen a tar archive for reading, writing or appending. Return + an appropriate TarFile class. 
+ + mode: + 'r' or 'r:*' open for reading with transparent compression + 'r:' open for reading exclusively uncompressed + 'r:gz' open for reading with gzip compression + 'r:bz2' open for reading with bzip2 compression + 'r:xz' open for reading with lzma compression + 'a' or 'a:' open for appending, creating the file if necessary + 'w' or 'w:' open for writing without compression + 'w:gz' open for writing with gzip compression + 'w:bz2' open for writing with bzip2 compression + 'w:xz' open for writing with lzma compression + + 'x' or 'x:' create a tarfile exclusively without compression, raise + an exception if the file is already created + 'x:gz' create a gzip compressed tarfile, raise an exception + if the file is already created + 'x:bz2' create a bzip2 compressed tarfile, raise an exception + if the file is already created + 'x:xz' create an lzma compressed tarfile, raise an exception + if the file is already created + + 'r|*' open a stream of tar blocks with transparent compression + 'r|' open an uncompressed stream of tar blocks for reading + 'r|gz' open a gzip compressed stream of tar blocks + 'r|bz2' open a bzip2 compressed stream of tar blocks + 'r|xz' open an lzma compressed stream of tar blocks + 'w|' open an uncompressed stream for writing + 'w|gz' open a gzip compressed stream for writing + 'w|bz2' open a bzip2 compressed stream for writing + 'w|xz' open an lzma compressed stream for writing + nothing to openr:*not_compressedOPEN_METHtaropensaved_posfile could not be opened successfullymode must be 'r' or 'w'undiscernible modeOpen uncompressed tar archive name for reading or writing. + gzopenOpen gzip compressed tar archive name for reading or writing. + Appending is not allowed. + mode must be 'r', 'w' or 'x'gzip module is not availablebz2openOpen bzip2 compressed tar archive name for reading or writing. + Appending is not allowed. + not a bzip2 filexzopenOpen lzma compressed tar archive name for reading or writing. + Appending is not allowed. + not an lzma fileClose the TarFile. In write-mode, two finishing zero blocks are + appended to the archive. + getmemberReturn a TarInfo object for member `name'. If `name' can not be + found in the archive, KeyError is raised. If a member occurs more + than once in the archive, its last occurrence is assumed to be the + most up-to-date version. + _getmemberfilename %r not foundReturn the members of the archive as a list of TarInfo objects. The + list has the same order as the members in the archive. + _checkgetnamesReturn the members of the archive as a list of their names. It has + the same order as the list returned by getmembers(). + gettarinfoarcnameCreate a TarInfo object from the result of os.stat or equivalent + on an existing file. The file is either named by `name', or + specified as a file object `fileobj' with a file descriptor. If + given, `arcname' specifies an alternative name for the file in the + archive, otherwise, the name is taken from the 'name' attribute of + 'fileobj', or the 'name' argument. The name should be a text + string. + awxdrvstatresstmdPrint a table of contents to sys.stdout. If `verbose' is False, only + the names of the members are printed. If it is True, an `ls -l'-like + output is produced. `members' is optional and must be a subset of the + list returned by getmembers(). + %10s%d,%d%10d%d-%02d-%02d %02d:%02d:%02d-> link to Add the file `name' to the archive. `name' may be any type of file + (directory, fifo, symbolic link, etc.). 
If given, `arcname' + specifies an alternative name for the file in the archive. + Directories are added recursively by default. This can be avoided by + setting `recursive' to False. `filter' is a function + that expects a TarInfo object argument and returns the changed + TarInfo object, if it returns None the TarInfo object will be + excluded from the archive. + _dbgtarfile: Skipped %rtarfile: Unsupported type %rtarfile: Excluded %raddfileAdd the TarInfo object `tarinfo' to the archive. If `fileobj' is + given, it should be a binary file, and tarinfo.size bytes are read + from it and added to the archive. You can create TarInfo objects + directly, or by using gettarinfo(). + numeric_ownerExtract all members from the archive to the current working + directory and set owner, modification time and permissions on + directories afterwards. `path' specifies a different directory + to extract to. `members' is optional and must be a subset of the + list returned by getmembers(). If `numeric_owner` is True, only + the numbers for user/group names are used and not the names. + directories0o700set_attrstarfile: %sExtract a member from the archive to the current working directory, + using its full name. Its file information is extracted as accurately + as possible. `member' may be a filename or a TarInfo object. You can + specify a different directory using `path'. File attributes (owner, + mtime, mode) are set unless `set_attrs' is False. If `numeric_owner` + is True, only the numbers for user/group names are used and not + the names. + _extract_membertarfile: %s %rextractfileExtract a member from the archive as a file object. `member' may be + a filename or a TarInfo object. If `member' is a regular file or a + link, an io.BufferedReader object is returned. Otherwise, None is + returned. + cannot extract (sym)link as file object_find_link_targettargetpathExtract the TarInfo object tarinfo to a physical + file called targetpath. + upperdirsmakedirmakefifomakelinkmakeunknownMake a directory called targetpath. + Make a file called targetpath. + Make a file from a TarInfo object with an unknown type + at targetpath. + tarfile: Unknown file type %r, extracted as regular file."tarfile: Unknown file type %r, ""extracted as regular file."Make a fifo called targetpath. + fifo not supported by systemMake a character or block device called targetpath. + special devices not supported by systemMake a (symbolic) link called targetpath. If it cannot be created + (platform limitation), we try to make a copy of the referenced file + instead of a link. + unable to resolve link inside archiveSet owner of targetpath according to tarinfo. If numeric_owner + is True, use .gid/.uid instead of .gname/.uname. If numeric_owner + is False, fall back to .gid/.uid when the search based on name + fails. + could not change ownerSet file permissions of targetpath according to tarinfo. + could not change modeSet modification time of targetpath according to tarinfo. + could not change modification timeReturn the next member of the archive as a TarInfo object, when + TarFile is opened for reading. Return None if there is no more + available. + ra0x%X: %sempty fileFind an archive member by name from bottom to top. + If tarinfo is given, it is used as the starting point. + Read through the entire archive file and look for readable + members. + Check if TarFile is still open, and if the operation's mode + corresponds to TarFile's mode. 
+ %s is closedbad operation for mode %rFind the target member of a symlink or hardlink member in the + archive. + linkname %r not foundProvide an iterator object. + Write debugging output to sys.stderr. + Return True if name points to a tar archive that we + are able to handle, else return False. + A simple command-line interface for tarfile module.--listShow listing of a tarfile--extractExtract tarfile into target dir--createCreate tarfile from sourcesTest if a tarfile is valid{!r} is a tar archive.{!r} is not a tar archive. +tf{!r} file is extracted.{!r} file is extracted into {!r} directory.'{!r} file is extracted ''into {!r} directory.'tar_name.tbz.tb2compressionsw:tar_modetar_files{!r} file created.#-------------------------------------------------------------------# tarfile.py# Copyright (C) 2002 Lars Gustaebel # Permission is hereby granted, free of charge, to any person# obtaining a copy of this software and associated documentation# files (the "Software"), to deal in the Software without# restriction, including without limitation the rights to use,# copy, modify, merge, publish, distribute, sublicense, and/or sell# copies of the Software, and to permit persons to whom the# Software is furnished to do so, subject to the following# conditions:# The above copyright notice and this permission notice shall be# included in all copies or substantial portions of the Software.# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR# OTHER DEALINGS IN THE SOFTWARE.#---------# os.symlink on Windows prior to 6.0 raises NotImplementedError# OSError (winerror=1314) will be raised if the caller does not hold the# SeCreateSymbolicLinkPrivilege privilege# from tarfile import *#---------------------------------------------------------# tar constants# the null character# length of processing blocks# length of records# magic gnu tar string# magic posix tar string# maximum length of a filename# maximum length of a linkname# maximum length of the prefix field# link (inside tarfile)# character special device# block special device# fifo special device# contiguous file# GNU tar longname# GNU tar longlink# GNU tar sparse file# POSIX.1-2001 extended header# POSIX.1-2001 global header# Solaris extended header# POSIX.1-1988 (ustar) format# GNU tar format# POSIX.1-2001 (pax) format# tarfile constants# File types that tarfile supports:# File types that will be treated as a regular file.# File types that are part of the GNU tar format.# Fields from a pax header that override a TarInfo attribute.# Fields from a pax header that are affected by hdrcharset.# Fields in a pax header that are numbers, all other fields# are treated as strings.# initialization# Some useful functions# There are two possible encodings for a number field, see# itn() below.# POSIX 1003.1-1988 requires numbers to be encoded as a string of# octal digits followed by a null-byte, this allows values up to# (8**(digits-1))-1. GNU tar allows storing numbers greater than# that if necessary. A leading 0o200 or 0o377 byte indicate this# particular encoding, the following digits-1 bytes are a big-endian# base-256 representation. 
This allows values up to (256**(digits-1))-1.# A 0o200 byte indicates a positive number, a 0o377 byte a negative# number.#---------------------------# internal stream interface# Enable transparent compression detection for the# stream interface# Honor "directory components removed" from RFC1952# RFC1952 says we must use ISO-8859-1 for the FNAME field.# taken from gzip.GzipFile with some alterations# Skip underlying buffer to avoid unaligned double buffering.# class _Stream# class StreamProxy#------------------------# Extraction file object# Construct a map with data and zero blocks.#class _FileInFile#class ExFileObject#------------------# Exported Classes# member name# file permissions# user id# group id# file size# modification time# header checksum# member type# link name# user name# group name# device major number# device minor number# the tar header starts here# the file's data starts here# sparse member information# pax header information# Test string fields for values that exceed the field length or cannot# be represented in ASCII encoding.# The pax header has priority.# Try to encode the string as ASCII.# Test number fields for values that exceed the field limit or values# that like to be stored as float.# The pax header has priority. Avoid overflow.# Create a pax extended header if necessary.# checksum field# create extended header + name blocks.# Check if one of the fields contains surrogate characters and thereby# forces hdrcharset=BINARY, see _proc_pax() for more information.# Put the hdrcharset field at the beginning of the header.# Try to restore the original byte representation of `value'.# Needless to say, that the encoding must match the string.# ' ' + '=' + '\n'# We use a hardcoded "././@PaxHeader" name like star does# instead of the one that POSIX recommends.# Create pax header + record blocks.# Old V7 tar format represents a directory as a regular# file with a trailing slash.# The old GNU sparse format occupies some of the unused# space in the buffer for up to 4 sparse structures.# Save them for later processing in _proc_sparse().# Remove redundant slashes from directories.# Reconstruct a ustar longname.#--------------------------------------------------------------------------# The following are methods that are called depending on the type of a# member. The entry point is _proc_member() which can be overridden in a# subclass to add custom _proc_*() methods. A _proc_*() method MUST# implement the following# operations:# 1. Set self.offset_data to the position where the data blocks begin,# if there is data that follows.# 2. Set tarfile.offset to the position where the next member's header will# begin.# 3. Return self or another valid TarInfo object.# Skip the following data blocks.# Patch the TarInfo object with saved global# header information.# Fetch the next header and process it.# Patch the TarInfo object from the next header with# the longname information.# We already collected some sparse structures in frombuf().# Collect sparse structures from extended header blocks.# Read the header information.# A pax header stores supplemental information for either# the following file (extended) or all following files# (global).# Check if the pax header contains a hdrcharset field. This tells us# the encoding of the path, linkpath, uname and gname fields. 
Normally,# these fields are UTF-8 encoded but since POSIX.1-2008 tar# implementations are allowed to store them as raw binary strings if# the translation to UTF-8 fails.# For the time being, we don't care about anything other than "BINARY".# The only other value that is currently allowed by the standard is# "ISO-IR 10646 2000 UTF-8" in other words UTF-8.# Parse pax header information. A record looks like that:# "%d %s=%s\n" % (length, keyword, value). length is the size# of the complete record including the length field itself and# the newline. keyword and value are both UTF-8 encoded strings.# Normally, we could just use "utf-8" as the encoding and "strict"# as the error handler, but we better not take the risk. For# example, GNU tar <= 1.23 is known to store filenames it cannot# translate to UTF-8 as raw strings (unfortunately without a# hdrcharset=BINARY header).# We first try the strict standard encoding, and if that fails we# fall back on the user's encoding and error handler.# Fetch the next header.# Process GNU sparse information.# GNU extended sparse format version 0.1.# GNU extended sparse format version 0.0.# GNU extended sparse format version 1.0.# Patch the TarInfo object with the extended header info.# If the extended header replaces the size field,# we need to recalculate the offset where the next# header starts.# class TarInfo# May be set from 0 (no msgs) to 3 (all msgs)# If true, add content of linked file to the# tar file, else the link.# If true, skips empty or invalid blocks and# continues processing.# If 0, fatal errors only appear in debug# messages (if debug >= 0). If > 0, errors# are passed to the caller as exceptions.# The format to use when creating an archive.# Encoding for 8-bit character strings.# Error handler for unicode conversion.# The default TarInfo class to use.# The file-object for extractfile().# Create nonexistent files in append mode.# Init attributes.# Init datastructures.# list of members as TarInfo objects# flag if all members have been read# current position in the archive file# dictionary caching the inodes of# archive members already added# Move to the end of the archive,# before the first empty block.# Below are the classmethods which act as alternate constructors to the# TarFile class. The open() method is the only one that is needed for# public use; it is the "super"-constructor and is able to select an# adequate "sub"-constructor for a particular compression using the mapping# from OPEN_METH.# This concept allows one to subclass TarFile without losing the comfort of# the super-constructor. 
A sub-constructor is registered and made available# by adding it to the mapping in OPEN_METH.# Find out which *open() is appropriate for opening the file.# Select the *open() function according to# given compression.# All *open() methods are registered here.# uncompressed tar# gzip compressed tar# bzip2 compressed tar# lzma compressed tar# The public methods which TarFile provides:# fill up the end with zero-blocks# (like option -b20 for tar does)# if we want to obtain a list of# all members, we first have to# scan the whole archive.# When fileobj is given, replace name by# fileobj's real name.# Building the name of the member in the archive.# Backward slashes are converted to forward slashes,# Absolute paths are turned to relative paths.# Now, fill the TarInfo object with# information specific for the file.# Not needed# Use os.stat or os.lstat, depending on if symlinks shall be resolved.# Is it a hardlink to an already# archived file?# The inode is added only if its valid.# For win32 it is always 0.# Fill the TarInfo object with all# information we can get.# Skip if somebody tries to archive the archive...# Create a TarInfo object from the file.# Change or exclude the TarInfo object.# Append the tar header and data to the archive.# If there's data to follow, append it.# Extract directories with a safe mode.# Do not set_attrs directories, as we will do that further down# Reverse sort directories.# Set correct owner, mtime and filemode on directories.# Prepare the link target for makelink().# Members with unknown types are treated as regular files.# A small but ugly workaround for the case that someone tries# to extract a (sym)link as a file-object from a non-seekable# stream of tar blocks.# A (sym)link's file object is its target's file object.# If there's no data associated with the member (directory, chrdev,# blkdev, etc.), return None instead of a file object.# Fetch the TarInfo object for the given name# and build the destination pathname, replacing# forward slashes to platform specific separators.# Create all upper directories.# Create directories that are not part of the archive with# default permissions.# Below are the different file methods. They are called via# _extract_member() when extract() is called. They can be replaced in a# subclass to implement other functionality.# Use a safe mode for the directory, the real mode is set# later in _extract_member().# For systems that support symbolic and hard links.# Avoid FileExistsError on following os.symlink.# See extract().# We have to be root to do so.# Advance the file pointer.# Read the next block.# Little helper methods:# Ensure that all members have been loaded.# Limit the member search list up to tarinfo.# Always search the entire archive.# Search the archive before the link, because a hard link is# just a reference to an already archived file.# Yield items using TarFile's next() method.# When all members have been read, set TarFile as _loaded.# Fix for SF #1100429: Under rare circumstances it can# happen that getmembers() is called during iteration,# which will have already exhausted the next() method.# An exception occurred. We must not call close() because# it would try to write end-of-archive blocks and padding.#--------------------# exported functions# gz# xz# bz2b'Read from and write to tar format archives. +'u'Read from and write to tar format archives. 
+'b'0.9.0'u'0.9.0'b'Lars Gustäbel (lars@gustaebel.de)'u'Lars Gustäbel (lars@gustaebel.de)'b'Gustavo Niemeyer, Niels Gustäbel, Richard Townsend.'u'Gustavo Niemeyer, Niels Gustäbel, Richard Townsend.'b'TarFile'u'TarFile'b'TarInfo'u'TarInfo'b'is_tarfile'u'is_tarfile'b'TarError'u'TarError'b'ReadError'u'ReadError'b'CompressionError'u'CompressionError'b'StreamError'u'StreamError'b'ExtractError'u'ExtractError'b'HeaderError'u'HeaderError'b'ENCODING'u'ENCODING'b'USTAR_FORMAT'u'USTAR_FORMAT'b'GNU_FORMAT'u'GNU_FORMAT'b'PAX_FORMAT'u'PAX_FORMAT'b'DEFAULT_FORMAT'u'DEFAULT_FORMAT'b'ustar 'b'ustar00'b'linkpath'u'linkpath'b'uid'u'uid'b'gid'u'gid'b'gname'u'gname'b'atime'u'atime'b'ctime'u'ctime'b'Convert a string to a null-terminated bytes object. + 'u'Convert a string to a null-terminated bytes object. + 'b'Convert a null-terminated bytes object to a string. + 'u'Convert a null-terminated bytes object to a string. + 'b'Convert a number field to a python number. + 'u'Convert a number field to a python number. + 'b'invalid header'u'invalid header'b'Convert a python number to a number field. + 'u'Convert a python number to a number field. + 'b'%0*o'u'%0*o'b'overflow in number field'u'overflow in number field'b'Calculate the checksum for a member's header by summing up all + characters except for the chksum field which is treated as if + it was filled with spaces. According to the GNU tar sources, + some tars (Sun and NeXT) calculate chksum with signed char, + which will be different if there are chars in the buffer with + the high bit set. So we calculate two checksums, unsigned and + signed. + 'u'Calculate the checksum for a member's header by summing up all + characters except for the chksum field which is treated as if + it was filled with spaces. According to the GNU tar sources, + some tars (Sun and NeXT) calculate chksum with signed char, + which will be different if there are chars in the buffer with + the high bit set. So we calculate two checksums, unsigned and + signed. + 'b'148B8x356B'u'148B8x356B'b'148b8x356b'u'148b8x356b'b'Copy length bytes from fileobj src to fileobj dst. + If length is None, copy the entire content. + 'u'Copy length bytes from fileobj src to fileobj dst. + If length is None, copy the entire content. + 'b'unexpected end of data'u'unexpected end of data'b'Base exception.'u'Base exception.'b'General exception for extract errors.'u'General exception for extract errors.'b'Exception for unreadable tar archives.'u'Exception for unreadable tar archives.'b'Exception for unavailable compression methods.'u'Exception for unavailable compression methods.'b'Exception for unsupported operations on stream-like TarFiles.'u'Exception for unsupported operations on stream-like TarFiles.'b'Base exception for header errors.'u'Base exception for header errors.'b'Exception for empty headers.'u'Exception for empty headers.'b'Exception for truncated headers.'u'Exception for truncated headers.'b'Exception for end of file headers.'u'Exception for end of file headers.'b'Exception for invalid headers.'u'Exception for invalid headers.'b'Exception for missing and invalid extended headers.'u'Exception for missing and invalid extended headers.'b'Low-level file object. Supports reading and writing. + It is used instead of a regular file object for streaming + access. + 'u'Low-level file object. Supports reading and writing. + It is used instead of a regular file object for streaming + access. + 'b'O_BINARY'u'O_BINARY'b'Class that serves as an adapter between TarFile and + a stream-like object. 
The stream-like object only + needs to have a read() or write() method and is accessed + blockwise. Use of gzip or bzip2 compression is possible. + A stream-like object could be for example: sys.stdin, + sys.stdout, a socket, a tape device etc. + + _Stream is intended to be used only internally. + 'u'Class that serves as an adapter between TarFile and + a stream-like object. The stream-like object only + needs to have a read() or write() method and is accessed + blockwise. Use of gzip or bzip2 compression is possible. + A stream-like object could be for example: sys.stdin, + sys.stdout, a socket, a tape device etc. + + _Stream is intended to be used only internally. + 'b'Construct a _Stream object. + 'u'Construct a _Stream object. + 'b'zlib module is not available'u'zlib module is not available'b'bz2 module is not available'u'bz2 module is not available'b'lzma module is not available'u'lzma module is not available'b'unknown compression type %r'u'unknown compression type %r'b'Initialize for writing with gzip compression. + 'u'Initialize for writing with gzip compression. + 'b'‹'b'ÿ'b'Write string s to the stream. + 'u'Write string s to the stream. + 'b'Write string s to the stream if a whole new block + is ready to be written. + 'u'Write string s to the stream if a whole new block + is ready to be written. + 'b'Close the _Stream object. No operation should be + done on it afterwards. + 'u'Close the _Stream object. No operation should be + done on it afterwards. + 'b'Initialize for reading a gzip compressed fileobj. + 'u'Initialize for reading a gzip compressed fileobj. + 'b'not a gzip file'u'not a gzip file'b'unsupported compression method'u'unsupported compression method'b'Return the stream's file pointer position. + 'u'Return the stream's file pointer position. + 'b'Set the stream's file pointer to pos. Negative seeking + is forbidden. + 'u'Set the stream's file pointer to pos. Negative seeking + is forbidden. + 'b'seeking backwards is not allowed'u'seeking backwards is not allowed'b'Return the next size number of bytes from the stream.'u'Return the next size number of bytes from the stream.'b'Return size bytes from the stream. + 'u'Return size bytes from the stream. + 'b'invalid compressed data'u'invalid compressed data'b'Return size bytes from stream. If internal buffer is empty, + read another block from the stream. + 'u'Return size bytes from stream. If internal buffer is empty, + read another block from the stream. + 'b'Small proxy class that enables transparent compression + detection for the Stream interface (mode 'r|*'). + 'u'Small proxy class that enables transparent compression + detection for the Stream interface (mode 'r|*'). + 'b'‹'b'BZh'b'1AY&SY'b']€'b'ý7zXZ'b'A thin wrapper around an existing file object that + provides a part of its data as an individual file + object. + 'u'A thin wrapper around an existing file object that + provides a part of its data as an individual file + object. + 'b'Return the current file position. + 'u'Return the current file position. + 'b'Seek to a position in the file. + 'u'Seek to a position in the file. + 'b'Invalid argument'u'Invalid argument'b'Read data from the file. + 'u'Read data from the file. + 'b'Informational class which holds the details about an + archive member given by a tar header block. + TarInfo objects are returned by TarFile.getmember(), + TarFile.getmembers() and TarFile.gettarinfo() and are + usually created internally. + 'u'Informational class which holds the details about an + archive member given by a tar header block. 
+ TarInfo objects are returned by TarFile.getmember(), + TarFile.getmembers() and TarFile.gettarinfo() and are + usually created internally. + 'b'Name of the archive member.'u'Name of the archive member.'b'Permission bits.'u'Permission bits.'b'User ID of the user who originally stored this member.'u'User ID of the user who originally stored this member.'b'Group ID of the user who originally stored this member.'u'Group ID of the user who originally stored this member.'b'Size in bytes.'u'Size in bytes.'b'Time of last modification.'u'Time of last modification.'b'Header checksum.'u'Header checksum.'b'File type. type is usually one of these constants: REGTYPE, AREGTYPE, LNKTYPE, SYMTYPE, DIRTYPE, FIFOTYPE, CONTTYPE, CHRTYPE, BLKTYPE, GNUTYPE_SPARSE.'u'File type. type is usually one of these constants: REGTYPE, AREGTYPE, LNKTYPE, SYMTYPE, DIRTYPE, FIFOTYPE, CONTTYPE, CHRTYPE, BLKTYPE, GNUTYPE_SPARSE.'b'Name of the target file name, which is only present in TarInfo objects of type LNKTYPE and SYMTYPE.'u'Name of the target file name, which is only present in TarInfo objects of type LNKTYPE and SYMTYPE.'b'User name.'u'User name.'b'Group name.'u'Group name.'b'Device major number.'u'Device major number.'b'Device minor number.'u'Device minor number.'b'The tar header starts here.'u'The tar header starts here.'b'The file's data starts here.'u'The file's data starts here.'b'A dictionary containing key-value pairs of an associated pax extended header.'u'A dictionary containing key-value pairs of an associated pax extended header.'b'Sparse member information.'u'Sparse member information.'b'Construct a TarInfo object. name is the optional name + of the member. + 'u'Construct a TarInfo object. name is the optional name + of the member. + 'b'In pax headers, "name" is called "path".'u'In pax headers, "name" is called "path".'b'In pax headers, "linkname" is called "linkpath".'u'In pax headers, "linkname" is called "linkpath".'b'Return the TarInfo's attributes as a dictionary. + 'u'Return the TarInfo's attributes as a dictionary. + 'b'chksum'u'chksum'b'linkname'u'linkname'b'devmajor'u'devmajor'b'devminor'u'devminor'b'Return a tar header as a string of 512 byte blocks. + 'u'Return a tar header as a string of 512 byte blocks. + 'b'invalid format'u'invalid format'b'Return the object as a ustar header block. + 'u'Return the object as a ustar header block. + 'b'magic'u'magic'b'linkname is too long'u'linkname is too long'b'Return the object as a GNU header block sequence. + 'u'Return the object as a GNU header block sequence. + 'b'Return the object as a ustar header block. If it cannot be + represented this way, prepend a pax extended header sequence + with supplement information. + 'u'Return the object as a ustar header block. If it cannot be + represented this way, prepend a pax extended header sequence + with supplement information. + 'b'Return the object as a pax global header block sequence. + 'u'Return the object as a pax global header block sequence. + 'b'Split a name longer than 100 chars into a prefix + and a name part. + 'u'Split a name longer than 100 chars into a prefix + and a name part. + 'b'name is too long'u'name is too long'b'Return a header block. info is a dictionary with file + information, format must be one of the *_FORMAT constants. + 'u'Return a header block. info is a dictionary with file + information, format must be one of the *_FORMAT constants. + 'b' 'b'%ds'u'%ds'b'%06o'u'%06o'b'Return the string payload filled with zero bytes + up to the next 512 byte border. 
+ 'u'Return the string payload filled with zero bytes + up to the next 512 byte border. + 'b'Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence + for name. + 'u'Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence + for name. + 'b'././@LongLink'u'././@LongLink'b'Return a POSIX.1-2008 extended or global header sequence + that contains a list of keyword, value pairs. The values + must be strings. + 'u'Return a POSIX.1-2008 extended or global header sequence + that contains a list of keyword, value pairs. The values + must be strings. + 'b'21 hdrcharset=BINARY +'b'././@PaxHeader'u'././@PaxHeader'b'Construct a TarInfo object from a 512 byte bytes object. + 'u'Construct a TarInfo object from a 512 byte bytes object. + 'b'empty header'u'empty header'b'truncated header'u'truncated header'b'end of file header'u'end of file header'b'bad checksum'u'bad checksum'b'Return the next TarInfo object from TarFile object + tarfile. + 'u'Return the next TarInfo object from TarFile object + tarfile. + 'b'Choose the right processing method depending on + the type and call it. + 'u'Choose the right processing method depending on + the type and call it. + 'b'Process a builtin type or an unknown type which + will be treated as a regular file. + 'u'Process a builtin type or an unknown type which + will be treated as a regular file. + 'b'Process the blocks that hold a GNU longname + or longlink member. + 'u'Process the blocks that hold a GNU longname + or longlink member. + 'b'missing or bad subsequent header'u'missing or bad subsequent header'b'Process a GNU sparse header plus extra headers. + 'u'Process a GNU sparse header plus extra headers. + 'b'Process an extended or global header as described in + POSIX.1-2008. + 'u'Process an extended or global header as described in + POSIX.1-2008. + 'b'\d+ hdrcharset=([^\n]+)\n'b'hdrcharset'u'hdrcharset'b'(\d+) ([^=]+)='b'GNU.sparse.map'u'GNU.sparse.map'b'GNU.sparse.size'u'GNU.sparse.size'b'GNU.sparse.major'u'GNU.sparse.major'b'GNU.sparse.minor'u'GNU.sparse.minor'b'Process a GNU tar extended sparse header, version 0.0. + 'u'Process a GNU tar extended sparse header, version 0.0. + 'b'\d+ GNU.sparse.offset=(\d+)\n'b'\d+ GNU.sparse.numbytes=(\d+)\n'b'Process a GNU tar extended sparse header, version 0.1. + 'u'Process a GNU tar extended sparse header, version 0.1. + 'b'Process a GNU tar extended sparse header, version 1.0. + 'u'Process a GNU tar extended sparse header, version 1.0. + 'b'Replace fields with supplemental information from a previous + pax extended or global header. + 'u'Replace fields with supplemental information from a previous + pax extended or global header. + 'b'GNU.sparse.name'u'GNU.sparse.name'b'GNU.sparse.realsize'u'GNU.sparse.realsize'b'Decode a single field from a pax record. + 'u'Decode a single field from a pax record. + 'b'Round up a byte count by BLOCKSIZE and return it, + e.g. _block(834) => 1024. + 'u'Round up a byte count by BLOCKSIZE and return it, + e.g. _block(834) => 1024. 
+ 'b'Return True if the Tarinfo object is a regular file.'u'Return True if the Tarinfo object is a regular file.'b'Return True if it is a directory.'u'Return True if it is a directory.'b'Return True if it is a symbolic link.'u'Return True if it is a symbolic link.'b'Return True if it is a hard link.'u'Return True if it is a hard link.'b'Return True if it is a character device.'u'Return True if it is a character device.'b'Return True if it is a block device.'u'Return True if it is a block device.'b'Return True if it is a FIFO.'u'Return True if it is a FIFO.'b'Return True if it is one of character device, block device or FIFO.'u'Return True if it is one of character device, block device or FIFO.'b'The TarFile Class provides an interface to tar archives. + 'u'The TarFile Class provides an interface to tar archives. + 'b'Open an (uncompressed) tar archive `name'. `mode' is either 'r' to + read from an existing archive, 'a' to append data to an existing + file or 'w' to create a new file overwriting an existing one. `mode' + defaults to 'r'. + If `fileobj' is given, it is used for reading or writing data. If it + can be determined, `mode' is overridden by `fileobj's mode. + `fileobj' is not closed, when TarFile is closed. + 'u'Open an (uncompressed) tar archive `name'. `mode' is either 'r' to + read from an existing archive, 'a' to append data to an existing + file or 'w' to create a new file overwriting an existing one. `mode' + defaults to 'r'. + If `fileobj' is given, it is used for reading or writing data. If it + can be determined, `mode' is overridden by `fileobj's mode. + `fileobj' is not closed, when TarFile is closed. + 'b'r+b'u'r+b'b'mode must be 'r', 'a', 'w' or 'x''u'mode must be 'r', 'a', 'w' or 'x''b'Open a tar archive for reading, writing or appending. Return + an appropriate TarFile class. + + mode: + 'r' or 'r:*' open for reading with transparent compression + 'r:' open for reading exclusively uncompressed + 'r:gz' open for reading with gzip compression + 'r:bz2' open for reading with bzip2 compression + 'r:xz' open for reading with lzma compression + 'a' or 'a:' open for appending, creating the file if necessary + 'w' or 'w:' open for writing without compression + 'w:gz' open for writing with gzip compression + 'w:bz2' open for writing with bzip2 compression + 'w:xz' open for writing with lzma compression + + 'x' or 'x:' create a tarfile exclusively without compression, raise + an exception if the file is already created + 'x:gz' create a gzip compressed tarfile, raise an exception + if the file is already created + 'x:bz2' create a bzip2 compressed tarfile, raise an exception + if the file is already created + 'x:xz' create an lzma compressed tarfile, raise an exception + if the file is already created + + 'r|*' open a stream of tar blocks with transparent compression + 'r|' open an uncompressed stream of tar blocks for reading + 'r|gz' open a gzip compressed stream of tar blocks + 'r|bz2' open a bzip2 compressed stream of tar blocks + 'r|xz' open an lzma compressed stream of tar blocks + 'w|' open an uncompressed stream for writing + 'w|gz' open a gzip compressed stream for writing + 'w|bz2' open a bzip2 compressed stream for writing + 'w|xz' open an lzma compressed stream for writing + 'u'Open a tar archive for reading, writing or appending. Return + an appropriate TarFile class. 
+ + mode: + 'r' or 'r:*' open for reading with transparent compression + 'r:' open for reading exclusively uncompressed + 'r:gz' open for reading with gzip compression + 'r:bz2' open for reading with bzip2 compression + 'r:xz' open for reading with lzma compression + 'a' or 'a:' open for appending, creating the file if necessary + 'w' or 'w:' open for writing without compression + 'w:gz' open for writing with gzip compression + 'w:bz2' open for writing with bzip2 compression + 'w:xz' open for writing with lzma compression + + 'x' or 'x:' create a tarfile exclusively without compression, raise + an exception if the file is already created + 'x:gz' create a gzip compressed tarfile, raise an exception + if the file is already created + 'x:bz2' create a bzip2 compressed tarfile, raise an exception + if the file is already created + 'x:xz' create an lzma compressed tarfile, raise an exception + if the file is already created + + 'r|*' open a stream of tar blocks with transparent compression + 'r|' open an uncompressed stream of tar blocks for reading + 'r|gz' open a gzip compressed stream of tar blocks + 'r|bz2' open a bzip2 compressed stream of tar blocks + 'r|xz' open an lzma compressed stream of tar blocks + 'w|' open an uncompressed stream for writing + 'w|gz' open a gzip compressed stream for writing + 'w|bz2' open a bzip2 compressed stream for writing + 'w|xz' open an lzma compressed stream for writing + 'b'nothing to open'u'nothing to open'b'r:*'u'r:*'b'taropen'u'taropen'b'file could not be opened successfully'u'file could not be opened successfully'b'mode must be 'r' or 'w''u'mode must be 'r' or 'w''b'undiscernible mode'u'undiscernible mode'b'Open uncompressed tar archive name for reading or writing. + 'u'Open uncompressed tar archive name for reading or writing. + 'b'Open gzip compressed tar archive name for reading or writing. + Appending is not allowed. + 'u'Open gzip compressed tar archive name for reading or writing. + Appending is not allowed. + 'b'mode must be 'r', 'w' or 'x''u'mode must be 'r', 'w' or 'x''b'gzip module is not available'u'gzip module is not available'b'Open bzip2 compressed tar archive name for reading or writing. + Appending is not allowed. + 'u'Open bzip2 compressed tar archive name for reading or writing. + Appending is not allowed. + 'b'not a bzip2 file'u'not a bzip2 file'b'Open lzma compressed tar archive name for reading or writing. + Appending is not allowed. + 'u'Open lzma compressed tar archive name for reading or writing. + Appending is not allowed. + 'b'not an lzma file'u'not an lzma file'b'gzopen'u'gzopen'b'bz2open'u'bz2open'b'xzopen'u'xzopen'b'Close the TarFile. In write-mode, two finishing zero blocks are + appended to the archive. + 'u'Close the TarFile. In write-mode, two finishing zero blocks are + appended to the archive. + 'b'Return a TarInfo object for member `name'. If `name' can not be + found in the archive, KeyError is raised. If a member occurs more + than once in the archive, its last occurrence is assumed to be the + most up-to-date version. + 'u'Return a TarInfo object for member `name'. If `name' can not be + found in the archive, KeyError is raised. If a member occurs more + than once in the archive, its last occurrence is assumed to be the + most up-to-date version. + 'b'filename %r not found'u'filename %r not found'b'Return the members of the archive as a list of TarInfo objects. The + list has the same order as the members in the archive. + 'u'Return the members of the archive as a list of TarInfo objects. 
The + list has the same order as the members in the archive. + 'b'Return the members of the archive as a list of their names. It has + the same order as the list returned by getmembers(). + 'u'Return the members of the archive as a list of their names. It has + the same order as the list returned by getmembers(). + 'b'Create a TarInfo object from the result of os.stat or equivalent + on an existing file. The file is either named by `name', or + specified as a file object `fileobj' with a file descriptor. If + given, `arcname' specifies an alternative name for the file in the + archive, otherwise, the name is taken from the 'name' attribute of + 'fileobj', or the 'name' argument. The name should be a text + string. + 'u'Create a TarInfo object from the result of os.stat or equivalent + on an existing file. The file is either named by `name', or + specified as a file object `fileobj' with a file descriptor. If + given, `arcname' specifies an alternative name for the file in the + archive, otherwise, the name is taken from the 'name' attribute of + 'fileobj', or the 'name' argument. The name should be a text + string. + 'b'awx'u'awx'b'major'u'major'b'minor'u'minor'b'Print a table of contents to sys.stdout. If `verbose' is False, only + the names of the members are printed. If it is True, an `ls -l'-like + output is produced. `members' is optional and must be a subset of the + list returned by getmembers(). + 'u'Print a table of contents to sys.stdout. If `verbose' is False, only + the names of the members are printed. If it is True, an `ls -l'-like + output is produced. `members' is optional and must be a subset of the + list returned by getmembers(). + 'b'%10s'u'%10s'b'%d,%d'u'%d,%d'b'%10d'u'%10d'b'%d-%02d-%02d %02d:%02d:%02d'u'%d-%02d-%02d %02d:%02d:%02d'b'-> 'u'-> 'b'link to 'u'link to 'b'Add the file `name' to the archive. `name' may be any type of file + (directory, fifo, symbolic link, etc.). If given, `arcname' + specifies an alternative name for the file in the archive. + Directories are added recursively by default. This can be avoided by + setting `recursive' to False. `filter' is a function + that expects a TarInfo object argument and returns the changed + TarInfo object, if it returns None the TarInfo object will be + excluded from the archive. + 'u'Add the file `name' to the archive. `name' may be any type of file + (directory, fifo, symbolic link, etc.). If given, `arcname' + specifies an alternative name for the file in the archive. + Directories are added recursively by default. This can be avoided by + setting `recursive' to False. `filter' is a function + that expects a TarInfo object argument and returns the changed + TarInfo object, if it returns None the TarInfo object will be + excluded from the archive. + 'b'tarfile: Skipped %r'u'tarfile: Skipped %r'b'tarfile: Unsupported type %r'u'tarfile: Unsupported type %r'b'tarfile: Excluded %r'u'tarfile: Excluded %r'b'Add the TarInfo object `tarinfo' to the archive. If `fileobj' is + given, it should be a binary file, and tarinfo.size bytes are read + from it and added to the archive. You can create TarInfo objects + directly, or by using gettarinfo(). + 'u'Add the TarInfo object `tarinfo' to the archive. If `fileobj' is + given, it should be a binary file, and tarinfo.size bytes are read + from it and added to the archive. You can create TarInfo objects + directly, or by using gettarinfo(). 
+ 'b'Extract all members from the archive to the current working + directory and set owner, modification time and permissions on + directories afterwards. `path' specifies a different directory + to extract to. `members' is optional and must be a subset of the + list returned by getmembers(). If `numeric_owner` is True, only + the numbers for user/group names are used and not the names. + 'u'Extract all members from the archive to the current working + directory and set owner, modification time and permissions on + directories afterwards. `path' specifies a different directory + to extract to. `members' is optional and must be a subset of the + list returned by getmembers(). If `numeric_owner` is True, only + the numbers for user/group names are used and not the names. + 'b'tarfile: %s'u'tarfile: %s'b'Extract a member from the archive to the current working directory, + using its full name. Its file information is extracted as accurately + as possible. `member' may be a filename or a TarInfo object. You can + specify a different directory using `path'. File attributes (owner, + mtime, mode) are set unless `set_attrs' is False. If `numeric_owner` + is True, only the numbers for user/group names are used and not + the names. + 'u'Extract a member from the archive to the current working directory, + using its full name. Its file information is extracted as accurately + as possible. `member' may be a filename or a TarInfo object. You can + specify a different directory using `path'. File attributes (owner, + mtime, mode) are set unless `set_attrs' is False. If `numeric_owner` + is True, only the numbers for user/group names are used and not + the names. + 'b'tarfile: %s %r'u'tarfile: %s %r'b'Extract a member from the archive as a file object. `member' may be + a filename or a TarInfo object. If `member' is a regular file or a + link, an io.BufferedReader object is returned. Otherwise, None is + returned. + 'u'Extract a member from the archive as a file object. `member' may be + a filename or a TarInfo object. If `member' is a regular file or a + link, an io.BufferedReader object is returned. Otherwise, None is + returned. + 'b'cannot extract (sym)link as file object'u'cannot extract (sym)link as file object'b'Extract the TarInfo object tarinfo to a physical + file called targetpath. + 'u'Extract the TarInfo object tarinfo to a physical + file called targetpath. + 'b'Make a directory called targetpath. + 'u'Make a directory called targetpath. + 'b'Make a file called targetpath. + 'u'Make a file called targetpath. + 'b'Make a file from a TarInfo object with an unknown type + at targetpath. + 'u'Make a file from a TarInfo object with an unknown type + at targetpath. + 'b'tarfile: Unknown file type %r, extracted as regular file.'u'tarfile: Unknown file type %r, extracted as regular file.'b'Make a fifo called targetpath. + 'u'Make a fifo called targetpath. + 'b'fifo not supported by system'u'fifo not supported by system'b'Make a character or block device called targetpath. + 'u'Make a character or block device called targetpath. + 'b'makedev'u'makedev'b'special devices not supported by system'u'special devices not supported by system'b'Make a (symbolic) link called targetpath. If it cannot be created + (platform limitation), we try to make a copy of the referenced file + instead of a link. + 'u'Make a (symbolic) link called targetpath. If it cannot be created + (platform limitation), we try to make a copy of the referenced file + instead of a link. 
+ 'b'unable to resolve link inside archive'u'unable to resolve link inside archive'b'Set owner of targetpath according to tarinfo. If numeric_owner + is True, use .gid/.uid instead of .gname/.uname. If numeric_owner + is False, fall back to .gid/.uid when the search based on name + fails. + 'u'Set owner of targetpath according to tarinfo. If numeric_owner + is True, use .gid/.uid instead of .gname/.uname. If numeric_owner + is False, fall back to .gid/.uid when the search based on name + fails. + 'b'geteuid'u'geteuid'b'could not change owner'u'could not change owner'b'Set file permissions of targetpath according to tarinfo. + 'u'Set file permissions of targetpath according to tarinfo. + 'b'could not change mode'u'could not change mode'b'Set modification time of targetpath according to tarinfo. + 'u'Set modification time of targetpath according to tarinfo. + 'b'could not change modification time'u'could not change modification time'b'Return the next member of the archive as a TarInfo object, when + TarFile is opened for reading. Return None if there is no more + available. + 'u'Return the next member of the archive as a TarInfo object, when + TarFile is opened for reading. Return None if there is no more + available. + 'b'ra'u'ra'b'0x%X: %s'u'0x%X: %s'b'empty file'u'empty file'b'Find an archive member by name from bottom to top. + If tarinfo is given, it is used as the starting point. + 'u'Find an archive member by name from bottom to top. + If tarinfo is given, it is used as the starting point. + 'b'Read through the entire archive file and look for readable + members. + 'u'Read through the entire archive file and look for readable + members. + 'b'Check if TarFile is still open, and if the operation's mode + corresponds to TarFile's mode. + 'u'Check if TarFile is still open, and if the operation's mode + corresponds to TarFile's mode. + 'b'%s is closed'u'%s is closed'b'bad operation for mode %r'u'bad operation for mode %r'b'Find the target member of a symlink or hardlink member in the + archive. + 'u'Find the target member of a symlink or hardlink member in the + archive. + 'b'linkname %r not found'u'linkname %r not found'b'Provide an iterator object. + 'u'Provide an iterator object. + 'b'Write debugging output to sys.stderr. + 'u'Write debugging output to sys.stderr. + 'b'Return True if name points to a tar archive that we + are able to handle, else return False. + 'u'Return True if name points to a tar archive that we + are able to handle, else return False. + 'b'A simple command-line interface for tarfile module.'u'A simple command-line interface for tarfile module.'b'--list'u'--list'b''u''b'Show listing of a tarfile'u'Show listing of a tarfile'b'--extract'u'--extract'b''u''b'Extract tarfile into target dir'u'Extract tarfile into target dir'b'--create'u'--create'b''u''b''u''b'Create tarfile from sources'u'Create tarfile from sources'b'Test if a tarfile is valid'u'Test if a tarfile is valid'b'{!r} is a tar archive.'u'{!r} is a tar archive.'b'{!r} is not a tar archive. +'u'{!r} is not a tar archive. 
+'b'{!r} file is extracted.'u'{!r} file is extracted.'b'{!r} file is extracted into {!r} directory.'u'{!r} file is extracted into {!r} directory.'b'.tbz'u'.tbz'b'.tb2'u'.tb2'b'w:'u'w:'b'{!r} file created.'u'{!r} file created.'u'tarfile'Support for tasks, coroutines and the scheduler.shieldrun_coroutine_threadsafebase_tasks_task_name_counterReturn a currently executed task.Return a set of all tasks for the loop.A coroutine wrapped in a Future.Return the currently running task in an event loop or None. + + By default the current task for the current event loop is returned. + + None is returned when called not in the context of a Task. + Task.current_task() is deprecated since Python 3.7, use asyncio.current_task() instead"Task.current_task() is deprecated since Python 3.7, ""use asyncio.current_task() instead"Return a set of all tasks for an event loop. + + By default all tasks for the current event loop are returned. + Task.all_tasks() is deprecated since Python 3.7, use asyncio.all_tasks() instead"Task.all_tasks() is deprecated since Python 3.7, ""use asyncio.all_tasks() instead"a coroutine was expected, got Task-__stepTask was destroyed but it is pending!Task does not support set_result operationTask does not support set_exception operationReturn the list of stack frames for this task's coroutine. + + If the coroutine is not done, this returns the stack where it is + suspended. If the coroutine has completed successfully or was + cancelled, this returns an empty list. If the coroutine was + terminated by an exception, this returns the list of traceback + frames. + + The frames are always ordered from oldest to newest. + + The optional limit gives the maximum number of frames to + return; by default all available frames are returned. Its + meaning differs depending on whether a stack or a traceback is + returned: the newest frames of a stack are returned, but the + oldest frames of a traceback are returned. (This matches the + behavior of the traceback module.) + + For reasons beyond our control, only one stack frame is + returned for a suspended coroutine. + Print the stack or traceback for this task's coroutine. + + This produces output similar to that of the traceback module, + for the frames retrieved by get_stack(). The limit argument + is passed to get_stack(). The file argument is an I/O stream + to which the output is written; by default output is written + to sys.stderr. + Request that this task cancel itself. + + This arranges for a CancelledError to be thrown into the + wrapped coroutine on the next cycle through the event loop. + The coroutine then has a chance to clean up or even deny + the request using try/except/finally. + + Unlike Future.cancel, this does not guarantee that the + task will be cancelled: the exception might be caught and + acted upon, delaying cancellation of the task or preventing + cancellation completely. The task may also return a value or + raise a different exception. + + Immediately after this method is called, Task.cancelled() will + not return True (unless the task was already cancelled). A + task will be marked as cancelled when the wrapped coroutine + terminates with a CancelledError exception (even if cancel() + was not called). 
+ _step(): already done: Task got Future ' got Future ' attached to a different loopTask cannot await on itself: __wakeupyield was used instead of yield from in task 'yield was used instead of yield from ''in task ' with yield was used instead of yield from for generator in task 'yield was used instead of yield from for ''generator in task 'Task got bad yield: _PyTask_CTaskSchedule the execution of a coroutine object in a spawn task. + + Return a Task object. + Wait for the Futures and coroutines given by fs to complete. + + The fs iterable must not be empty. + + Coroutines will be wrapped in Tasks. + + Returns two sets of Future: (done, pending). + + Usage: + + done, pending = await asyncio.wait(fs) + + Note: This does not raise TimeoutError! Futures that aren't done + when the timeout occurs are returned in the second set. + expect a list of futures, not Set of coroutines/Futures is empty.Invalid return_when value: _release_waiterWait for the single Future or coroutine to complete, with timeout. + + Coroutine will be wrapped in Task. + + Returns result of the Future or coroutine. When a timeout occurs, + it cancels the task and raises TimeoutError. To avoid the task + cancellation, wrap it in shield(). + + If the wait is cancelled, the task is also cancelled. + + This function is a coroutine. + _cancel_and_waittimeout_handleInternal helper for wait(). + + The fs argument must be a collection of Futures. + Set of Futures is empty.counter_on_completionCancel the *fut* future or task and wait until it completes.Return an iterator whose values are coroutines. + + When waiting for the yielded coroutines you'll get the results (or + exceptions!) of the original Futures (or coroutines), in the order + in which and as soon as they complete. + + This differs from PEP 3148; the proper way to use this is: + + for f in as_completed(fs): + result = await f # The 'await' may raise. + # Use result. + + If a timeout is specified, the 'await' will raise + TimeoutError when the timeout occurs before all Futures are done. + + Note: The futures 'f' are not necessarily members of fs. + expect an iterable of futures, not _on_timeout_wait_for_one__sleep0Skip one event loop run cycle. + + This is a private helper for 'asyncio.sleep()', used + when the 'delay' is set to 0. It uses a bare 'yield' + expression (which Task.__step knows how to handle) + instead of creating a Future object. + Coroutine that completes after a given time (in seconds).coro_or_futureWrap a coroutine or an awaitable in a future. + + If the argument is a Future, it is returned directly. + The future belongs to a different loop than the one specified as the loop argument'The future belongs to a different loop than ''the one specified as the loop argument'_wrap_awaitableAn asyncio.Future, a coroutine or an awaitable is required'An asyncio.Future, a coroutine or an awaitable is ''required'Helper for asyncio.ensure_future(). + + Wraps awaitable (an object with __await__) into a coroutine + that will later be wrapped in a Task by ensure_future(). + _GatheringFutureHelper for gather(). + + This overrides cancel() to cancel all the children and act more + like Task.cancel(), which doesn't immediately mark itself as + cancelled. + _cancel_requestedcoros_or_futuresReturn a future aggregating results from the given coroutines/futures. + + Coroutines will be wrapped in a future and scheduled in the event + loop. They will not necessarily be scheduled in the same order as + passed in. + + All futures must share the same event loop. 
If all the tasks are + done successfully, the returned future's result is the list of + results (in the order of the original sequence, not necessarily + the order of results arrival). If *return_exceptions* is True, + exceptions in the tasks are treated the same as successful + results, and gathered in the result list; otherwise, the first + raised exception will be immediately propagated to the returned + future. + + Cancellation: if the outer Future is cancelled, all children (that + have not completed yet) are also cancelled. If any child is + cancelled, this is treated as if it raised CancelledError -- + the outer Future is *not* cancelled in this case. (This is to + prevent the cancellation of one child to cause other children to + be cancelled.) + + If *return_exceptions* is False, cancelling gather() after it + has been marked done won't cancel any submitted awaitables. + For instance, gather can be marked done after propagating an + exception to the caller, therefore, calling ``gather.cancel()`` + after catching an exception (raised by one of the awaitables) from + gather won't cancel any other awaitables. + _done_callbacknfinishednfutsarg_to_futWait for a future, shielding it from cancellation. + + The statement + + res = await shield(something()) + + is exactly equivalent to the statement + + res = await something() + + *except* that if the coroutine containing it is cancelled, the + task running in something() is not cancelled. From the POV of + something(), the cancellation did not happen. But its caller is + still cancelled, so the yield-from expression still raises + CancelledError. Note: If something() is cancelled by other means + this will still cancel shield(). + + If you want to completely ignore cancellation (not recommended) + you can combine shield() with a try/except clause, as follows: + + try: + res = await shield(something()) + except CancelledError: + res = None + _inner_done_callback_outer_done_callbackSubmit a coroutine object to a given event loop. + + Return a concurrent.futures.Future to access the result. + A coroutine object is requiredRegister a new task in asyncio as executed by loop.Cannot enter into task while another task " while another ""task " is being executed.Leaving task does not match the current task " does not match ""the current task "Unregister a task._py_register_task_py_unregister_task_py_enter_task_py_leave_task_c_register_task_c_unregister_task_c_enter_task_c_leave_task# Helper to generate new task names# This uses itertools.count() instead of a "+= 1" operation because the latter# is not thread safe. See bpo-11866 for a longer explanation.# Looping over a WeakSet (_all_tasks) isn't safe as it can be updated from another# thread while we do so. Therefore we cast it to list prior to filtering. The list# cast itself requires iteration, so we repeat it several times ignoring# RuntimeErrors (which are not very likely to occur). See issues 34970 and 36607 for# Different from "all_task()" by returning *all* Tasks, including# the completed ones. Used to implement deprecated "Tasks.all_task()"# method.# Inherit Python Task implementation# from a Python Future implementation.# An important invariant maintained while a Task not done:# - Either _fut_waiter is None, and _step() is scheduled;# - or _fut_waiter is some Future, and _step() is *not* scheduled.# The only transition from the latter to the former is through# _wakeup(). 
When _fut_waiter is not None, one of its callbacks# must be _wakeup().# If False, don't log a message if the task is destroyed whereas its# status is still pending# raise after Future.__init__(), attrs are required for __del__# prevent logging for pending task in __del__# Leave self._fut_waiter; it may be a Task that# catches and ignores the cancellation so we may have# to cancel it again later.# It must be the case that self.__step is already scheduled.# Call either coro.throw(exc) or coro.send(None).# We use the `send` method directly, because coroutines# don't have `__iter__` and `__next__` methods.# Task is cancelled right before coro stops.# I.e., Future.cancel(self).# Yielded Future must come from Future.__iter__().# Bare yield relinquishes control for one event loop iteration.# Yielding a generator is just wrong.# Yielding something else is an error.# This may also be a cancellation.# Don't pass the value of `future.result()` explicitly,# as `Future.__iter__` and `Future.__await__` don't need it.# If we call `_step(value, None)` instead of `_step()`,# Python eval loop would use `.send(value)` method call,# instead of `__next__()`, which is slower for futures# that return non-generator iterators from their `__iter__`.# _CTask is needed for tests.# wait() and as_completed() similar to those in PEP 3148.# wait until the future completes or the timeout# We must ensure that the task is not running# after wait_for() returns.# See https://bugs.python.org/issue32751# We cannot wait on *fut* directly to make# sure _cancel_and_wait itself is reliably cancellable.# This is *not* a @coroutine! It is just an iterator (yielding Futures).# Import here to avoid circular import problem.# Queue a dummy value for _wait_for_one().# Can't do todo.remove(f) in the loop.# _on_timeout() was here first.# Dummy value from _on_timeout().# May raise f.exception().# If any child tasks were actually cancelled, we should# propagate the cancellation request regardless of# *return_exceptions* argument. See issue 32684.# Mark exception retrieved.# Check if 'fut' is cancelled first, as# 'fut.exception()' will *raise* a CancelledError# instead of returning it.# All futures are done; create a list of results# and set it to the 'outer' future.# If gather is being cancelled we must propagate the# cancellation regardless of *return_exceptions* argument.# See issue 32684.# 'arg' was not a Future, therefore, 'fut' is a new# Future created specifically for 'arg'. Since the caller# can't control it, disable the "destroy pending task"# warning.# There's a duplicate Future object in coros_or_futures.# Shortcut.# Mark inner's result as retrieved.# WeakSet containing all alive tasks.# Dictionary containing tasks that are currently active in# all running event loops. {EventLoop: Task}b'Support for tasks, coroutines and the scheduler.'u'Support for tasks, coroutines and the scheduler.'b'Task'u'Task'b'create_task'u'create_task'b'wait_for'u'wait_for'b'sleep'u'sleep'b'gather'u'gather'b'shield'u'shield'b'ensure_future'u'ensure_future'b'run_coroutine_threadsafe'u'run_coroutine_threadsafe'b'current_task'u'current_task'b'all_tasks'u'all_tasks'b'_register_task'u'_register_task'b'_unregister_task'u'_unregister_task'b'_enter_task'u'_enter_task'b'_leave_task'u'_leave_task'b'Return a currently executed task.'u'Return a currently executed task.'b'Return a set of all tasks for the loop.'u'Return a set of all tasks for the loop.'b'A coroutine wrapped in a Future.'b'Return the currently running task in an event loop or None. 
+ + By default the current task for the current event loop is returned. + + None is returned when called not in the context of a Task. + 'u'Return the currently running task in an event loop or None. + + By default the current task for the current event loop is returned. + + None is returned when called not in the context of a Task. + 'b'Task.current_task() is deprecated since Python 3.7, use asyncio.current_task() instead'u'Task.current_task() is deprecated since Python 3.7, use asyncio.current_task() instead'b'Return a set of all tasks for an event loop. + + By default all tasks for the current event loop are returned. + 'u'Return a set of all tasks for an event loop. + + By default all tasks for the current event loop are returned. + 'b'Task.all_tasks() is deprecated since Python 3.7, use asyncio.all_tasks() instead'u'Task.all_tasks() is deprecated since Python 3.7, use asyncio.all_tasks() instead'b'a coroutine was expected, got 'u'a coroutine was expected, got 'b'Task-'u'Task-'b'Task was destroyed but it is pending!'u'Task was destroyed but it is pending!'b'Task does not support set_result operation'u'Task does not support set_result operation'b'Task does not support set_exception operation'u'Task does not support set_exception operation'b'Return the list of stack frames for this task's coroutine. + + If the coroutine is not done, this returns the stack where it is + suspended. If the coroutine has completed successfully or was + cancelled, this returns an empty list. If the coroutine was + terminated by an exception, this returns the list of traceback + frames. + + The frames are always ordered from oldest to newest. + + The optional limit gives the maximum number of frames to + return; by default all available frames are returned. Its + meaning differs depending on whether a stack or a traceback is + returned: the newest frames of a stack are returned, but the + oldest frames of a traceback are returned. (This matches the + behavior of the traceback module.) + + For reasons beyond our control, only one stack frame is + returned for a suspended coroutine. + 'u'Return the list of stack frames for this task's coroutine. + + If the coroutine is not done, this returns the stack where it is + suspended. If the coroutine has completed successfully or was + cancelled, this returns an empty list. If the coroutine was + terminated by an exception, this returns the list of traceback + frames. + + The frames are always ordered from oldest to newest. + + The optional limit gives the maximum number of frames to + return; by default all available frames are returned. Its + meaning differs depending on whether a stack or a traceback is + returned: the newest frames of a stack are returned, but the + oldest frames of a traceback are returned. (This matches the + behavior of the traceback module.) + + For reasons beyond our control, only one stack frame is + returned for a suspended coroutine. + 'b'Print the stack or traceback for this task's coroutine. + + This produces output similar to that of the traceback module, + for the frames retrieved by get_stack(). The limit argument + is passed to get_stack(). The file argument is an I/O stream + to which the output is written; by default output is written + to sys.stderr. + 'u'Print the stack or traceback for this task's coroutine. + + This produces output similar to that of the traceback module, + for the frames retrieved by get_stack(). The limit argument + is passed to get_stack(). 
The file argument is an I/O stream + to which the output is written; by default output is written + to sys.stderr. + 'b'Request that this task cancel itself. + + This arranges for a CancelledError to be thrown into the + wrapped coroutine on the next cycle through the event loop. + The coroutine then has a chance to clean up or even deny + the request using try/except/finally. + + Unlike Future.cancel, this does not guarantee that the + task will be cancelled: the exception might be caught and + acted upon, delaying cancellation of the task or preventing + cancellation completely. The task may also return a value or + raise a different exception. + + Immediately after this method is called, Task.cancelled() will + not return True (unless the task was already cancelled). A + task will be marked as cancelled when the wrapped coroutine + terminates with a CancelledError exception (even if cancel() + was not called). + 'u'Request that this task cancel itself. + + This arranges for a CancelledError to be thrown into the + wrapped coroutine on the next cycle through the event loop. + The coroutine then has a chance to clean up or even deny + the request using try/except/finally. + + Unlike Future.cancel, this does not guarantee that the + task will be cancelled: the exception might be caught and + acted upon, delaying cancellation of the task or preventing + cancellation completely. The task may also return a value or + raise a different exception. + + Immediately after this method is called, Task.cancelled() will + not return True (unless the task was already cancelled). A + task will be marked as cancelled when the wrapped coroutine + terminates with a CancelledError exception (even if cancel() + was not called). + 'b'_step(): already done: 'u'_step(): already done: 'b'Task 'u'Task 'b' got Future 'u' got Future 'b' attached to a different loop'u' attached to a different loop'b'Task cannot await on itself: 'u'Task cannot await on itself: 'b'yield was used instead of yield from in task 'u'yield was used instead of yield from in task 'b' with 'u' with 'b'yield was used instead of yield from for generator in task 'u'yield was used instead of yield from for generator in task 'b'Task got bad yield: 'u'Task got bad yield: 'b'Schedule the execution of a coroutine object in a spawn task. + + Return a Task object. + 'u'Schedule the execution of a coroutine object in a spawn task. + + Return a Task object. + 'b'Wait for the Futures and coroutines given by fs to complete. + + The fs iterable must not be empty. + + Coroutines will be wrapped in Tasks. + + Returns two sets of Future: (done, pending). + + Usage: + + done, pending = await asyncio.wait(fs) + + Note: This does not raise TimeoutError! Futures that aren't done + when the timeout occurs are returned in the second set. + 'u'Wait for the Futures and coroutines given by fs to complete. + + The fs iterable must not be empty. + + Coroutines will be wrapped in Tasks. + + Returns two sets of Future: (done, pending). + + Usage: + + done, pending = await asyncio.wait(fs) + + Note: This does not raise TimeoutError! Futures that aren't done + when the timeout occurs are returned in the second set. + 'b'expect a list of futures, not 'u'expect a list of futures, not 'b'Set of coroutines/Futures is empty.'u'Set of coroutines/Futures is empty.'b'Invalid return_when value: 'u'Invalid return_when value: 'b'Wait for the single Future or coroutine to complete, with timeout. + + Coroutine will be wrapped in Task. + + Returns result of the Future or coroutine. 
When a timeout occurs, + it cancels the task and raises TimeoutError. To avoid the task + cancellation, wrap it in shield(). + + If the wait is cancelled, the task is also cancelled. + + This function is a coroutine. + 'u'Wait for the single Future or coroutine to complete, with timeout. + + Coroutine will be wrapped in Task. + + Returns result of the Future or coroutine. When a timeout occurs, + it cancels the task and raises TimeoutError. To avoid the task + cancellation, wrap it in shield(). + + If the wait is cancelled, the task is also cancelled. + + This function is a coroutine. + 'b'Internal helper for wait(). + + The fs argument must be a collection of Futures. + 'u'Internal helper for wait(). + + The fs argument must be a collection of Futures. + 'b'Set of Futures is empty.'u'Set of Futures is empty.'b'Cancel the *fut* future or task and wait until it completes.'u'Cancel the *fut* future or task and wait until it completes.'b'Return an iterator whose values are coroutines. + + When waiting for the yielded coroutines you'll get the results (or + exceptions!) of the original Futures (or coroutines), in the order + in which and as soon as they complete. + + This differs from PEP 3148; the proper way to use this is: + + for f in as_completed(fs): + result = await f # The 'await' may raise. + # Use result. + + If a timeout is specified, the 'await' will raise + TimeoutError when the timeout occurs before all Futures are done. + + Note: The futures 'f' are not necessarily members of fs. + 'u'Return an iterator whose values are coroutines. + + When waiting for the yielded coroutines you'll get the results (or + exceptions!) of the original Futures (or coroutines), in the order + in which and as soon as they complete. + + This differs from PEP 3148; the proper way to use this is: + + for f in as_completed(fs): + result = await f # The 'await' may raise. + # Use result. + + If a timeout is specified, the 'await' will raise + TimeoutError when the timeout occurs before all Futures are done. + + Note: The futures 'f' are not necessarily members of fs. + 'b'expect an iterable of futures, not 'u'expect an iterable of futures, not 'b'Skip one event loop run cycle. + + This is a private helper for 'asyncio.sleep()', used + when the 'delay' is set to 0. It uses a bare 'yield' + expression (which Task.__step knows how to handle) + instead of creating a Future object. + 'u'Skip one event loop run cycle. + + This is a private helper for 'asyncio.sleep()', used + when the 'delay' is set to 0. It uses a bare 'yield' + expression (which Task.__step knows how to handle) + instead of creating a Future object. + 'b'Coroutine that completes after a given time (in seconds).'u'Coroutine that completes after a given time (in seconds).'b'Wrap a coroutine or an awaitable in a future. + + If the argument is a Future, it is returned directly. + 'u'Wrap a coroutine or an awaitable in a future. + + If the argument is a Future, it is returned directly. + 'b'The future belongs to a different loop than the one specified as the loop argument'u'The future belongs to a different loop than the one specified as the loop argument'b'An asyncio.Future, a coroutine or an awaitable is required'u'An asyncio.Future, a coroutine or an awaitable is required'b'Helper for asyncio.ensure_future(). + + Wraps awaitable (an object with __await__) into a coroutine + that will later be wrapped in a Task by ensure_future(). + 'u'Helper for asyncio.ensure_future(). 
+ + Wraps awaitable (an object with __await__) into a coroutine + that will later be wrapped in a Task by ensure_future(). + 'b'Helper for gather(). + + This overrides cancel() to cancel all the children and act more + like Task.cancel(), which doesn't immediately mark itself as + cancelled. + 'u'Helper for gather(). + + This overrides cancel() to cancel all the children and act more + like Task.cancel(), which doesn't immediately mark itself as + cancelled. + 'b'Return a future aggregating results from the given coroutines/futures. + + Coroutines will be wrapped in a future and scheduled in the event + loop. They will not necessarily be scheduled in the same order as + passed in. + + All futures must share the same event loop. If all the tasks are + done successfully, the returned future's result is the list of + results (in the order of the original sequence, not necessarily + the order of results arrival). If *return_exceptions* is True, + exceptions in the tasks are treated the same as successful + results, and gathered in the result list; otherwise, the first + raised exception will be immediately propagated to the returned + future. + + Cancellation: if the outer Future is cancelled, all children (that + have not completed yet) are also cancelled. If any child is + cancelled, this is treated as if it raised CancelledError -- + the outer Future is *not* cancelled in this case. (This is to + prevent the cancellation of one child to cause other children to + be cancelled.) + + If *return_exceptions* is False, cancelling gather() after it + has been marked done won't cancel any submitted awaitables. + For instance, gather can be marked done after propagating an + exception to the caller, therefore, calling ``gather.cancel()`` + after catching an exception (raised by one of the awaitables) from + gather won't cancel any other awaitables. + 'u'Return a future aggregating results from the given coroutines/futures. + + Coroutines will be wrapped in a future and scheduled in the event + loop. They will not necessarily be scheduled in the same order as + passed in. + + All futures must share the same event loop. If all the tasks are + done successfully, the returned future's result is the list of + results (in the order of the original sequence, not necessarily + the order of results arrival). If *return_exceptions* is True, + exceptions in the tasks are treated the same as successful + results, and gathered in the result list; otherwise, the first + raised exception will be immediately propagated to the returned + future. + + Cancellation: if the outer Future is cancelled, all children (that + have not completed yet) are also cancelled. If any child is + cancelled, this is treated as if it raised CancelledError -- + the outer Future is *not* cancelled in this case. (This is to + prevent the cancellation of one child to cause other children to + be cancelled.) + + If *return_exceptions* is False, cancelling gather() after it + has been marked done won't cancel any submitted awaitables. + For instance, gather can be marked done after propagating an + exception to the caller, therefore, calling ``gather.cancel()`` + after catching an exception (raised by one of the awaitables) from + gather won't cancel any other awaitables. + 'b'Wait for a future, shielding it from cancellation. 
+ + The statement + + res = await shield(something()) + + is exactly equivalent to the statement + + res = await something() + + *except* that if the coroutine containing it is cancelled, the + task running in something() is not cancelled. From the POV of + something(), the cancellation did not happen. But its caller is + still cancelled, so the yield-from expression still raises + CancelledError. Note: If something() is cancelled by other means + this will still cancel shield(). + + If you want to completely ignore cancellation (not recommended) + you can combine shield() with a try/except clause, as follows: + + try: + res = await shield(something()) + except CancelledError: + res = None + 'u'Wait for a future, shielding it from cancellation. + + The statement + + res = await shield(something()) + + is exactly equivalent to the statement + + res = await something() + + *except* that if the coroutine containing it is cancelled, the + task running in something() is not cancelled. From the POV of + something(), the cancellation did not happen. But its caller is + still cancelled, so the yield-from expression still raises + CancelledError. Note: If something() is cancelled by other means + this will still cancel shield(). + + If you want to completely ignore cancellation (not recommended) + you can combine shield() with a try/except clause, as follows: + + try: + res = await shield(something()) + except CancelledError: + res = None + 'b'Submit a coroutine object to a given event loop. + + Return a concurrent.futures.Future to access the result. + 'u'Submit a coroutine object to a given event loop. + + Return a concurrent.futures.Future to access the result. + 'b'A coroutine object is required'u'A coroutine object is required'b'Register a new task in asyncio as executed by loop.'u'Register a new task in asyncio as executed by loop.'b'Cannot enter into task 'u'Cannot enter into task 'b' while another task 'u' while another task 'b' is being executed.'u' is being executed.'b'Leaving task 'u'Leaving task 'b' does not match the current task 'u' does not match the current task 'b'Unregister a task.'u'Unregister a task.'u'asyncio.tasks'u'tasks'Temporary files. + +This module provides generic, low- and high-level interfaces for +creating temporary files and directories. All of the interfaces +provided by this module can be used without fear of race conditions +except for 'mktemp'. 'mktemp' is subject to race conditions and +should not be used; it is provided for backward compatibility only. + +The default path names are returned as str. If you supply bytes as +input, all return values will be in bytes. Ex: + + >>> tempfile.mkstemp() + (4, '/tmp/tmptpu9nin8') + >>> tempfile.mkdtemp(suffix=b'') + b'/tmp/tmppbi8f0hy' + +This module also provides some data items to the user: + + TMP_MAX - maximum number of names that will be tried before + giving up. + tempdir - If this is set to a string before the first use of + any routine from this module, it will be considered as + another candidate location to store temporary files. 
+TemporaryFileSpooledTemporaryFilegettempprefixgettempdirgettempprefixbgettempdirb_errno_Random_allocate_lock_text_openflags_bin_openflags_once_lock_infer_return_typeLook at the type of all args and divine their implied return type.return_typeCan't mix bytes and non-bytes in path components."Can't mix bytes and non-bytes in ""path components."_sanitize_paramsCommon parameter processing for most APIs in this module.output_type_RandomNameSequenceAn instance of _RandomNameSequence generates an endless + sequence of unpredictable strings which can safely be incorporated + into file names. Each string is eight characters long. Multiple + threads can safely use the same instance at the same time. + + _RandomNameSequence is an iterator.abcdefghijklmnopqrstuvwxyz0123456789_characterscur_pid_rng_pid_rngchooseletters_candidate_tempdir_listGenerate a list of candidate temporary directories which + _get_default_tempdir will try.envnameTMPDIRTEMPTMP~\AppData\Local\Temp%SYSTEMROOT%\Tempc:\tempc:\tmp\temp\tmp/tmp/var/tmp/usr/tmp_get_default_tempdirCalculate the default directory to use for temporary files. + This routine should be called exactly once. + + We determine whether or not a candidate temp dir is usable by + trying to create and write to a file in that directory. If this + is successful, the test file is deleted. To prevent denial of + service, the name of the test file must be randomized.blatNo usable temporary directory found in %s_name_sequence_get_candidate_namesCommon setup sequence for all user-callable interfaces._mkstemp_innersufCode common to mkstemp, TemporaryFile, and NamedTemporaryFile.tempfile.mkstempNo usable temporary file name foundThe default prefix for temporary directories.The default prefix for temporary directories as bytes.Accessor for tempfile.tempdir.A bytes version of tempfile.gettempdir().User-callable function to create and return a unique temporary + file. The return value is a pair (fd, name) where fd is the + file descriptor returned by os.open, and name is the filename. + + If 'suffix' is not None, the file name will end with that suffix, + otherwise there will be no suffix. + + If 'prefix' is not None, the file name will begin with that prefix, + otherwise a default prefix is used. + + If 'dir' is not None, the file will be created in that directory, + otherwise a default directory is used. + + If 'text' is specified and true, the file is opened in text + mode. Else (the default) the file is opened in binary mode. + + If any of 'suffix', 'prefix' and 'dir' are not None, they must be the + same type. If they are bytes, the returned name will be bytes; str + otherwise. + + The file is readable and writable only by the creating user ID. + If the operating system uses permission bits to indicate whether a + file is executable, the file is executable by no one. The file + descriptor is not inherited by children of this process. + + Caller is responsible for deleting the file when done with it. + User-callable function to create and return a unique temporary + directory. The return value is the pathname of the directory. + + Arguments are as for mkstemp, except that the 'text' argument is + not accepted. + + The directory is readable, writable, and searchable only by the + creating user. + + Caller is responsible for deleting the directory when done with it. + tempfile.mkdtempNo usable temporary directory name foundUser-callable function to return a unique temporary file name. The + file is not created. 
+ + Arguments are similar to mkstemp, except that the 'text' argument is + not accepted, and suffix=None, prefix=None and bytes file names are not + supported. + + THIS FUNCTION IS UNSAFE AND SHOULD NOT BE USED. The file name may + refer to a file that did not exist at some point, but by the time + you get around to creating it, someone else may have beaten you to + the punch. + No usable temporary filename found_TemporaryFileCloserA separate object allowing proper closing of a temporary file's + underlying file object, without adding a __del__ method to the + temporary file.close_calledTemporary file wrapper + + This class provides a wrapper around files opened for + temporary use. In particular, it seeks to automatically + remove the file when it is no longer needed. + _closerfunc_wrapper + Close the temporary file, possibly deleting it. + Create and return a temporary file. + Arguments: + 'prefix', 'suffix', 'dir' -- as for mkstemp. + 'mode' -- the mode argument to io.open (default "w+b"). + 'buffering' -- the buffer size argument to io.open (default -1). + 'encoding' -- the encoding argument to io.open (default None) + 'newline' -- the newline argument to io.open (default None) + 'delete' -- whether the file is deleted on close (default True). + 'errors' -- the errors argument to io.open (default None) + The file is created as mkstemp() would do it. + + Returns an object with a file-like interface; the name of the file + is accessible as its 'name' attribute. The file will be automatically + deleted when it is closed unless the 'delete' argument is set to False. + O_TEMPORARYO_TMPFILE_O_TMPFILE_WORKSCreate and return a temporary file. + Arguments: + 'prefix', 'suffix', 'dir' -- as for mkstemp. + 'mode' -- the mode argument to io.open (default "w+b"). + 'buffering' -- the buffer size argument to io.open (default -1). + 'encoding' -- the encoding argument to io.open (default None) + 'newline' -- the newline argument to io.open (default None) + 'errors' -- the errors argument to io.open (default None) + The file is created as mkstemp() would do it. + + Returns an object with a file-like interface. The file has no + name, and will cease to exist when it is closed. + flags2Temporary file wrapper, specialized to switch from BytesIO + or StringIO to a real file when it exceeds a certain size or + when a fileno is needed. + _rolled_file_max_size_TemporaryFileArgsrolloverCannot enter context with closed filesoftspaceCreate and return a temporary directory. This has the same + behavior as mkdtemp but can be used as a context manager. For + example: + + with TemporaryDirectory() as tmpdir: + ... + + Upon exiting the context, the directory and everything contained + in it are removed. + finalizeImplicitly cleaning up {!r}warn_message_finalizerresetperms<{} {!r}># high level safe interfaces# low level safe interfaces# deprecated unsafe interface# constants# Imports.# This variable _was_ unused for legacy reasons, see issue 10354.# But as of 3.5 we actually use it at runtime so changing it would# have a possibly desirable side effect... But we do not want to support# that as an API. It is undocumented on purpose. 
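[Editor's note] The tempfile docstrings above cover mkstemp(), mkdtemp(), NamedTemporaryFile() and TemporaryDirectory(). A small usage sketch of those public interfaces, illustrative only and separate from the database content:

    import os
    import tempfile

    # mkstemp() returns an open file descriptor plus the path; the caller
    # must close the descriptor and delete the file itself.
    fd, path = tempfile.mkstemp(suffix=".txt", prefix="demo_")
    os.write(fd, b"hello")
    os.close(fd)
    os.unlink(path)

    # mkdtemp() creates a directory accessible only to the creating user.
    d = tempfile.mkdtemp()
    os.rmdir(d)

    # NamedTemporaryFile() deletes the file on close (delete=True default);
    # the name is exposed via the .name attribute.
    with tempfile.NamedTemporaryFile(mode="w+", suffix=".log") as f:
        f.write("scratch data")
        print(f.name)

    # TemporaryDirectory() removes the whole tree when the context exits.
    with tempfile.TemporaryDirectory() as tmpdir:
        open(os.path.join(tmpdir, "note.txt"), "w").close()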
Do not depend on this.# Internal routines.# tempfile APIs return a str by default.# First, try the environment.# Failing that, try OS-specific locations.# As a last resort, the current directory.# Try only a few names per directory.# This exception is thrown when a directory with the chosen name# already exists on windows.# no point trying more names in this directory# try again# User visible interfaces.## from warnings import warn as _warn## _warn("mktemp is a potential security risk to your program",## RuntimeWarning, stacklevel=2)# NT provides delete-on-close as a primitive, so we don't need# the wrapper to do anything special. We still use it so that# file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.# Cache the unlinker so we don't get spurious errors at# shutdown when the module-level "os" is None'd out. Note# that this must be referenced as self.unlink, because the# name TemporaryFileWrapper may also get None'd out before# __del__ is called.# Need to ensure the file is deleted on __del__# Attribute lookups are delegated to the underlying file# and cached for non-numeric results# (i.e. methods are cached, closed and friends are not)# Avoid closing the file as long as the wrapper is alive,# see issue #18879.# The underlying __enter__ method returns the wrong object# (self.file) so override it to return the wrapper# Need to trap __exit__ as well to ensure the file gets# deleted when used in a with statement# iter() doesn't use __getattr__ to find the __iter__ method# Don't return iter(self.file), but yield from it to avoid closing# file as long as it's being used as iterator (see issue #23700). We# can't use 'yield from' here because iter(file) returns the file# object itself, which has a close method, and thus the file would get# closed when the generator is finalized, due to PEP380 semantics.# Setting O_TEMPORARY in the flags causes the OS to delete# the file when it is closed. This is only supported by Windows.# On non-POSIX and Cygwin systems, assume that we cannot unlink a file# while it is open.# Is the O_TMPFILE flag available and does it work?# The flag is set to False if os.open(dir, os.O_TMPFILE) raises an# IsADirectoryError exception# Linux kernel older than 3.11 ignores the O_TMPFILE flag:# O_TMPFILE is read as O_DIRECTORY. Trying to open a directory# with O_RDWR|O_DIRECTORY fails with IsADirectoryError, a# directory cannot be open to write. Set flag to False to not# try again.# The filesystem of the directory does not support O_TMPFILE.# For example, OSError(95, 'Operation not supported').# On Linux kernel older than 3.11, trying to open a regular# file (or a symbolic link to a regular file) with O_TMPFILE# fails with NotADirectoryError, because O_TMPFILE is read as# O_DIRECTORY.# Fallback to _mkstemp_inner().# The method caching trick from NamedTemporaryFile# won't work here, because _file may change from a# BytesIO/StringIO instance to a real file. So we list# all the methods directly.# Context management protocol# file protocol# PermissionError is raised on FreeBSD for directoriesb'Temporary files. + +This module provides generic, low- and high-level interfaces for +creating temporary files and directories. All of the interfaces +provided by this module can be used without fear of race conditions +except for 'mktemp'. 'mktemp' is subject to race conditions and +should not be used; it is provided for backward compatibility only. + +The default path names are returned as str. If you supply bytes as +input, all return values will be in bytes. 
Ex: + + >>> tempfile.mkstemp() + (4, '/tmp/tmptpu9nin8') + >>> tempfile.mkdtemp(suffix=b'') + b'/tmp/tmppbi8f0hy' + +This module also provides some data items to the user: + + TMP_MAX - maximum number of names that will be tried before + giving up. + tempdir - If this is set to a string before the first use of + any routine from this module, it will be considered as + another candidate location to store temporary files. +'u'Temporary files. + +This module provides generic, low- and high-level interfaces for +creating temporary files and directories. All of the interfaces +provided by this module can be used without fear of race conditions +except for 'mktemp'. 'mktemp' is subject to race conditions and +should not be used; it is provided for backward compatibility only. + +The default path names are returned as str. If you supply bytes as +input, all return values will be in bytes. Ex: + + >>> tempfile.mkstemp() + (4, '/tmp/tmptpu9nin8') + >>> tempfile.mkdtemp(suffix=b'') + b'/tmp/tmppbi8f0hy' + +This module also provides some data items to the user: + + TMP_MAX - maximum number of names that will be tried before + giving up. + tempdir - If this is set to a string before the first use of + any routine from this module, it will be considered as + another candidate location to store temporary files. +'b'NamedTemporaryFile'u'NamedTemporaryFile'b'TemporaryFile'u'TemporaryFile'b'SpooledTemporaryFile'u'SpooledTemporaryFile'b'TemporaryDirectory'u'TemporaryDirectory'b'mkstemp'u'mkstemp'b'mkdtemp'u'mkdtemp'b'mktemp'u'mktemp'b'TMP_MAX'u'TMP_MAX'b'gettempprefix'u'gettempprefix'b'tempdir'u'tempdir'b'gettempdir'u'gettempdir'b'gettempprefixb'u'gettempprefixb'b'gettempdirb'u'gettempdirb'b'O_NOFOLLOW'u'O_NOFOLLOW'b'tmp'u'tmp'b'Look at the type of all args and divine their implied return type.'u'Look at the type of all args and divine their implied return type.'b'Can't mix bytes and non-bytes in path components.'u'Can't mix bytes and non-bytes in path components.'b'Common parameter processing for most APIs in this module.'u'Common parameter processing for most APIs in this module.'b'An instance of _RandomNameSequence generates an endless + sequence of unpredictable strings which can safely be incorporated + into file names. Each string is eight characters long. Multiple + threads can safely use the same instance at the same time. + + _RandomNameSequence is an iterator.'u'An instance of _RandomNameSequence generates an endless + sequence of unpredictable strings which can safely be incorporated + into file names. Each string is eight characters long. Multiple + threads can safely use the same instance at the same time. + + _RandomNameSequence is an iterator.'b'abcdefghijklmnopqrstuvwxyz0123456789_'u'abcdefghijklmnopqrstuvwxyz0123456789_'b'_rng_pid'u'_rng_pid'b'Generate a list of candidate temporary directories which + _get_default_tempdir will try.'u'Generate a list of candidate temporary directories which + _get_default_tempdir will try.'b'TMPDIR'u'TMPDIR'b'TEMP'u'TEMP'b'TMP'u'TMP'b'~\AppData\Local\Temp'u'~\AppData\Local\Temp'b'%SYSTEMROOT%\Temp'u'%SYSTEMROOT%\Temp'b'c:\temp'u'c:\temp'b'c:\tmp'u'c:\tmp'b'\temp'u'\temp'b'\tmp'u'\tmp'b'/tmp'u'/tmp'b'/var/tmp'u'/var/tmp'b'/usr/tmp'u'/usr/tmp'b'Calculate the default directory to use for temporary files. + This routine should be called exactly once. + + We determine whether or not a candidate temp dir is usable by + trying to create and write to a file in that directory. If this + is successful, the test file is deleted. 
To prevent denial of + service, the name of the test file must be randomized.'u'Calculate the default directory to use for temporary files. + This routine should be called exactly once. + + We determine whether or not a candidate temp dir is usable by + trying to create and write to a file in that directory. If this + is successful, the test file is deleted. To prevent denial of + service, the name of the test file must be randomized.'b'blat'b'No usable temporary directory found in %s'u'No usable temporary directory found in %s'b'Common setup sequence for all user-callable interfaces.'u'Common setup sequence for all user-callable interfaces.'b'Code common to mkstemp, TemporaryFile, and NamedTemporaryFile.'u'Code common to mkstemp, TemporaryFile, and NamedTemporaryFile.'b'tempfile.mkstemp'u'tempfile.mkstemp'b'No usable temporary file name found'u'No usable temporary file name found'b'The default prefix for temporary directories.'u'The default prefix for temporary directories.'b'The default prefix for temporary directories as bytes.'u'The default prefix for temporary directories as bytes.'b'Accessor for tempfile.tempdir.'u'Accessor for tempfile.tempdir.'b'A bytes version of tempfile.gettempdir().'u'A bytes version of tempfile.gettempdir().'b'User-callable function to create and return a unique temporary + file. The return value is a pair (fd, name) where fd is the + file descriptor returned by os.open, and name is the filename. + + If 'suffix' is not None, the file name will end with that suffix, + otherwise there will be no suffix. + + If 'prefix' is not None, the file name will begin with that prefix, + otherwise a default prefix is used. + + If 'dir' is not None, the file will be created in that directory, + otherwise a default directory is used. + + If 'text' is specified and true, the file is opened in text + mode. Else (the default) the file is opened in binary mode. + + If any of 'suffix', 'prefix' and 'dir' are not None, they must be the + same type. If they are bytes, the returned name will be bytes; str + otherwise. + + The file is readable and writable only by the creating user ID. + If the operating system uses permission bits to indicate whether a + file is executable, the file is executable by no one. The file + descriptor is not inherited by children of this process. + + Caller is responsible for deleting the file when done with it. + 'u'User-callable function to create and return a unique temporary + file. The return value is a pair (fd, name) where fd is the + file descriptor returned by os.open, and name is the filename. + + If 'suffix' is not None, the file name will end with that suffix, + otherwise there will be no suffix. + + If 'prefix' is not None, the file name will begin with that prefix, + otherwise a default prefix is used. + + If 'dir' is not None, the file will be created in that directory, + otherwise a default directory is used. + + If 'text' is specified and true, the file is opened in text + mode. Else (the default) the file is opened in binary mode. + + If any of 'suffix', 'prefix' and 'dir' are not None, they must be the + same type. If they are bytes, the returned name will be bytes; str + otherwise. + + The file is readable and writable only by the creating user ID. + If the operating system uses permission bits to indicate whether a + file is executable, the file is executable by no one. The file + descriptor is not inherited by children of this process. + + Caller is responsible for deleting the file when done with it. 
+ 'b'User-callable function to create and return a unique temporary + directory. The return value is the pathname of the directory. + + Arguments are as for mkstemp, except that the 'text' argument is + not accepted. + + The directory is readable, writable, and searchable only by the + creating user. + + Caller is responsible for deleting the directory when done with it. + 'u'User-callable function to create and return a unique temporary + directory. The return value is the pathname of the directory. + + Arguments are as for mkstemp, except that the 'text' argument is + not accepted. + + The directory is readable, writable, and searchable only by the + creating user. + + Caller is responsible for deleting the directory when done with it. + 'b'tempfile.mkdtemp'u'tempfile.mkdtemp'b'No usable temporary directory name found'u'No usable temporary directory name found'b'User-callable function to return a unique temporary file name. The + file is not created. + + Arguments are similar to mkstemp, except that the 'text' argument is + not accepted, and suffix=None, prefix=None and bytes file names are not + supported. + + THIS FUNCTION IS UNSAFE AND SHOULD NOT BE USED. The file name may + refer to a file that did not exist at some point, but by the time + you get around to creating it, someone else may have beaten you to + the punch. + 'u'User-callable function to return a unique temporary file name. The + file is not created. + + Arguments are similar to mkstemp, except that the 'text' argument is + not accepted, and suffix=None, prefix=None and bytes file names are not + supported. + + THIS FUNCTION IS UNSAFE AND SHOULD NOT BE USED. The file name may + refer to a file that did not exist at some point, but by the time + you get around to creating it, someone else may have beaten you to + the punch. + 'b'No usable temporary filename found'u'No usable temporary filename found'b'A separate object allowing proper closing of a temporary file's + underlying file object, without adding a __del__ method to the + temporary file.'u'A separate object allowing proper closing of a temporary file's + underlying file object, without adding a __del__ method to the + temporary file.'b'Temporary file wrapper + + This class provides a wrapper around files opened for + temporary use. In particular, it seeks to automatically + remove the file when it is no longer needed. + 'u'Temporary file wrapper + + This class provides a wrapper around files opened for + temporary use. In particular, it seeks to automatically + remove the file when it is no longer needed. + 'b' + Close the temporary file, possibly deleting it. + 'u' + Close the temporary file, possibly deleting it. + 'b'Create and return a temporary file. + Arguments: + 'prefix', 'suffix', 'dir' -- as for mkstemp. + 'mode' -- the mode argument to io.open (default "w+b"). + 'buffering' -- the buffer size argument to io.open (default -1). + 'encoding' -- the encoding argument to io.open (default None) + 'newline' -- the newline argument to io.open (default None) + 'delete' -- whether the file is deleted on close (default True). + 'errors' -- the errors argument to io.open (default None) + The file is created as mkstemp() would do it. + + Returns an object with a file-like interface; the name of the file + is accessible as its 'name' attribute. The file will be automatically + deleted when it is closed unless the 'delete' argument is set to False. + 'u'Create and return a temporary file. + Arguments: + 'prefix', 'suffix', 'dir' -- as for mkstemp. 
+ 'mode' -- the mode argument to io.open (default "w+b"). + 'buffering' -- the buffer size argument to io.open (default -1). + 'encoding' -- the encoding argument to io.open (default None) + 'newline' -- the newline argument to io.open (default None) + 'delete' -- whether the file is deleted on close (default True). + 'errors' -- the errors argument to io.open (default None) + The file is created as mkstemp() would do it. + + Returns an object with a file-like interface; the name of the file + is accessible as its 'name' attribute. The file will be automatically + deleted when it is closed unless the 'delete' argument is set to False. + 'b'O_TMPFILE'u'O_TMPFILE'b'Create and return a temporary file. + Arguments: + 'prefix', 'suffix', 'dir' -- as for mkstemp. + 'mode' -- the mode argument to io.open (default "w+b"). + 'buffering' -- the buffer size argument to io.open (default -1). + 'encoding' -- the encoding argument to io.open (default None) + 'newline' -- the newline argument to io.open (default None) + 'errors' -- the errors argument to io.open (default None) + The file is created as mkstemp() would do it. + + Returns an object with a file-like interface. The file has no + name, and will cease to exist when it is closed. + 'u'Create and return a temporary file. + Arguments: + 'prefix', 'suffix', 'dir' -- as for mkstemp. + 'mode' -- the mode argument to io.open (default "w+b"). + 'buffering' -- the buffer size argument to io.open (default -1). + 'encoding' -- the encoding argument to io.open (default None) + 'newline' -- the newline argument to io.open (default None) + 'errors' -- the errors argument to io.open (default None) + The file is created as mkstemp() would do it. + + Returns an object with a file-like interface. The file has no + name, and will cease to exist when it is closed. + 'b'Temporary file wrapper, specialized to switch from BytesIO + or StringIO to a real file when it exceeds a certain size or + when a fileno is needed. + 'u'Temporary file wrapper, specialized to switch from BytesIO + or StringIO to a real file when it exceeds a certain size or + when a fileno is needed. + 'b'buffering'u'buffering'b'suffix'u'suffix'b'newline'u'newline'b'Cannot enter context with closed file'u'Cannot enter context with closed file'b'Create and return a temporary directory. This has the same + behavior as mkdtemp but can be used as a context manager. For + example: + + with TemporaryDirectory() as tmpdir: + ... + + Upon exiting the context, the directory and everything contained + in it are removed. + 'u'Create and return a temporary directory. This has the same + behavior as mkdtemp but can be used as a context manager. For + example: + + with TemporaryDirectory() as tmpdir: + ... + + Upon exiting the context, the directory and everything contained + in it are removed. 
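[Editor's note] SpooledTemporaryFile, described above, buffers data in memory (BytesIO/StringIO) and only "rolls over" to a real on-disk file once max_size is exceeded or a fileno is needed. A short illustrative sketch (the _rolled attribute is internal, shown only to make the switch visible):

    import tempfile

    with tempfile.SpooledTemporaryFile(max_size=1024, mode="w+b") as f:
        f.write(b"x" * 16)      # small write: still an in-memory BytesIO
        print(f._rolled)        # False
        f.rollover()            # force the switch to a real temporary file
        print(f._rolled)        # True
        f.seek(0)
        print(f.read())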
+ 'b'Implicitly cleaning up {!r}'u'Implicitly cleaning up {!r}'b'<{} {!r}>'u'<{} {!r}>'u'tempfile'B0B110115200B1152001200B1200B134B1501800B180019200B19200B200230400B2304002400B2400B30038400B384004800B4800B5057600B57600B600B759600B9600BRKINTBS0BS1BSDLYCDSUSPCEOFCEOLCEOTCERASECFLUSHCINTRCKILLCLNEXTCLOCALCQUITCR0CR1CR212288CR3CRDLYCREADCRPRNT196608CRTSCTSCS5CS6CS7CS8CSIZECSTARTCSTOPCSTOPBCSUSPCWERASEECHOECHOCTLECHOEECHOKECHOKEECHONLECHOPRTEXTAEXTBFF0FF1FFDLY2147772029FIOASYNC536897025FIOCLEX2147772030FIONBIO536897026FIONCLEX1074030207FIONREADFLUSHOHUPCLICANONICRNLIEXTENIGNBRKIGNCRIGNPARIMAXBELINLCRINPCKISIGISTRIPIXANYIXOFFIXONNCCSNL0NL1NLDLYNOFLSHOCRNLOFDELOFILLONLCRONLRETONOCROPOSTPARENBPARMRKPARODDPENDINTAB0TAB1TAB2TAB3TABDLYTCIFLUSHTCIOFFTCIOFLUSHTCIONTCOFLUSHTCOOFFTCOONTCSADRAINTCSANOWTCSASOFT2147775586TIOCCONS536900621TIOCEXCL1074033690TIOCGETD1074033783TIOCGPGRP1074295912TIOCGWINSZ2147775595TIOCMBIC2147775596TIOCMBIS1074033770TIOCMGET2147775597TIOCMSETTIOCM_CARTIOCM_CDTIOCM_CTSTIOCM_DSRTIOCM_DTRTIOCM_LETIOCM_RITIOCM_RNGTIOCM_RTSTIOCM_SRTIOCM_ST536900721TIOCNOTTY536900622TIOCNXCL1074033779TIOCOUTQ2147775600TIOCPKTTIOCPKT_DATATIOCPKT_DOSTOPTIOCPKT_FLUSHREADTIOCPKT_FLUSHWRITETIOCPKT_NOSTOPTIOCPKT_STARTTIOCPKT_STOP536900705TIOCSCTTY2147775515TIOCSETD2147775606TIOCSPGRP2147578994TIOCSTI2148037735TIOCSWINSZTOSTOPVDISCARDVEOFVEOLVEOL2VERASEVINTRVKILLVLNEXTVMINVQUITVREPRINTVSTARTVSTOPVSUSPVT0VT1VTDLYVTIMEVWERASEu'This module provides an interface to the Posix calls for tty I/O control. +For a complete description of these calls, see the Posix or Unix manual +pages. It is only available for those Unix versions that support Posix +termios style tty I/O control. + +All functions in this module take a file descriptor fd as their first +argument. This can be an integer file descriptor, such as returned by +sys.stdin.fileno(), or a file object, such as sys.stdin itself.'u'/Users/pwntester/.pyenv/versions/3.8.13/lib/python3.8/lib-dynload/termios.cpython-38-darwin.so'u'termios'termios.errortcdraintcflowtcflushtcsendbreaktermiosTest runner and result class for the regression test suite. + +ETRegressionTestResulttestsuite__suite__e__start_time__results__getId_add_resultsystem-outsystem-errv2__makeErrorDicterr_typeerr_valueerr_tbfailureskipped UNEXPECTED_SUCCESSflavorQuietRegressionTestRunnerget_test_runner_classTestTeststest_passtest_pass_slowtest_failfailure messagetest_errorerror messagerunner_clsOutput:XML: b'Test runner and result class for the regression test suite. + +'u'Test runner and result class for the regression test suite. + +'b'testsuite'u'testsuite'b'testcase'u'testcase'b'completed'u'completed'b'time'b'system-out'u'system-out'b'system-err'u'system-err'b'skipped 'u'skipped 'b'UNEXPECTED_SUCCESS'u'UNEXPECTED_SUCCESS'b'failures'u'failures'b'failure message'u'failure message'b'error message'u'error message'b'Output:'u'Output:'b'XML: 'u'XML: 'u'test.support.testresult'u'support.testresult'u'testresult'text_file + +provides the TextFile class, which gives an interface to text files +that (optionally) takes care of stripping comments, ignoring blank +lines, and joining lines with backslashes.Provides a file-like object that takes care of all the things you + commonly want to do when processing a text file that has some + line-by-line syntax: strip comments (as long as "#" is your + comment character), skip blank lines, join adjacent lines by + escaping the newline (ie. backslash at end of line), strip + leading and/or trailing whitespace. 
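[Editor's note] The termios constants and module docstring recorded above belong to the POSIX tty-control interface. A classic, POSIX-only illustrative sketch that temporarily clears the ECHO bit (similar to what getpass does); it needs a real terminal to run:

    import sys
    import termios

    fd = sys.stdin.fileno()
    old = termios.tcgetattr(fd)     # [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]
    new = list(old)
    new[3] &= ~termios.ECHO         # clear ECHO in the local-mode flags
    try:
        termios.tcsetattr(fd, termios.TCSADRAIN, new)
        secret = input("not echoed: ")
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, old)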
All of these are optional + and independently controllable. + + Provides a 'warn()' method so you can generate warning messages that + report physical line number, even if the logical line in question + spans multiple physical lines. Also provides 'unreadline()' for + implementing line-at-a-time lookahead. + + Constructor is called as: + + TextFile (filename=None, file=None, **options) + + It bombs (RuntimeError) if both 'filename' and 'file' are None; + 'filename' should be a string, and 'file' a file object (or + something that provides 'readline()' and 'close()' methods). It is + recommended that you supply at least 'filename', so that TextFile + can include it in warning messages. If 'file' is not supplied, + TextFile creates its own using 'io.open()'. + + The options are all boolean, and affect the value returned by + 'readline()': + strip_comments [default: true] + strip from "#" to end-of-line, as well as any whitespace + leading up to the "#" -- unless it is escaped by a backslash + lstrip_ws [default: false] + strip leading whitespace from each line before returning it + rstrip_ws [default: true] + strip trailing whitespace (including line terminator!) from + each line before returning it + skip_blanks [default: true} + skip lines that are empty *after* stripping comments and + whitespace. (If both lstrip_ws and rstrip_ws are false, + then some lines may consist of solely whitespace: these will + *not* be skipped, even if 'skip_blanks' is true.) + join_lines [default: false] + if a backslash is the last non-newline character on a line + after stripping comments and whitespace, join the following line + to it to form one "logical line"; if N consecutive lines end + with a backslash, then N+1 physical lines will be joined to + form one logical line. + collapse_join [default: false] + strip leading whitespace from lines that are joined to their + predecessor; only matters if (join_lines and not lstrip_ws) + errors [default: 'strict'] + error handler used to decode the file content + + Note that since 'rstrip_ws' can strip the trailing newline, the + semantics of 'readline()' must differ from those of the builtin file + object's 'readline()' method! In particular, 'readline()' returns + None for end-of-file: an empty string might just be a blank line (or + an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is + not.lstrip_wsrstrip_wscollapse_joindefault_optionsConstruct a new TextFile object. At least one of 'filename' + (a string) and 'file' (a file-like object) must be supplied. + They keyword argument options are described above and affect + the values returned by 'readline()'.you must supply either or both of 'filename' and 'file'invalid TextFile option '%s'linebufOpen a new file named 'filename'. This overrides both the + 'filename' and 'file' arguments to the constructor.Close the current file and forget everything we know about it + (filename, current line number).gen_erroroutmsglines %d-%d: line %d: error: Print (to stderr) a warning message tied to the current logical + line in the current file. If the current logical line in the + file spans multiple physical lines, the warning refers to the + whole range, eg. "lines 3-5". If 'line' supplied, it overrides + the current line number; it may be a list or tuple to indicate a + range of physical lines, or an integer for a single physical + line.warning: Read and return a single logical line from the current file (or + from an internal buffer if lines have previously been "unread" + with 'unreadline()'). 
If the 'join_lines' option is true, this + may involve reading multiple physical lines concatenated into a + single string. Updates the current line number, so calling + 'warn()' after 'readline()' emits a warning about the physical + line(s) just read. Returns None on end-of-file, since the empty + string can occur if 'rstrip_ws' is true but 'strip_blanks' is + not.buildup_line\#continuation line immediately precedes end-of-file"continuation line immediately precedes ""end-of-file"\ +Read and return the list of all logical lines remaining in the + current file.Push 'line' (a string) onto an internal buffer that will be + checked by future 'readline()' calls. Handy for implementing + a parser with line-at-a-time lookahead.# set values for all options -- either from client option hash# or fallback to default_options# sanity check client option hash# assuming that file is at BOF!# 'linebuf' is a stack of lines that will be emptied before we# actually read from the file; it's only populated by an# 'unreadline()' operation# If any "unread" lines waiting in 'linebuf', return the top# one. (We don't actually buffer read-ahead data -- lines only# get put in 'linebuf' if the client explicitly does an# 'unreadline()'.# read the line, make it None if EOF# Look for the first "#" in the line. If none, never# mind. If we find one and it's the first character, or# is not preceded by "\", then it starts a comment --# strip the comment, strip whitespace before it, and# carry on. Otherwise, it's just an escaped "#", so# unescape it (and any other escaped "#"'s that might be# lurking in there) and otherwise leave the line alone.# no "#" -- no comments# It's definitely a comment -- either "#" is the first# character, or it's elsewhere and unescaped.# Have to preserve the trailing newline, because it's# the job of a later step (rstrip_ws) to remove it --# and if rstrip_ws is false, we'd better preserve it!# (NB. this means that if the final line is all comment# and has no trailing newline, we will think that it's# EOF; I think that's OK.)# If all that's left is whitespace, then skip line# *now*, before we try to join it to 'buildup_line' --# that way constructs like# hello \\# # comment that should be ignored# there# result in "hello there".# it's an escaped "#"# did previous line end with a backslash? then accumulate# oops: end of file# careful: pay attention to line number when incrementing it# just an ordinary line, read it as usual# still have to be careful about incrementing the line number!# strip whitespace however the client wants (leading and# trailing, or one or the other, or neither)# blank line (whether we rstrip'ed or not)? skip to next line# if appropriate# well, I guess there's some actual content there: return itb'text_file + +provides the TextFile class, which gives an interface to text files +that (optionally) takes care of stripping comments, ignoring blank +lines, and joining lines with backslashes.'u'text_file + +provides the TextFile class, which gives an interface to text files +that (optionally) takes care of stripping comments, ignoring blank +lines, and joining lines with backslashes.'b'Provides a file-like object that takes care of all the things you + commonly want to do when processing a text file that has some + line-by-line syntax: strip comments (as long as "#" is your + comment character), skip blank lines, join adjacent lines by + escaping the newline (ie. backslash at end of line), strip + leading and/or trailing whitespace. 
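[Editor's note] The distutils.text_file.TextFile docstrings above describe an option-driven line reader: comment stripping, blank-line skipping, backslash continuation and one-line lookahead via unreadline(). A rough illustrative sketch against the Python 3.8 runtime this database was built from, using an in-memory file object instead of a real filename:

    import io
    from distutils.text_file import TextFile

    text = "# a comment\n\nfirst \\\n    continued\nsecond\n"
    tf = TextFile(file=io.StringIO(text), filename="<demo>",
                  strip_comments=1, skip_blanks=1,
                  join_lines=1, collapse_join=1)
    print(tf.readlines())   # roughly ['first continued', 'second']
    tf.close()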
All of these are optional + and independently controllable. + + Provides a 'warn()' method so you can generate warning messages that + report physical line number, even if the logical line in question + spans multiple physical lines. Also provides 'unreadline()' for + implementing line-at-a-time lookahead. + + Constructor is called as: + + TextFile (filename=None, file=None, **options) + + It bombs (RuntimeError) if both 'filename' and 'file' are None; + 'filename' should be a string, and 'file' a file object (or + something that provides 'readline()' and 'close()' methods). It is + recommended that you supply at least 'filename', so that TextFile + can include it in warning messages. If 'file' is not supplied, + TextFile creates its own using 'io.open()'. + + The options are all boolean, and affect the value returned by + 'readline()': + strip_comments [default: true] + strip from "#" to end-of-line, as well as any whitespace + leading up to the "#" -- unless it is escaped by a backslash + lstrip_ws [default: false] + strip leading whitespace from each line before returning it + rstrip_ws [default: true] + strip trailing whitespace (including line terminator!) from + each line before returning it + skip_blanks [default: true} + skip lines that are empty *after* stripping comments and + whitespace. (If both lstrip_ws and rstrip_ws are false, + then some lines may consist of solely whitespace: these will + *not* be skipped, even if 'skip_blanks' is true.) + join_lines [default: false] + if a backslash is the last non-newline character on a line + after stripping comments and whitespace, join the following line + to it to form one "logical line"; if N consecutive lines end + with a backslash, then N+1 physical lines will be joined to + form one logical line. + collapse_join [default: false] + strip leading whitespace from lines that are joined to their + predecessor; only matters if (join_lines and not lstrip_ws) + errors [default: 'strict'] + error handler used to decode the file content + + Note that since 'rstrip_ws' can strip the trailing newline, the + semantics of 'readline()' must differ from those of the builtin file + object's 'readline()' method! In particular, 'readline()' returns + None for end-of-file: an empty string might just be a blank line (or + an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is + not.'u'Provides a file-like object that takes care of all the things you + commonly want to do when processing a text file that has some + line-by-line syntax: strip comments (as long as "#" is your + comment character), skip blank lines, join adjacent lines by + escaping the newline (ie. backslash at end of line), strip + leading and/or trailing whitespace. All of these are optional + and independently controllable. + + Provides a 'warn()' method so you can generate warning messages that + report physical line number, even if the logical line in question + spans multiple physical lines. Also provides 'unreadline()' for + implementing line-at-a-time lookahead. + + Constructor is called as: + + TextFile (filename=None, file=None, **options) + + It bombs (RuntimeError) if both 'filename' and 'file' are None; + 'filename' should be a string, and 'file' a file object (or + something that provides 'readline()' and 'close()' methods). It is + recommended that you supply at least 'filename', so that TextFile + can include it in warning messages. If 'file' is not supplied, + TextFile creates its own using 'io.open()'. 
+ + The options are all boolean, and affect the value returned by + 'readline()': + strip_comments [default: true] + strip from "#" to end-of-line, as well as any whitespace + leading up to the "#" -- unless it is escaped by a backslash + lstrip_ws [default: false] + strip leading whitespace from each line before returning it + rstrip_ws [default: true] + strip trailing whitespace (including line terminator!) from + each line before returning it + skip_blanks [default: true} + skip lines that are empty *after* stripping comments and + whitespace. (If both lstrip_ws and rstrip_ws are false, + then some lines may consist of solely whitespace: these will + *not* be skipped, even if 'skip_blanks' is true.) + join_lines [default: false] + if a backslash is the last non-newline character on a line + after stripping comments and whitespace, join the following line + to it to form one "logical line"; if N consecutive lines end + with a backslash, then N+1 physical lines will be joined to + form one logical line. + collapse_join [default: false] + strip leading whitespace from lines that are joined to their + predecessor; only matters if (join_lines and not lstrip_ws) + errors [default: 'strict'] + error handler used to decode the file content + + Note that since 'rstrip_ws' can strip the trailing newline, the + semantics of 'readline()' must differ from those of the builtin file + object's 'readline()' method! In particular, 'readline()' returns + None for end-of-file: an empty string might just be a blank line (or + an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is + not.'b'strip_comments'u'strip_comments'b'skip_blanks'u'skip_blanks'b'lstrip_ws'u'lstrip_ws'b'rstrip_ws'u'rstrip_ws'b'join_lines'u'join_lines'b'collapse_join'u'collapse_join'b'Construct a new TextFile object. At least one of 'filename' + (a string) and 'file' (a file-like object) must be supplied. + They keyword argument options are described above and affect + the values returned by 'readline()'.'u'Construct a new TextFile object. At least one of 'filename' + (a string) and 'file' (a file-like object) must be supplied. + They keyword argument options are described above and affect + the values returned by 'readline()'.'b'you must supply either or both of 'filename' and 'file''u'you must supply either or both of 'filename' and 'file''b'invalid TextFile option '%s''u'invalid TextFile option '%s''b'Open a new file named 'filename'. This overrides both the + 'filename' and 'file' arguments to the constructor.'u'Open a new file named 'filename'. This overrides both the + 'filename' and 'file' arguments to the constructor.'b'Close the current file and forget everything we know about it + (filename, current line number).'u'Close the current file and forget everything we know about it + (filename, current line number).'b'lines %d-%d: 'u'lines %d-%d: 'b'line %d: 'u'line %d: 'b'error: 'u'error: 'b'Print (to stderr) a warning message tied to the current logical + line in the current file. If the current logical line in the + file spans multiple physical lines, the warning refers to the + whole range, eg. "lines 3-5". If 'line' supplied, it overrides + the current line number; it may be a list or tuple to indicate a + range of physical lines, or an integer for a single physical + line.'u'Print (to stderr) a warning message tied to the current logical + line in the current file. If the current logical line in the + file spans multiple physical lines, the warning refers to the + whole range, eg. "lines 3-5". 
If 'line' supplied, it overrides + the current line number; it may be a list or tuple to indicate a + range of physical lines, or an integer for a single physical + line.'b'warning: 'u'warning: 'b'Read and return a single logical line from the current file (or + from an internal buffer if lines have previously been "unread" + with 'unreadline()'). If the 'join_lines' option is true, this + may involve reading multiple physical lines concatenated into a + single string. Updates the current line number, so calling + 'warn()' after 'readline()' emits a warning about the physical + line(s) just read. Returns None on end-of-file, since the empty + string can occur if 'rstrip_ws' is true but 'strip_blanks' is + not.'u'Read and return a single logical line from the current file (or + from an internal buffer if lines have previously been "unread" + with 'unreadline()'). If the 'join_lines' option is true, this + may involve reading multiple physical lines concatenated into a + single string. Updates the current line number, so calling + 'warn()' after 'readline()' emits a warning about the physical + line(s) just read. Returns None on end-of-file, since the empty + string can occur if 'rstrip_ws' is true but 'strip_blanks' is + not.'b'\#'u'\#'b'continuation line immediately precedes end-of-file'u'continuation line immediately precedes end-of-file'b'\ +'u'\ +'b'Read and return the list of all logical lines remaining in the + current file.'u'Read and return the list of all logical lines remaining in the + current file.'b'Push 'line' (a string) onto an internal buffer that will be + checked by future 'readline()' calls. Handy for implementing + a parser with line-at-a-time lookahead.'u'Push 'line' (a string) onto an internal buffer that will be + checked by future 'readline()' calls. Handy for implementing + a parser with line-at-a-time lookahead.'u'distutils.text_file'u'text_file'Text wrapping and filling. +TextWrappershorten + _whitespace + Object for wrapping/filling text. The public interface consists of + the wrap() and fill() methods; the other methods are just there for + subclasses to override in order to tweak the default behaviour. + If you want to completely replace the main wrapping algorithm, + you'll probably have to override _wrap_chunks(). + + Several instance attributes control various aspects of wrapping: + width (default: 70) + the maximum width of wrapped lines (unless break_long_words + is false) + initial_indent (default: "") + string that will be prepended to the first line of wrapped + output. Counts towards the line's width. + subsequent_indent (default: "") + string that will be prepended to all lines save the first + of wrapped output; also counts towards each line's width. + expand_tabs (default: true) + Expand tabs in input text to spaces before further processing. + Each tab will become 0 .. 'tabsize' spaces, depending on its position + in its line. If false, each tab is treated as a single character. + tabsize (default: 8) + Expand tabs in input text to 0 .. 'tabsize' spaces, unless + 'expand_tabs' is false. + replace_whitespace (default: true) + Replace all whitespace characters in the input text by spaces + after tab expansion. Note that if expand_tabs is false and + replace_whitespace is true, every tab will be converted to a + single space! + fix_sentence_endings (default: false) + Ensure that sentence-ending punctuation is always followed + by two spaces. Off by default because the algorithm is + (unavoidably) imperfect. 
+ break_long_words (default: true) + Break words longer than 'width'. If false, those words will not + be broken, and some lines might be longer than 'width'. + break_on_hyphens (default: true) + Allow breaking hyphenated words. If true, wrapping will occur + preferably on whitespaces and right after hyphens part of + compound words. + drop_whitespace (default: true) + Drop leading and trailing whitespace from lines. + max_lines (default: None) + Truncate wrapped lines. + placeholder (default: ' [...]') + Append to the last line of truncated text. + unicode_whitespace_transuspace[\w!"\'&.,?]word_punct[^\d\W]letter[^nowhitespace + ( # any whitespace + %(ws)s+ + | # em-dash between words + (?<=%(wp)s) -{2,} (?=\w) + | # word, possibly hyphenated + %(nws)s+? (?: + # hyphenated word + -(?: (?<=%(lt)s{2}-) | (?<=%(lt)s-%(lt)s-)) + (?= %(lt)s -? %(lt)s) + | # end of word + (?=%(ws)s|\Z) + | # em-dash + (?<=%(wp)s) (?=-{2,}\w) + ) + )wpnwswordsep_re(%s+)wordsep_simple_re[a-z][\.\!\?][\"\']?\Zr'[a-z]'r'[\.\!\?]'r'[\"\']?'r'\Z'sentence_end_rereplace_whitespacefix_sentence_endingsbreak_long_wordsdrop_whitespacebreak_on_hyphensmax_linesplaceholder_munge_whitespace_munge_whitespace(text : string) -> string + + Munge whitespace in text: expand tabs and convert all other + whitespace characters to spaces. Eg. " foo\tbar\n\nbaz" + becomes " foo bar baz". + _split_split(text : string) -> [string] + + Split the text to wrap into indivisible chunks. Chunks are + not quite the same as words; see _wrap_chunks() for full + details. As an example, the text + Look, goof-ball -- use the -b option! + breaks into the following chunks: + 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ', + 'use', ' ', 'the', ' ', '-b', ' ', 'option!' + if break_on_hyphens is True, or in: + 'Look,', ' ', 'goof-ball', ' ', '--', ' ', + 'use', ' ', 'the', ' ', '-b', ' ', option!' + otherwise. + _fix_sentence_endings_fix_sentence_endings(chunks : [string]) + + Correct for sentence endings buried in 'chunks'. Eg. when the + original text contains "... foo.\nBar ...", munge_whitespace() + and split() will convert that to [..., "foo.", " ", "Bar", ...] + which has one too few spaces; this method simply changes the one + space to two. + patsearch_handle_long_wordreversed_chunks_handle_long_word(chunks : [string], + cur_line : [string], + cur_len : int, width : int) + + Handle a chunk of text (most likely a word, not whitespace) that + is too long to fit in any line. + space_left_wrap_chunks_wrap_chunks(chunks : [string]) -> [string] + + Wrap a sequence of text chunks and return a list of lines of + length 'self.width' or less. (If 'break_long_words' is false, + some lines may be longer than this.) Chunks correspond roughly + to words and the whitespace between them: each chunk is + indivisible (modulo 'break_long_words'), but a line break can + come between any two chunks. Chunks should not have internal + whitespace; ie. a chunk is either all whitespace or a "word". + Whitespace chunks will be removed from the beginning and end of + lines, but apart from that whitespace is preserved. + invalid width %r (must be > 0)placeholder too large for max widthprev_line_split_chunkswrap(text : string) -> [string] + + Reformat the single paragraph in 'text' so it fits in lines of + no more than 'self.width' columns, and return a list of wrapped + lines. Tabs in 'text' are expanded with string.expandtabs(), + and all other whitespace characters (including newline) are + converted to space. 
+ fill(text : string) -> string + + Reformat the single paragraph in 'text' to fit in lines of no + more than 'self.width' columns, and return a new string + containing the entire wrapped paragraph. + Wrap a single paragraph of text, returning a list of wrapped lines. + + Reformat the single paragraph in 'text' so it fits in lines of no + more than 'width' columns, and return a list of wrapped lines. By + default, tabs in 'text' are expanded with string.expandtabs(), and + all other whitespace characters (including newline) are converted to + space. See TextWrapper class for available keyword args to customize + wrapping behaviour. + Fill a single paragraph of text, returning a new string. + + Reformat the single paragraph in 'text' to fit in lines of no more + than 'width' columns, and return a new string containing the entire + wrapped paragraph. As with wrap(), tabs are expanded and other + whitespace characters converted to space. See TextWrapper class for + available keyword args to customize wrapping behaviour. + Collapse and truncate the given text to fit in the given width. + + The text first has its whitespace collapsed. If it then fits in + the *width*, it is returned as is. Otherwise, as many words + as possible are joined and then the placeholder is appended:: + + >>> textwrap.shorten("Hello world!", width=12) + 'Hello world!' + >>> textwrap.shorten("Hello world!", width=11) + 'Hello [...]' + ^[ ]+$_whitespace_only_re(^[ ]*)(?:[^ +])_leading_whitespace_reRemove any common leading whitespace from every line in `text`. + + This can be used to make triple-quoted strings line up with the left + edge of the display, while still presenting them in the source code + in indented form. + + Note that tabs and spaces are both treated as whitespace, but they + are not equal: the lines " hello" and "\thello" are + considered to have no common leading whitespace. + + Entirely blank lines are normalized to a newline character. + line = %r, margin = %r(?m)^Adds 'prefix' to the beginning of selected lines in 'text'. + + If 'predicate' is provided, 'prefix' will only be added to the lines + where 'predicate(line)' is True. If 'predicate' is not provided, + it will default to adding 'prefix' to all non-empty lines that do not + consist solely of whitespace characters. + prefixed_linesHello there. + This is indented.# Copyright (C) 1999-2001 Gregory P. Ward.# Copyright (C) 2002, 2003 Python Software Foundation.# Written by Greg Ward # Hardcode the recognized whitespace characters to the US-ASCII# whitespace characters. The main reason for doing this is that# some Unicode spaces (like \u00a0) are non-breaking whitespaces.# This funky little regex is just the trick for splitting# text up into word-wrappable chunks. E.g.# "Hello there -- you goof-ball, use the -b option!"# splits into# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!# (after stripping out empty strings).# This less funky little regex just split on recognized spaces. 
E.g.# Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/# XXX this is not locale- or charset-aware -- string.lowercase# is US-ASCII only (and therefore English-only)# lowercase letter# sentence-ending punct.# optional end-of-quote# end of chunk# (possibly useful for subclasses to override)# Figure out when indent is larger than the specified width, and make# sure at least one character is stripped off on every pass# If we're allowed to break long words, then do so: put as much# of the next chunk onto the current line as will fit.# Otherwise, we have to preserve the long word intact. Only add# it to the current line if there's nothing already there --# that minimizes how much we violate the width constraint.# If we're not allowed to break long words, and there's already# text on the current line, do nothing. Next time through the# main loop of _wrap_chunks(), we'll wind up here again, but# cur_len will be zero, so the next line will be entirely# devoted to the long word that we can't handle right now.# Arrange in reverse order so items can be efficiently popped# from a stack of chucks.# Start the list of chunks that will make up the current line.# cur_len is just the length of all the chunks in cur_line.# Figure out which static string will prefix this line.# Maximum width for this line.# First chunk on line is whitespace -- drop it, unless this# is the very beginning of the text (ie. no lines started yet).# Can at least squeeze this chunk onto the current line.# Nope, this line is full.# The current line is full, and the next chunk is too big to# fit on *any* line (not just this one).# If the last chunk on this line is all whitespace, drop it.# Convert current line back to a string and store it in# list of all lines (return value).# -- Public interface ----------------------------------------------# -- Convenience interface ---------------------------------------------# -- Loosely related functionality -------------------------------------# Look for the longest leading string of spaces and tabs common to# all lines.# Current line more deeply indented than previous winner:# no change (previous winner is still on top).# Current line consistent with and no deeper than previous winner:# it's the new winner.# Find the largest common whitespace between current line and previous# winner.# sanity check (testing/debugging only)#print dedent("\tfoo\n\tbar")#print dedent(" \thello there\n \t how are you?")b'Text wrapping and filling. +'u'Text wrapping and filling. +'b'TextWrapper'u'TextWrapper'b'wrap'u'wrap'b'dedent'u'dedent'b'shorten'u'shorten'b' + 'u' + 'b' + Object for wrapping/filling text. The public interface consists of + the wrap() and fill() methods; the other methods are just there for + subclasses to override in order to tweak the default behaviour. + If you want to completely replace the main wrapping algorithm, + you'll probably have to override _wrap_chunks(). + + Several instance attributes control various aspects of wrapping: + width (default: 70) + the maximum width of wrapped lines (unless break_long_words + is false) + initial_indent (default: "") + string that will be prepended to the first line of wrapped + output. Counts towards the line's width. + subsequent_indent (default: "") + string that will be prepended to all lines save the first + of wrapped output; also counts towards each line's width. + expand_tabs (default: true) + Expand tabs in input text to spaces before further processing. + Each tab will become 0 .. 
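[Editor's note] The TextWrapper attributes documented above (width, initial/subsequent indents, break_long_words, max_lines, placeholder, ...) configure a reusable wrapper object. An illustrative sketch, separate from the database content:

    import textwrap

    wrapper = textwrap.TextWrapper(
        width=40,
        initial_indent="* ",        # prefix for the first output line
        subsequent_indent="  ",     # prefix for every following line
        break_long_words=False,     # keep over-long words intact
        max_lines=2,                # truncate and append the placeholder
        placeholder=" [...]",
    )
    text = "The quick brown fox jumps over the lazy dog. " * 3
    print(wrapper.fill(text))
    print(wrapper.wrap(text))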
'tabsize' spaces, depending on its position + in its line. If false, each tab is treated as a single character. + tabsize (default: 8) + Expand tabs in input text to 0 .. 'tabsize' spaces, unless + 'expand_tabs' is false. + replace_whitespace (default: true) + Replace all whitespace characters in the input text by spaces + after tab expansion. Note that if expand_tabs is false and + replace_whitespace is true, every tab will be converted to a + single space! + fix_sentence_endings (default: false) + Ensure that sentence-ending punctuation is always followed + by two spaces. Off by default because the algorithm is + (unavoidably) imperfect. + break_long_words (default: true) + Break words longer than 'width'. If false, those words will not + be broken, and some lines might be longer than 'width'. + break_on_hyphens (default: true) + Allow breaking hyphenated words. If true, wrapping will occur + preferably on whitespaces and right after hyphens part of + compound words. + drop_whitespace (default: true) + Drop leading and trailing whitespace from lines. + max_lines (default: None) + Truncate wrapped lines. + placeholder (default: ' [...]') + Append to the last line of truncated text. + 'u' + Object for wrapping/filling text. The public interface consists of + the wrap() and fill() methods; the other methods are just there for + subclasses to override in order to tweak the default behaviour. + If you want to completely replace the main wrapping algorithm, + you'll probably have to override _wrap_chunks(). + + Several instance attributes control various aspects of wrapping: + width (default: 70) + the maximum width of wrapped lines (unless break_long_words + is false) + initial_indent (default: "") + string that will be prepended to the first line of wrapped + output. Counts towards the line's width. + subsequent_indent (default: "") + string that will be prepended to all lines save the first + of wrapped output; also counts towards each line's width. + expand_tabs (default: true) + Expand tabs in input text to spaces before further processing. + Each tab will become 0 .. 'tabsize' spaces, depending on its position + in its line. If false, each tab is treated as a single character. + tabsize (default: 8) + Expand tabs in input text to 0 .. 'tabsize' spaces, unless + 'expand_tabs' is false. + replace_whitespace (default: true) + Replace all whitespace characters in the input text by spaces + after tab expansion. Note that if expand_tabs is false and + replace_whitespace is true, every tab will be converted to a + single space! + fix_sentence_endings (default: false) + Ensure that sentence-ending punctuation is always followed + by two spaces. Off by default because the algorithm is + (unavoidably) imperfect. + break_long_words (default: true) + Break words longer than 'width'. If false, those words will not + be broken, and some lines might be longer than 'width'. + break_on_hyphens (default: true) + Allow breaking hyphenated words. If true, wrapping will occur + preferably on whitespaces and right after hyphens part of + compound words. + drop_whitespace (default: true) + Drop leading and trailing whitespace from lines. + max_lines (default: None) + Truncate wrapped lines. + placeholder (default: ' [...]') + Append to the last line of truncated text. + 'b'[\w!"\'&.,?]'u'[\w!"\'&.,?]'b'[^\d\W]'u'[^\d\W]'b'[^'u'[^'b' + ( # any whitespace + %(ws)s+ + | # em-dash between words + (?<=%(wp)s) -{2,} (?=\w) + | # word, possibly hyphenated + %(nws)s+? 
(?: + # hyphenated word + -(?: (?<=%(lt)s{2}-) | (?<=%(lt)s-%(lt)s-)) + (?= %(lt)s -? %(lt)s) + | # end of word + (?=%(ws)s|\Z) + | # em-dash + (?<=%(wp)s) (?=-{2,}\w) + ) + )'u' + ( # any whitespace + %(ws)s+ + | # em-dash between words + (?<=%(wp)s) -{2,} (?=\w) + | # word, possibly hyphenated + %(nws)s+? (?: + # hyphenated word + -(?: (?<=%(lt)s{2}-) | (?<=%(lt)s-%(lt)s-)) + (?= %(lt)s -? %(lt)s) + | # end of word + (?=%(ws)s|\Z) + | # em-dash + (?<=%(wp)s) (?=-{2,}\w) + ) + )'b'wp'u'wp'b'nws'u'nws'b'(%s+)'u'(%s+)'b'[a-z][\.\!\?][\"\']?\Z'u'[a-z][\.\!\?][\"\']?\Z'b'_munge_whitespace(text : string) -> string + + Munge whitespace in text: expand tabs and convert all other + whitespace characters to spaces. Eg. " foo\tbar\n\nbaz" + becomes " foo bar baz". + 'u'_munge_whitespace(text : string) -> string + + Munge whitespace in text: expand tabs and convert all other + whitespace characters to spaces. Eg. " foo\tbar\n\nbaz" + becomes " foo bar baz". + 'b'_split(text : string) -> [string] + + Split the text to wrap into indivisible chunks. Chunks are + not quite the same as words; see _wrap_chunks() for full + details. As an example, the text + Look, goof-ball -- use the -b option! + breaks into the following chunks: + 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ', + 'use', ' ', 'the', ' ', '-b', ' ', 'option!' + if break_on_hyphens is True, or in: + 'Look,', ' ', 'goof-ball', ' ', '--', ' ', + 'use', ' ', 'the', ' ', '-b', ' ', option!' + otherwise. + 'u'_split(text : string) -> [string] + + Split the text to wrap into indivisible chunks. Chunks are + not quite the same as words; see _wrap_chunks() for full + details. As an example, the text + Look, goof-ball -- use the -b option! + breaks into the following chunks: + 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ', + 'use', ' ', 'the', ' ', '-b', ' ', 'option!' + if break_on_hyphens is True, or in: + 'Look,', ' ', 'goof-ball', ' ', '--', ' ', + 'use', ' ', 'the', ' ', '-b', ' ', option!' + otherwise. + 'b'_fix_sentence_endings(chunks : [string]) + + Correct for sentence endings buried in 'chunks'. Eg. when the + original text contains "... foo.\nBar ...", munge_whitespace() + and split() will convert that to [..., "foo.", " ", "Bar", ...] + which has one too few spaces; this method simply changes the one + space to two. + 'u'_fix_sentence_endings(chunks : [string]) + + Correct for sentence endings buried in 'chunks'. Eg. when the + original text contains "... foo.\nBar ...", munge_whitespace() + and split() will convert that to [..., "foo.", " ", "Bar", ...] + which has one too few spaces; this method simply changes the one + space to two. + 'b'_handle_long_word(chunks : [string], + cur_line : [string], + cur_len : int, width : int) + + Handle a chunk of text (most likely a word, not whitespace) that + is too long to fit in any line. + 'u'_handle_long_word(chunks : [string], + cur_line : [string], + cur_len : int, width : int) + + Handle a chunk of text (most likely a word, not whitespace) that + is too long to fit in any line. + 'b'_wrap_chunks(chunks : [string]) -> [string] + + Wrap a sequence of text chunks and return a list of lines of + length 'self.width' or less. (If 'break_long_words' is false, + some lines may be longer than this.) Chunks correspond roughly + to words and the whitespace between them: each chunk is + indivisible (modulo 'break_long_words'), but a line break can + come between any two chunks. Chunks should not have internal + whitespace; ie. a chunk is either all whitespace or a "word". 
+ Whitespace chunks will be removed from the beginning and end of + lines, but apart from that whitespace is preserved. + 'u'_wrap_chunks(chunks : [string]) -> [string] + + Wrap a sequence of text chunks and return a list of lines of + length 'self.width' or less. (If 'break_long_words' is false, + some lines may be longer than this.) Chunks correspond roughly + to words and the whitespace between them: each chunk is + indivisible (modulo 'break_long_words'), but a line break can + come between any two chunks. Chunks should not have internal + whitespace; ie. a chunk is either all whitespace or a "word". + Whitespace chunks will be removed from the beginning and end of + lines, but apart from that whitespace is preserved. + 'b'invalid width %r (must be > 0)'u'invalid width %r (must be > 0)'b'placeholder too large for max width'u'placeholder too large for max width'b'wrap(text : string) -> [string] + + Reformat the single paragraph in 'text' so it fits in lines of + no more than 'self.width' columns, and return a list of wrapped + lines. Tabs in 'text' are expanded with string.expandtabs(), + and all other whitespace characters (including newline) are + converted to space. + 'u'wrap(text : string) -> [string] + + Reformat the single paragraph in 'text' so it fits in lines of + no more than 'self.width' columns, and return a list of wrapped + lines. Tabs in 'text' are expanded with string.expandtabs(), + and all other whitespace characters (including newline) are + converted to space. + 'b'fill(text : string) -> string + + Reformat the single paragraph in 'text' to fit in lines of no + more than 'self.width' columns, and return a new string + containing the entire wrapped paragraph. + 'u'fill(text : string) -> string + + Reformat the single paragraph in 'text' to fit in lines of no + more than 'self.width' columns, and return a new string + containing the entire wrapped paragraph. + 'b'Wrap a single paragraph of text, returning a list of wrapped lines. + + Reformat the single paragraph in 'text' so it fits in lines of no + more than 'width' columns, and return a list of wrapped lines. By + default, tabs in 'text' are expanded with string.expandtabs(), and + all other whitespace characters (including newline) are converted to + space. See TextWrapper class for available keyword args to customize + wrapping behaviour. + 'u'Wrap a single paragraph of text, returning a list of wrapped lines. + + Reformat the single paragraph in 'text' so it fits in lines of no + more than 'width' columns, and return a list of wrapped lines. By + default, tabs in 'text' are expanded with string.expandtabs(), and + all other whitespace characters (including newline) are converted to + space. See TextWrapper class for available keyword args to customize + wrapping behaviour. + 'b'Fill a single paragraph of text, returning a new string. + + Reformat the single paragraph in 'text' to fit in lines of no more + than 'width' columns, and return a new string containing the entire + wrapped paragraph. As with wrap(), tabs are expanded and other + whitespace characters converted to space. See TextWrapper class for + available keyword args to customize wrapping behaviour. + 'u'Fill a single paragraph of text, returning a new string. + + Reformat the single paragraph in 'text' to fit in lines of no more + than 'width' columns, and return a new string containing the entire + wrapped paragraph. As with wrap(), tabs are expanded and other + whitespace characters converted to space. 
See TextWrapper class for + available keyword args to customize wrapping behaviour. + 'b'Collapse and truncate the given text to fit in the given width. + + The text first has its whitespace collapsed. If it then fits in + the *width*, it is returned as is. Otherwise, as many words + as possible are joined and then the placeholder is appended:: + + >>> textwrap.shorten("Hello world!", width=12) + 'Hello world!' + >>> textwrap.shorten("Hello world!", width=11) + 'Hello [...]' + 'u'Collapse and truncate the given text to fit in the given width. + + The text first has its whitespace collapsed. If it then fits in + the *width*, it is returned as is. Otherwise, as many words + as possible are joined and then the placeholder is appended:: + + >>> textwrap.shorten("Hello world!", width=12) + 'Hello world!' + >>> textwrap.shorten("Hello world!", width=11) + 'Hello [...]' + 'b'^[ ]+$'u'^[ ]+$'b'(^[ ]*)(?:[^ +])'u'(^[ ]*)(?:[^ +])'b'Remove any common leading whitespace from every line in `text`. + + This can be used to make triple-quoted strings line up with the left + edge of the display, while still presenting them in the source code + in indented form. + + Note that tabs and spaces are both treated as whitespace, but they + are not equal: the lines " hello" and "\thello" are + considered to have no common leading whitespace. + + Entirely blank lines are normalized to a newline character. + 'u'Remove any common leading whitespace from every line in `text`. + + This can be used to make triple-quoted strings line up with the left + edge of the display, while still presenting them in the source code + in indented form. + + Note that tabs and spaces are both treated as whitespace, but they + are not equal: the lines " hello" and "\thello" are + considered to have no common leading whitespace. + + Entirely blank lines are normalized to a newline character. + 'b'line = %r, margin = %r'u'line = %r, margin = %r'b'(?m)^'u'(?m)^'b'Adds 'prefix' to the beginning of selected lines in 'text'. + + If 'predicate' is provided, 'prefix' will only be added to the lines + where 'predicate(line)' is True. If 'predicate' is not provided, + it will default to adding 'prefix' to all non-empty lines that do not + consist solely of whitespace characters. + 'u'Adds 'prefix' to the beginning of selected lines in 'text'. + + If 'predicate' is provided, 'prefix' will only be added to the lines + where 'predicate(line)' is True. If 'predicate' is not provided, + it will default to adding 'prefix' to all non-empty lines that do not + consist solely of whitespace characters. + 'b'Hello there. + This is indented.'u'Hello there. + This is indented.'u'textwrap'Implements ThreadPoolExecutor._threads_queues_workerwork_queue_initializer_failed_idle_semaphoreException in workerBrokenThreadPool + Raised when a worker thread in a ThreadPoolExecutor failed initializing. + _counterthread_name_prefixInitializes a new ThreadPoolExecutor instance. + + Args: + max_workers: The maximum number of threads that can be used to + execute the given calls. + thread_name_prefix: An optional name prefix to give our threads. + initializer: A callable used to initialize worker threads. + initargs: A tuple of arguments to pass to the initializer. 
+ _work_queueThreadPoolExecutor-%d_thread_name_prefixdescriptor 'submit' of 'ThreadPoolExecutor' object needs an argument"descriptor 'submit' of 'ThreadPoolExecutor' object "_adjust_thread_countnum_threads%s_%dthread_nameA thread initializer failed, the thread pool is not usable anymore'A thread initializer failed, the thread pool ''is not usable anymore'# Workers are created as daemon threads. This is done to allow the interpreter# to exit when there are still idle threads in a ThreadPoolExecutor's thread# pool (i.e. shutdown() was not called). However, allowing workers to die with# the interpreter has two undesirable properties:# threads finish.# Break a reference cycle with the exception 'exc'# attempt to increment idle count# Exit if:# - The executor that owns the worker has been collected OR# - The executor that owns the worker has been shutdown.# Notice other workers# Used to assign unique thread names when thread_name_prefix is not supplied.# ThreadPoolExecutor is often used to:# * CPU bound task which releases GIL# * I/O bound task (which releases GIL, of course)# We use cpu_count + 4 for both types of tasks.# But we limit it to 32 to avoid consuming surprisingly large resource# on many core machine.# if idle threads are available, don't spin new threads# When the executor gets lost, the weakref callback will wake up# the worker threads.# Drain work queue and mark pending futures failedb'Implements ThreadPoolExecutor.'u'Implements ThreadPoolExecutor.'b'Exception in worker'u'Exception in worker'b' + Raised when a worker thread in a ThreadPoolExecutor failed initializing. + 'u' + Raised when a worker thread in a ThreadPoolExecutor failed initializing. + 'b'Initializes a new ThreadPoolExecutor instance. + + Args: + max_workers: The maximum number of threads that can be used to + execute the given calls. + thread_name_prefix: An optional name prefix to give our threads. + initializer: A callable used to initialize worker threads. + initargs: A tuple of arguments to pass to the initializer. + 'u'Initializes a new ThreadPoolExecutor instance. + + Args: + max_workers: The maximum number of threads that can be used to + execute the given calls. + thread_name_prefix: An optional name prefix to give our threads. + initializer: A callable used to initialize worker threads. + initargs: A tuple of arguments to pass to the initializer. + 'b'ThreadPoolExecutor-%d'u'ThreadPoolExecutor-%d'b'descriptor 'submit' of 'ThreadPoolExecutor' object needs an argument'u'descriptor 'submit' of 'ThreadPoolExecutor' object needs an argument'b'%s_%d'u'%s_%d'b'A thread initializer failed, the thread pool is not usable anymore'u'A thread initializer failed, the thread pool is not usable anymore'u'concurrent.futures.thread'u'futures.thread'Thread module emulating a subset of Java's threading model._islice_dequeactive_countBrokenBarrierErrorTimerThreadErrorExceptHookArgs_start_new_thread_CRLock_profile_hook_trace_hookSet a profile function for all threads started from the threading module. + + The func will be passed to sys.setprofile() for each thread, before its + run() method is called. + + Set a trace function for all threads started from the threading module. + + The func will be passed to sys.settrace() for each thread, before its run() + method is called. + + Factory function that returns a new reentrant lock. + + A reentrant lock must be released by the thread that acquired it. 
Once a + thread has acquired a reentrant lock, the same thread may acquire it again + without blocking; the thread must release it once for each time it has + acquired it. + + _PyRLock_RLockThis class implements reentrant lock objects. + + A reentrant lock must be released by the thread that acquired it. Once a + thread has acquired a reentrant lock, the same thread may acquire it + again without blocking; the thread must release it once for each time it + has acquired it. + + _owner<%s %s.%s object owner=%r count=%d at %s>Acquire a lock, blocking or non-blocking. + + When invoked without arguments: if this thread already owns the lock, + increment the recursion level by one, and return immediately. Otherwise, + if another thread owns the lock, block until the lock is unlocked. Once + the lock is unlocked (not owned by any thread), then grab ownership, set + the recursion level to one, and return. If more than one thread is + blocked waiting until the lock is unlocked, only one at a time will be + able to grab ownership of the lock. There is no return value in this + case. + + When invoked with the blocking argument set to true, do the same thing + as when called without arguments, and return true. + + When invoked with the blocking argument set to false, do not block. If a + call without an argument would block, return false immediately; + otherwise, do the same thing as when called without arguments, and + return true. + + When invoked with the floating-point timeout argument set to a positive + value, block for at most the number of seconds specified by timeout + and as long as the lock cannot be acquired. Return true if the lock has + been acquired, false if the timeout has elapsed. + + Release a lock, decrementing the recursion level. + + If after the decrement it is zero, reset the lock to unlocked (not owned + by any thread), and if any other threads are blocked waiting for the + lock to become unlocked, allow exactly one of them to proceed. If after + the decrement the recursion level is still nonzero, the lock remains + locked and owned by the calling thread. + + Only call this method when the calling thread owns the lock. A + RuntimeError is raised if this method is called when the lock is + unlocked. + + There is no return value. + + Class that implements a condition variable. + + A condition variable allows one or more threads to wait until they are + notified by another thread. + + If the lock argument is given and not None, it must be a Lock or RLock + object, and it is used as the underlying lock. Otherwise, a new RLock object + is created and used as the underlying lock. + + Wait until notified or until a timeout occurs. + + If the calling thread has not acquired the lock when this method is + called, a RuntimeError is raised. + + This method releases the underlying lock, and then blocks until it is + awakened by a notify() or notify_all() call for the same condition + variable in another thread, or until the optional timeout occurs. Once + awakened or timed out, it re-acquires the lock and returns. + + When the timeout argument is present and not None, it should be a + floating point number specifying a timeout for the operation in seconds + (or fractions thereof). + + When the underlying lock is an RLock, it is not released using its + release() method, since this may not actually unlock the lock when it + was acquired multiple times recursively. 
Instead, an internal interface + of the RLock class is used, which really unlocks it even when it has + been recursively acquired several times. Another internal interface is + then used to restore the recursion level when the lock is reacquired. + + saved_stategotitWait until a condition evaluates to True. + + predicate should be a callable which result will be interpreted as a + boolean value. A timeout may be provided giving the maximum time to + wait. + + waittimeWake up one or more threads waiting on this condition, if any. + + If the calling thread has not acquired the lock when this method is + called, a RuntimeError is raised. + + This method wakes up at most n of the threads waiting for the condition + variable; it is a no-op if no threads are waiting. + + all_waiterswaiters_to_notifyWake up all threads waiting on this condition. + + If the calling thread has not acquired the lock when this method + is called, a RuntimeError is raised. + + notifyAllThis class implements semaphore objects. + + Semaphores manage a counter representing the number of release() calls minus + the number of acquire() calls, plus an initial value. The acquire() method + blocks if necessary until it can return without making the counter + negative. If not given, value defaults to 1. + + semaphore initial value must be >= 0Acquire a semaphore, decrementing the internal counter by one. + + When invoked without arguments: if the internal counter is larger than + zero on entry, decrement it by one and return immediately. If it is zero + on entry, block, waiting until some other thread has called release() to + make it larger than zero. This is done with proper interlocking so that + if multiple acquire() calls are blocked, release() will wake exactly one + of them up. The implementation may pick one at random, so the order in + which blocked threads are awakened should not be relied on. There is no + return value in this case. + + When invoked with blocking set to true, do the same thing as when called + without arguments, and return true. + + When invoked with blocking set to false, do not block. If a call without + an argument would block, return false immediately; otherwise, do the + same thing as when called without arguments, and return true. + + When invoked with a timeout other than None, it will block for at + most timeout seconds. If acquire does not complete successfully in + that interval, return false. Return true otherwise. + + can't specify timeout for non-blocking acquireRelease a semaphore, incrementing the internal counter by one. + + When the counter is zero on entry and another thread is waiting for it + to become larger than zero again, wake up that thread. + + Implements a bounded semaphore. + + A bounded semaphore checks to make sure its current value doesn't exceed its + initial value. If it does, ValueError is raised. In most situations + semaphores are used to guard resources with limited capacity. + + If the semaphore is released too many times it's a sign of a bug. If not + given, value defaults to 1. + + Like regular semaphores, bounded semaphores manage a counter representing + the number of release() calls minus the number of acquire() calls, plus an + initial value. The acquire() method blocks if necessary until it can return + without making the counter negative. If not given, value defaults to 1. + + _initial_valueRelease a semaphore, incrementing the internal counter by one. 
+ + When the counter is zero on entry and another thread is waiting for it + to become larger than zero again, wake up that thread. + + If the number of releases exceeds the number of acquires, + raise a ValueError. + + Semaphore released too many timesClass implementing event objects. + + Events manage a flag that can be set to true with the set() method and reset + to false with the clear() method. The wait() method blocks until the flag is + true. The flag is initially false. + + _flag_reset_internal_locksReturn true if and only if the internal flag is true.isSetSet the internal flag to true. + + All threads waiting for it to become true are awakened. Threads + that call wait() once the flag is true will not block at all. + + Reset the internal flag to false. + + Subsequently, threads calling wait() will block until set() is called to + set the internal flag to true again. + + Block until the internal flag is true. + + If the internal flag is true on entry, return immediately. Otherwise, + block until another thread calls set() to set the flag to true, or until + the optional timeout occurs. + + When the timeout argument is present and not None, it should be a + floating point number specifying a timeout for the operation in seconds + (or fractions thereof). + + This method returns the internal flag on exit, so it will always return + True except if a timeout is given and the operation times out. + + signaledImplements a Barrier. + + Useful for synchronizing a fixed number of threads at known synchronization + points. Threads block on 'wait()' and are simultaneously awoken once they + have all made that call. + + Create a barrier, initialised to 'parties' threads. + + 'action' is a callable which, when supplied, will be called by one of + the threads after they have all entered the barrier and just prior to + releasing them all. If a 'timeout' is provided, it is used as the + default for all subsequent 'wait()' calls. + + _action_timeout_partiesWait for the barrier. + + When the specified number of threads have started waiting, they are all + simultaneously awoken. If an 'action' was provided for the barrier, one + of the threads will have executed that callback prior to returning. + Returns an individual index number from 0 to 'parties-1'. + + _enter_release_breakReset the barrier to the initial state. + + Any threads currently waiting will get the BrokenBarrier exception + raised. + + Place the barrier into a 'broken' state. + + Useful in case of error. Any currently waiting threads and threads + attempting to 'wait()' will have BrokenBarrierError raised. + + Return the number of threads required to trip the barrier.n_waitingReturn the number of threads currently waiting at the barrier.brokenReturn True if the barrier is in a broken state._newnameThread-%d_active_limbo_lock_limbo_shutdown_locks_lock_shutdown_locksA class that represents a thread of control. + + This class can be safely subclassed in a limited fashion. There are two ways + to specify the activity: by passing a callable object to the constructor, or + by overriding the run() method in a subclass. + + _initializedThis constructor should always be called with keyword arguments. Arguments are: + + *group* should be None; reserved for future extension when a ThreadGroup + class is implemented. + + *target* is the callable object to be invoked by the run() + method. Defaults to None, meaning nothing is called. + + *name* is the thread name. 
By default, a unique name is constructed of + the form "Thread-N" where N is a small decimal number. + + *args* is the argument tuple for the target invocation. Defaults to (). + + *kwargs* is a dictionary of keyword arguments for the target + invocation. Defaults to {}. + + If a subclass overrides the constructor, it must make sure to invoke + the base class constructor (Thread.__init__()) before doing anything + else to the thread. + + _daemonic_ident_native_id_tstate_lock_started_is_stopped_stderr_make_invoke_excepthook_invoke_excepthook_set_tstate_lockThread.__init__() was not called daemon %s<%s(%s, %s)>Start the thread's activity. + + It must be called at most once per thread object. It arranges for the + object's run() method to be invoked in a separate thread of control. + + This method will raise a RuntimeError if called more than once on the + same thread object. + + thread.__init__() not calledthreads can only be started onceMethod representing the thread's activity. + + You may override this method in a subclass. The standard run() method + invokes the callable object passed to the object's constructor as the + target argument, if any, with sequential and keyword arguments taken + from the args and kwargs arguments, respectively. + + _bootstrap_inner_set_ident + Set a lock object which will be released by the interpreter when + the underlying thread state (see pystate.h) gets deleted. + _deleteRemove current thread from the dict of currently running threads.Wait until the thread terminates. + + This blocks the calling thread until the thread whose join() method is + called terminates -- either normally or through an unhandled exception + or until the optional timeout occurs. + + When the timeout argument is present and not None, it should be a + floating point number specifying a timeout for the operation in seconds + (or fractions thereof). As join() always returns None, you must call + is_alive() after join() to decide whether a timeout happened -- if the + thread is still alive, the join() call timed out. + + When the timeout argument is not present or None, the operation will + block until the thread terminates. + + A thread can be join()ed many times. + + join() raises a RuntimeError if an attempt is made to join the current + thread as that would cause a deadlock. It is also an error to join() a + thread before it has been started and attempts to do so raises the same + exception. + + Thread.__init__() not calledcannot join thread before it is startedcannot join current thread_wait_for_tstate_lockA string used for identification purposes only. + + It has no semantics. Multiple threads may be given the same name. The + initial name is set by the constructor. + + Thread identifier of this thread or None if it has not been started. + + This is a nonzero integer. See the get_ident() function. Thread + identifiers may be recycled when a thread exits and another thread is + created. The identifier is available even after the thread has exited. + + native_idNative integral thread ID of this thread, or None if it has not been started. + + This is a non-negative integer. See the get_native_id() function. + This represents the Thread ID as reported by the kernel. + + Return whether the thread is alive. + + This method returns True just before the run() method starts until just + after the run() method terminates. The module function enumerate() + returns a list of all alive threads. + + isAliveReturn whether the thread is alive. 
+ + This method is deprecated, use is_alive() instead. + isAlive() is deprecated, use is_alive() insteadA boolean value indicating whether this thread is a daemon thread. + + This must be set before start() is called, otherwise RuntimeError is + raised. Its initial value is inherited from the creating thread; the + main thread is not a daemon thread and therefore all threads created in + the main thread default to daemon = False. + + The entire Python program exits when only daemon threads are left. + + cannot set daemon status of active threadisDaemonsetDaemongetNamesetName_print_exceptionexc_type exc_value exc_traceback thread + Handle uncaught Thread.run() exception. + Exception in thread old_excepthookold_sys_excepthookthreading.excepthook is Nonesys.excepthook is Nonesys_exc_infolocal_printlocal_sysinvoke_excepthookException in threading.excepthook:sys_excepthookCall a function after a specified number of seconds: + + t = Timer(30.0, f, args=None, kwargs=None) + t.start() + t.cancel() # stop the timer's action if it's still waiting + + Stop the timer if it hasn't finished yet.MainThread_DummyThreadDummy-%dcannot join a dummy threadReturn the current Thread object, corresponding to the caller's thread of control. + + If the caller's thread of control was not created through the threading + module, a dummy thread object with limited functionality is returned. + + currentThreadReturn the number of Thread objects currently alive. + + The returned count is equal to the length of the list returned by + enumerate(). + + activeCount_enumerateReturn a list of all Thread objects currently alive. + + The list includes daemonic threads, dummy thread objects created by + current_thread(), and the main thread. It excludes terminated threads and + threads that have not yet been started. + + _main_thread + Wait until the Python thread state of all non-daemon threads get deleted. + tlockReturn the main thread object. + + In normal conditions, the main thread is the thread from which the + Python interpreter was started. + _threading_local + Cleanup threading module state that should not exist after a fork. + new_active# Note regarding PEP 8 compliant names# This threading model was originally inspired by Java, and inherited# the convention of camelCase function and method names from that# language. Those original names are not in any imminent danger of# being deprecated (even for Py3k),so this module provides them as an# alias for the PEP 8 compliant names# Note that using the new PEP 8 compliant names facilitates substitution# with the multiprocessing module, which doesn't provide the old# Java inspired names.# Rename some stuff so "from threading import *" is safe# Support for profile and trace hooks# Synchronization classes# Internal methods used by condition variables# Export the lock's acquire() and release() methods# If the lock defines _release_save() and/or _acquire_restore(),# these override the default implementations (which just call# release() and acquire() on the lock). Ditto for _is_owned().# No state to save# Ignore saved state# Return True if lock is owned by current_thread.# This method is called only if _lock doesn't have _is_owned().# restore state no matter what (e.g., KeyboardInterrupt)# After Tim Peters' semaphore class, but not quite the same (no maximum)# After Tim Peters' event class (without is_posted())# private! called by Thread._reset_internal_locks by _after_fork()# A barrier class. Inspired in part by the pthread_barrier_* api and# the CyclicBarrier class from Java. 
See# http://sourceware.org/pthreads-win32/manual/pthread_barrier_init.html and# http://java.sun.com/j2se/1.5.0/docs/api/java/util/concurrent/# CyclicBarrier.html# for information.# We maintain two main states, 'filling' and 'draining' enabling the barrier# to be cyclic. Threads are not allowed into it until it has fully drained# since the previous cycle. In addition, a 'resetting' state exists which is# similar to 'draining' except that threads leave with a BrokenBarrierError,# and a 'broken' state in which all threads get the exception.#0 filling, 1, draining, -1 resetting, -2 broken# Block while the barrier drains.# We release the barrier# We wait until someone releases us# Wake up any threads waiting for barrier to drain.# Block until the barrier is ready for us, or raise an exception# if it is broken.# It is draining or resetting, wait until done#see if the barrier is in a broken state# Optionally run the 'action' and release the threads waiting# in the barrier.# enter draining state#an exception during the _action handler. Break and reraise# Wait in the barrier until we are released. Raise an exception# if the barrier is reset or broken.#timed out. Break the barrier# If we are the last thread to exit the barrier, signal any threads# waiting for the barrier to drain.#resetting or draining#reset the barrier, waking up threads#was broken, set it to reset state#which clears when the last thread exits# An internal error was detected. The barrier is set to# a broken state all parties awakened.# We don't need synchronization here since this is an ephemeral result# anyway. It returns the correct value in the steady state.# exception raised by the Barrier class# Helper to generate new thread names# Consume 0 so first non-main thread has id 1.# Active thread administration# maps thread id to Thread object# Set of Thread._tstate_lock locks of non-daemon threads used by _shutdown()# to wait until all Python thread states get deleted:# see Thread._set_tstate_lock().# Main class for threads# Copy of sys.stderr used by self._invoke_excepthook()# For debugging and _after_fork()# private! Called by _after_fork() to reset our internal locks as# they may be in an invalid state leading to a deadlock or crash.# The thread isn't alive after fork: it doesn't have a tstate# anymore.# easy way to get ._is_stopped set when appropriate# Avoid a refcycle if the thread is running a function with# an argument that has a member that points to the thread.# Wrapper around the real bootstrap code that ignores# exceptions during interpreter cleanup. Those typically# happen when a daemon thread wakes up at an unfortunate# moment, finds the world around it destroyed, and raises some# random exception *** while trying to report the exception in# _bootstrap_inner() below ***. Those random exceptions# don't help anybody, and they confuse users, so we suppress# them. We suppress them only when it appears that the world# indeed has already been destroyed, so that exceptions in# _bootstrap_inner() during normal business hours are properly# reported. Also, we only suppress them for daemonic threads;# if a non-daemonic encounters this, something else is wrong.# We don't call self._delete() because it also# grabs _active_limbo_lock.# After calling ._stop(), .is_alive() returns False and .join() returns# immediately. 
._tstate_lock must be released before calling ._stop().# Normal case: C code at the end of the thread's life# (release_sentinel in _threadmodule.c) releases ._tstate_lock, and# that's detected by our ._wait_for_tstate_lock(), called by .join()# and .is_alive(). Any number of threads _may_ call ._stop()# simultaneously (for example, if multiple threads are blocked in# .join() calls), and they're not serialized. That's harmless -# they'll just make redundant rebindings of ._is_stopped and# ._tstate_lock. Obscure: we rebind ._tstate_lock last so that the# "assert self._is_stopped" in ._wait_for_tstate_lock() always works# (the assert is executed only if ._tstate_lock is None).# Special case: _main_thread releases ._tstate_lock via this# module's _shutdown() function.# There must not be any python code between the previous line# and after the lock is released. Otherwise a tracing function# could try to acquire the lock again in the same thread, (in# current_thread()), and would block.# the behavior of a negative timeout isn't documented, but# historically .join(timeout=x) for x<0 has acted as if timeout=0# Issue #18808: wait for the thread state to be gone.# At the end of the thread's life, after all knowledge of the thread# is removed from C data structures, C code releases our _tstate_lock.# This method passes its arguments to _tstate_lock.acquire().# If the lock is acquired, the C code is done, and self._stop() is# called. That sets ._is_stopped to True, and ._tstate_lock to None.# already determined that the C code is done# Simple Python implementation if _thread._excepthook() is not available# silently ignore SystemExit# do nothing if sys.stderr is None and sys.stderr was None# when the thread was created# do nothing if sys.stderr is None and args.thread is None# Create a local namespace to ensure that variables remain alive# when _invoke_excepthook() is called, even if it is called late during# Python shutdown. It is mostly needed for daemon threads.# Break reference cycle (exception stored in a variable)# The timer class was contributed by Itamar Shtull-Trauring# Special thread class to represent the main thread# Dummy thread class to represent threads not started here.# These aren't garbage collected when they die, nor can they be waited for.# If they invoke anything in threading.py that calls current_thread(), they# leave an entry in the _active dict forever after.# Their purpose is to return *something* from current_thread().# They are marked as daemon threads so we won't wait for them# when we exit (conform previous semantics).# Global API functions# Same as enumerate(), but without the lock. Internal use only.# Create the main thread object,# and make it available for the interpreter# (Py_Main) as threading._shutdown.# Obscure: other threads may be waiting to join _main_thread. That's# dubious, but some code does it. We can't wait for C code to release# the main thread's tstate_lock - that won't happen until the interpreter# is nearly dead. So we release it here. 
Note that just calling _stop()# isn't enough: other threads may already be waiting on _tstate_lock.# _shutdown() was already called# Main thread# The main thread isn't finished yet, so its thread state lock can't have# been released.# Join all non-deamon threads# mimick Thread.join()# new threads can be spawned while we were waiting for the other# threads to complete# get thread-local implementation, either from the thread# module, or from the python fallback# Reset _active_limbo_lock, in case we forked while the lock was held# by another (non-forked) thread. http://bugs.python.org/issue874900# fork() only copied the current thread; clear references to others.# fork() was called in a thread which was not spawned# by threading.Thread. For example, a thread spawned# by thread.start_new_thread().# reset _shutdown() locks: threads re-register their _tstate_lock below# Dangling thread instances must still have their locks reset,# because someone may join() them.# Any lock/condition variable may be currently locked or in an# invalid state, so we reinitialize them.# There is only one active thread. We reset the ident to# its new value since it can have changed.# All the others are already stopped.b'Thread module emulating a subset of Java's threading model.'u'Thread module emulating a subset of Java's threading model.'b'get_ident'u'get_ident'b'active_count'u'active_count'b'current_thread'u'current_thread'b'main_thread'u'main_thread'b'TIMEOUT_MAX'u'TIMEOUT_MAX'b'RLock'u'RLock'b'Thread'u'Thread'b'Barrier'u'Barrier'b'BrokenBarrierError'u'BrokenBarrierError'b'Timer'u'Timer'b'ThreadError'u'ThreadError'b'setprofile'u'setprofile'b'settrace'u'settrace'b'stack_size'u'stack_size'b'excepthook'u'excepthook'b'ExceptHookArgs'u'ExceptHookArgs'b'get_native_id'u'get_native_id'b'Set a profile function for all threads started from the threading module. + + The func will be passed to sys.setprofile() for each thread, before its + run() method is called. + + 'u'Set a profile function for all threads started from the threading module. + + The func will be passed to sys.setprofile() for each thread, before its + run() method is called. + + 'b'Set a trace function for all threads started from the threading module. + + The func will be passed to sys.settrace() for each thread, before its run() + method is called. + + 'u'Set a trace function for all threads started from the threading module. + + The func will be passed to sys.settrace() for each thread, before its run() + method is called. + + 'b'Factory function that returns a new reentrant lock. + + A reentrant lock must be released by the thread that acquired it. Once a + thread has acquired a reentrant lock, the same thread may acquire it again + without blocking; the thread must release it once for each time it has + acquired it. + + 'u'Factory function that returns a new reentrant lock. + + A reentrant lock must be released by the thread that acquired it. Once a + thread has acquired a reentrant lock, the same thread may acquire it again + without blocking; the thread must release it once for each time it has + acquired it. + + 'b'This class implements reentrant lock objects. + + A reentrant lock must be released by the thread that acquired it. Once a + thread has acquired a reentrant lock, the same thread may acquire it + again without blocking; the thread must release it once for each time it + has acquired it. + + 'u'This class implements reentrant lock objects. + + A reentrant lock must be released by the thread that acquired it. 
Once a + thread has acquired a reentrant lock, the same thread may acquire it + again without blocking; the thread must release it once for each time it + has acquired it. + + 'b'<%s %s.%s object owner=%r count=%d at %s>'u'<%s %s.%s object owner=%r count=%d at %s>'b'Acquire a lock, blocking or non-blocking. + + When invoked without arguments: if this thread already owns the lock, + increment the recursion level by one, and return immediately. Otherwise, + if another thread owns the lock, block until the lock is unlocked. Once + the lock is unlocked (not owned by any thread), then grab ownership, set + the recursion level to one, and return. If more than one thread is + blocked waiting until the lock is unlocked, only one at a time will be + able to grab ownership of the lock. There is no return value in this + case. + + When invoked with the blocking argument set to true, do the same thing + as when called without arguments, and return true. + + When invoked with the blocking argument set to false, do not block. If a + call without an argument would block, return false immediately; + otherwise, do the same thing as when called without arguments, and + return true. + + When invoked with the floating-point timeout argument set to a positive + value, block for at most the number of seconds specified by timeout + and as long as the lock cannot be acquired. Return true if the lock has + been acquired, false if the timeout has elapsed. + + 'u'Acquire a lock, blocking or non-blocking. + + When invoked without arguments: if this thread already owns the lock, + increment the recursion level by one, and return immediately. Otherwise, + if another thread owns the lock, block until the lock is unlocked. Once + the lock is unlocked (not owned by any thread), then grab ownership, set + the recursion level to one, and return. If more than one thread is + blocked waiting until the lock is unlocked, only one at a time will be + able to grab ownership of the lock. There is no return value in this + case. + + When invoked with the blocking argument set to true, do the same thing + as when called without arguments, and return true. + + When invoked with the blocking argument set to false, do not block. If a + call without an argument would block, return false immediately; + otherwise, do the same thing as when called without arguments, and + return true. + + When invoked with the floating-point timeout argument set to a positive + value, block for at most the number of seconds specified by timeout + and as long as the lock cannot be acquired. Return true if the lock has + been acquired, false if the timeout has elapsed. + + 'b'Release a lock, decrementing the recursion level. + + If after the decrement it is zero, reset the lock to unlocked (not owned + by any thread), and if any other threads are blocked waiting for the + lock to become unlocked, allow exactly one of them to proceed. If after + the decrement the recursion level is still nonzero, the lock remains + locked and owned by the calling thread. + + Only call this method when the calling thread owns the lock. A + RuntimeError is raised if this method is called when the lock is + unlocked. + + There is no return value. + + 'u'Release a lock, decrementing the recursion level. + + If after the decrement it is zero, reset the lock to unlocked (not owned + by any thread), and if any other threads are blocked waiting for the + lock to become unlocked, allow exactly one of them to proceed. 
If after + the decrement the recursion level is still nonzero, the lock remains + locked and owned by the calling thread. + + Only call this method when the calling thread owns the lock. A + RuntimeError is raised if this method is called when the lock is + unlocked. + + There is no return value. + + 'b'Class that implements a condition variable. + + A condition variable allows one or more threads to wait until they are + notified by another thread. + + If the lock argument is given and not None, it must be a Lock or RLock + object, and it is used as the underlying lock. Otherwise, a new RLock object + is created and used as the underlying lock. + + 'u'Class that implements a condition variable. + + A condition variable allows one or more threads to wait until they are + notified by another thread. + + If the lock argument is given and not None, it must be a Lock or RLock + object, and it is used as the underlying lock. Otherwise, a new RLock object + is created and used as the underlying lock. + + 'b''u''b'Wait until notified or until a timeout occurs. + + If the calling thread has not acquired the lock when this method is + called, a RuntimeError is raised. + + This method releases the underlying lock, and then blocks until it is + awakened by a notify() or notify_all() call for the same condition + variable in another thread, or until the optional timeout occurs. Once + awakened or timed out, it re-acquires the lock and returns. + + When the timeout argument is present and not None, it should be a + floating point number specifying a timeout for the operation in seconds + (or fractions thereof). + + When the underlying lock is an RLock, it is not released using its + release() method, since this may not actually unlock the lock when it + was acquired multiple times recursively. Instead, an internal interface + of the RLock class is used, which really unlocks it even when it has + been recursively acquired several times. Another internal interface is + then used to restore the recursion level when the lock is reacquired. + + 'u'Wait until notified or until a timeout occurs. + + If the calling thread has not acquired the lock when this method is + called, a RuntimeError is raised. + + This method releases the underlying lock, and then blocks until it is + awakened by a notify() or notify_all() call for the same condition + variable in another thread, or until the optional timeout occurs. Once + awakened or timed out, it re-acquires the lock and returns. + + When the timeout argument is present and not None, it should be a + floating point number specifying a timeout for the operation in seconds + (or fractions thereof). + + When the underlying lock is an RLock, it is not released using its + release() method, since this may not actually unlock the lock when it + was acquired multiple times recursively. Instead, an internal interface + of the RLock class is used, which really unlocks it even when it has + been recursively acquired several times. Another internal interface is + then used to restore the recursion level when the lock is reacquired. + + 'b'Wait until a condition evaluates to True. + + predicate should be a callable which result will be interpreted as a + boolean value. A timeout may be provided giving the maximum time to + wait. + + 'u'Wait until a condition evaluates to True. + + predicate should be a callable which result will be interpreted as a + boolean value. A timeout may be provided giving the maximum time to + wait. 
+ + 'b'Wake up one or more threads waiting on this condition, if any. + + If the calling thread has not acquired the lock when this method is + called, a RuntimeError is raised. + + This method wakes up at most n of the threads waiting for the condition + variable; it is a no-op if no threads are waiting. + + 'u'Wake up one or more threads waiting on this condition, if any. + + If the calling thread has not acquired the lock when this method is + called, a RuntimeError is raised. + + This method wakes up at most n of the threads waiting for the condition + variable; it is a no-op if no threads are waiting. + + 'b'Wake up all threads waiting on this condition. + + If the calling thread has not acquired the lock when this method + is called, a RuntimeError is raised. + + 'u'Wake up all threads waiting on this condition. + + If the calling thread has not acquired the lock when this method + is called, a RuntimeError is raised. + + 'b'This class implements semaphore objects. + + Semaphores manage a counter representing the number of release() calls minus + the number of acquire() calls, plus an initial value. The acquire() method + blocks if necessary until it can return without making the counter + negative. If not given, value defaults to 1. + + 'u'This class implements semaphore objects. + + Semaphores manage a counter representing the number of release() calls minus + the number of acquire() calls, plus an initial value. The acquire() method + blocks if necessary until it can return without making the counter + negative. If not given, value defaults to 1. + + 'b'semaphore initial value must be >= 0'u'semaphore initial value must be >= 0'b'Acquire a semaphore, decrementing the internal counter by one. + + When invoked without arguments: if the internal counter is larger than + zero on entry, decrement it by one and return immediately. If it is zero + on entry, block, waiting until some other thread has called release() to + make it larger than zero. This is done with proper interlocking so that + if multiple acquire() calls are blocked, release() will wake exactly one + of them up. The implementation may pick one at random, so the order in + which blocked threads are awakened should not be relied on. There is no + return value in this case. + + When invoked with blocking set to true, do the same thing as when called + without arguments, and return true. + + When invoked with blocking set to false, do not block. If a call without + an argument would block, return false immediately; otherwise, do the + same thing as when called without arguments, and return true. + + When invoked with a timeout other than None, it will block for at + most timeout seconds. If acquire does not complete successfully in + that interval, return false. Return true otherwise. + + 'u'Acquire a semaphore, decrementing the internal counter by one. + + When invoked without arguments: if the internal counter is larger than + zero on entry, decrement it by one and return immediately. If it is zero + on entry, block, waiting until some other thread has called release() to + make it larger than zero. This is done with proper interlocking so that + if multiple acquire() calls are blocked, release() will wake exactly one + of them up. The implementation may pick one at random, so the order in + which blocked threads are awakened should not be relied on. There is no + return value in this case. + + When invoked with blocking set to true, do the same thing as when called + without arguments, and return true. 
+ + When invoked with blocking set to false, do not block. If a call without + an argument would block, return false immediately; otherwise, do the + same thing as when called without arguments, and return true. + + When invoked with a timeout other than None, it will block for at + most timeout seconds. If acquire does not complete successfully in + that interval, return false. Return true otherwise. + + 'b'can't specify timeout for non-blocking acquire'u'can't specify timeout for non-blocking acquire'b'Release a semaphore, incrementing the internal counter by one. + + When the counter is zero on entry and another thread is waiting for it + to become larger than zero again, wake up that thread. + + 'u'Release a semaphore, incrementing the internal counter by one. + + When the counter is zero on entry and another thread is waiting for it + to become larger than zero again, wake up that thread. + + 'b'Implements a bounded semaphore. + + A bounded semaphore checks to make sure its current value doesn't exceed its + initial value. If it does, ValueError is raised. In most situations + semaphores are used to guard resources with limited capacity. + + If the semaphore is released too many times it's a sign of a bug. If not + given, value defaults to 1. + + Like regular semaphores, bounded semaphores manage a counter representing + the number of release() calls minus the number of acquire() calls, plus an + initial value. The acquire() method blocks if necessary until it can return + without making the counter negative. If not given, value defaults to 1. + + 'u'Implements a bounded semaphore. + + A bounded semaphore checks to make sure its current value doesn't exceed its + initial value. If it does, ValueError is raised. In most situations + semaphores are used to guard resources with limited capacity. + + If the semaphore is released too many times it's a sign of a bug. If not + given, value defaults to 1. + + Like regular semaphores, bounded semaphores manage a counter representing + the number of release() calls minus the number of acquire() calls, plus an + initial value. The acquire() method blocks if necessary until it can return + without making the counter negative. If not given, value defaults to 1. + + 'b'Release a semaphore, incrementing the internal counter by one. + + When the counter is zero on entry and another thread is waiting for it + to become larger than zero again, wake up that thread. + + If the number of releases exceeds the number of acquires, + raise a ValueError. + + 'u'Release a semaphore, incrementing the internal counter by one. + + When the counter is zero on entry and another thread is waiting for it + to become larger than zero again, wake up that thread. + + If the number of releases exceeds the number of acquires, + raise a ValueError. + + 'b'Semaphore released too many times'u'Semaphore released too many times'b'Class implementing event objects. + + Events manage a flag that can be set to true with the set() method and reset + to false with the clear() method. The wait() method blocks until the flag is + true. The flag is initially false. + + 'u'Class implementing event objects. + + Events manage a flag that can be set to true with the set() method and reset + to false with the clear() method. The wait() method blocks until the flag is + true. The flag is initially false. + + 'b'Return true if and only if the internal flag is true.'u'Return true if and only if the internal flag is true.'b'Set the internal flag to true. 
+ + All threads waiting for it to become true are awakened. Threads + that call wait() once the flag is true will not block at all. + + 'u'Set the internal flag to true. + + All threads waiting for it to become true are awakened. Threads + that call wait() once the flag is true will not block at all. + + 'b'Reset the internal flag to false. + + Subsequently, threads calling wait() will block until set() is called to + set the internal flag to true again. + + 'u'Reset the internal flag to false. + + Subsequently, threads calling wait() will block until set() is called to + set the internal flag to true again. + + 'b'Block until the internal flag is true. + + If the internal flag is true on entry, return immediately. Otherwise, + block until another thread calls set() to set the flag to true, or until + the optional timeout occurs. + + When the timeout argument is present and not None, it should be a + floating point number specifying a timeout for the operation in seconds + (or fractions thereof). + + This method returns the internal flag on exit, so it will always return + True except if a timeout is given and the operation times out. + + 'u'Block until the internal flag is true. + + If the internal flag is true on entry, return immediately. Otherwise, + block until another thread calls set() to set the flag to true, or until + the optional timeout occurs. + + When the timeout argument is present and not None, it should be a + floating point number specifying a timeout for the operation in seconds + (or fractions thereof). + + This method returns the internal flag on exit, so it will always return + True except if a timeout is given and the operation times out. + + 'b'Implements a Barrier. + + Useful for synchronizing a fixed number of threads at known synchronization + points. Threads block on 'wait()' and are simultaneously awoken once they + have all made that call. + + 'u'Implements a Barrier. + + Useful for synchronizing a fixed number of threads at known synchronization + points. Threads block on 'wait()' and are simultaneously awoken once they + have all made that call. + + 'b'Create a barrier, initialised to 'parties' threads. + + 'action' is a callable which, when supplied, will be called by one of + the threads after they have all entered the barrier and just prior to + releasing them all. If a 'timeout' is provided, it is used as the + default for all subsequent 'wait()' calls. + + 'u'Create a barrier, initialised to 'parties' threads. + + 'action' is a callable which, when supplied, will be called by one of + the threads after they have all entered the barrier and just prior to + releasing them all. If a 'timeout' is provided, it is used as the + default for all subsequent 'wait()' calls. + + 'b'Wait for the barrier. + + When the specified number of threads have started waiting, they are all + simultaneously awoken. If an 'action' was provided for the barrier, one + of the threads will have executed that callback prior to returning. + Returns an individual index number from 0 to 'parties-1'. + + 'u'Wait for the barrier. + + When the specified number of threads have started waiting, they are all + simultaneously awoken. If an 'action' was provided for the barrier, one + of the threads will have executed that callback prior to returning. + Returns an individual index number from 0 to 'parties-1'. + + 'b'Reset the barrier to the initial state. + + Any threads currently waiting will get the BrokenBarrier exception + raised. + + 'u'Reset the barrier to the initial state. 
+ + Any threads currently waiting will get the BrokenBarrier exception + raised. + + 'b'Place the barrier into a 'broken' state. + + Useful in case of error. Any currently waiting threads and threads + attempting to 'wait()' will have BrokenBarrierError raised. + + 'u'Place the barrier into a 'broken' state. + + Useful in case of error. Any currently waiting threads and threads + attempting to 'wait()' will have BrokenBarrierError raised. + + 'b'Return the number of threads required to trip the barrier.'u'Return the number of threads required to trip the barrier.'b'Return the number of threads currently waiting at the barrier.'u'Return the number of threads currently waiting at the barrier.'b'Return True if the barrier is in a broken state.'u'Return True if the barrier is in a broken state.'b'Thread-%d'u'Thread-%d'b'A class that represents a thread of control. + + This class can be safely subclassed in a limited fashion. There are two ways + to specify the activity: by passing a callable object to the constructor, or + by overriding the run() method in a subclass. + + 'u'A class that represents a thread of control. + + This class can be safely subclassed in a limited fashion. There are two ways + to specify the activity: by passing a callable object to the constructor, or + by overriding the run() method in a subclass. + + 'b'This constructor should always be called with keyword arguments. Arguments are: + + *group* should be None; reserved for future extension when a ThreadGroup + class is implemented. + + *target* is the callable object to be invoked by the run() + method. Defaults to None, meaning nothing is called. + + *name* is the thread name. By default, a unique name is constructed of + the form "Thread-N" where N is a small decimal number. + + *args* is the argument tuple for the target invocation. Defaults to (). + + *kwargs* is a dictionary of keyword arguments for the target + invocation. Defaults to {}. + + If a subclass overrides the constructor, it must make sure to invoke + the base class constructor (Thread.__init__()) before doing anything + else to the thread. + + 'u'This constructor should always be called with keyword arguments. Arguments are: + + *group* should be None; reserved for future extension when a ThreadGroup + class is implemented. + + *target* is the callable object to be invoked by the run() + method. Defaults to None, meaning nothing is called. + + *name* is the thread name. By default, a unique name is constructed of + the form "Thread-N" where N is a small decimal number. + + *args* is the argument tuple for the target invocation. Defaults to (). + + *kwargs* is a dictionary of keyword arguments for the target + invocation. Defaults to {}. + + If a subclass overrides the constructor, it must make sure to invoke + the base class constructor (Thread.__init__()) before doing anything + else to the thread. + + 'b'Thread.__init__() was not called'u'Thread.__init__() was not called'b' daemon'u' daemon'b' %s'u' %s'b'<%s(%s, %s)>'u'<%s(%s, %s)>'b'Start the thread's activity. + + It must be called at most once per thread object. It arranges for the + object's run() method to be invoked in a separate thread of control. + + This method will raise a RuntimeError if called more than once on the + same thread object. + + 'u'Start the thread's activity. + + It must be called at most once per thread object. It arranges for the + object's run() method to be invoked in a separate thread of control. 
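These strings are the threading.Barrier docstrings (wait/reset/abort, parties, n_waiting, broken) followed by the threading.Thread class and constructor docstrings. A sketch of the barrier protocol they describe, with illustrative names:

import threading

def action():
    # run by exactly one thread after all parties have arrived
    print("all parties reached the barrier")

barrier = threading.Barrier(3, action=action, timeout=5)

def worker():
    index = barrier.wait()   # returns a distinct value in range(parties)
    print("released with index", index)

threads = [threading.Thread(target=worker) for _ in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(barrier.parties, barrier.n_waiting, barrier.broken)   # 3 0 False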
+ + This method will raise a RuntimeError if called more than once on the + same thread object. + + 'b'thread.__init__() not called'u'thread.__init__() not called'b'threads can only be started once'u'threads can only be started once'b'Method representing the thread's activity. + + You may override this method in a subclass. The standard run() method + invokes the callable object passed to the object's constructor as the + target argument, if any, with sequential and keyword arguments taken + from the args and kwargs arguments, respectively. + + 'u'Method representing the thread's activity. + + You may override this method in a subclass. The standard run() method + invokes the callable object passed to the object's constructor as the + target argument, if any, with sequential and keyword arguments taken + from the args and kwargs arguments, respectively. + + 'b' + Set a lock object which will be released by the interpreter when + the underlying thread state (see pystate.h) gets deleted. + 'u' + Set a lock object which will be released by the interpreter when + the underlying thread state (see pystate.h) gets deleted. + 'b'Remove current thread from the dict of currently running threads.'u'Remove current thread from the dict of currently running threads.'b'Wait until the thread terminates. + + This blocks the calling thread until the thread whose join() method is + called terminates -- either normally or through an unhandled exception + or until the optional timeout occurs. + + When the timeout argument is present and not None, it should be a + floating point number specifying a timeout for the operation in seconds + (or fractions thereof). As join() always returns None, you must call + is_alive() after join() to decide whether a timeout happened -- if the + thread is still alive, the join() call timed out. + + When the timeout argument is not present or None, the operation will + block until the thread terminates. + + A thread can be join()ed many times. + + join() raises a RuntimeError if an attempt is made to join the current + thread as that would cause a deadlock. It is also an error to join() a + thread before it has been started and attempts to do so raises the same + exception. + + 'u'Wait until the thread terminates. + + This blocks the calling thread until the thread whose join() method is + called terminates -- either normally or through an unhandled exception + or until the optional timeout occurs. + + When the timeout argument is present and not None, it should be a + floating point number specifying a timeout for the operation in seconds + (or fractions thereof). As join() always returns None, you must call + is_alive() after join() to decide whether a timeout happened -- if the + thread is still alive, the join() call timed out. + + When the timeout argument is not present or None, the operation will + block until the thread terminates. + + A thread can be join()ed many times. + + join() raises a RuntimeError if an attempt is made to join the current + thread as that would cause a deadlock. It is also an error to join() a + thread before it has been started and attempts to do so raises the same + exception. + + 'b'Thread.__init__() not called'u'Thread.__init__() not called'b'cannot join thread before it is started'u'cannot join thread before it is started'b'cannot join current thread'u'cannot join current thread'b'A string used for identification purposes only. + + It has no semantics. Multiple threads may be given the same name. The + initial name is set by the constructor. 
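The strings above cover Thread.start(), Thread.run() and Thread.join(). A minimal sketch of the target/args/join pattern they describe (download, results and the URLs are illustrative):

import threading

def download(url, results):
    results[url] = len(url)     # stand-in for real work

results = {}
threads = [
    threading.Thread(target=download, args=(u, results), name=f"fetch-{i}")
    for i, u in enumerate(["https://a.example", "https://b.example"])
]
for t in threads:
    t.start()                   # start() may be called at most once per Thread
for t in threads:
    t.join(timeout=10)          # join() returns None; check is_alive() for a timeout
    if t.is_alive():
        print(t.name, "did not finish in time")
print(results)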
+ + 'u'A string used for identification purposes only. + + It has no semantics. Multiple threads may be given the same name. The + initial name is set by the constructor. + + 'b'Thread identifier of this thread or None if it has not been started. + + This is a nonzero integer. See the get_ident() function. Thread + identifiers may be recycled when a thread exits and another thread is + created. The identifier is available even after the thread has exited. + + 'u'Thread identifier of this thread or None if it has not been started. + + This is a nonzero integer. See the get_ident() function. Thread + identifiers may be recycled when a thread exits and another thread is + created. The identifier is available even after the thread has exited. + + 'b'Native integral thread ID of this thread, or None if it has not been started. + + This is a non-negative integer. See the get_native_id() function. + This represents the Thread ID as reported by the kernel. + + 'u'Native integral thread ID of this thread, or None if it has not been started. + + This is a non-negative integer. See the get_native_id() function. + This represents the Thread ID as reported by the kernel. + + 'b'Return whether the thread is alive. + + This method returns True just before the run() method starts until just + after the run() method terminates. The module function enumerate() + returns a list of all alive threads. + + 'u'Return whether the thread is alive. + + This method returns True just before the run() method starts until just + after the run() method terminates. The module function enumerate() + returns a list of all alive threads. + + 'b'Return whether the thread is alive. + + This method is deprecated, use is_alive() instead. + 'u'Return whether the thread is alive. + + This method is deprecated, use is_alive() instead. + 'b'isAlive() is deprecated, use is_alive() instead'u'isAlive() is deprecated, use is_alive() instead'b'A boolean value indicating whether this thread is a daemon thread. + + This must be set before start() is called, otherwise RuntimeError is + raised. Its initial value is inherited from the creating thread; the + main thread is not a daemon thread and therefore all threads created in + the main thread default to daemon = False. + + The entire Python program exits when only daemon threads are left. + + 'u'A boolean value indicating whether this thread is a daemon thread. + + This must be set before start() is called, otherwise RuntimeError is + raised. Its initial value is inherited from the creating thread; the + main thread is not a daemon thread and therefore all threads created in + the main thread default to daemon = False. + + The entire Python program exits when only daemon threads are left. + + 'b'cannot set daemon status of active thread'u'cannot set daemon status of active thread'b'exc_type exc_value exc_traceback thread'u'exc_type exc_value exc_traceback thread'b' + Handle uncaught Thread.run() exception. + 'u' + Handle uncaught Thread.run() exception. 
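These strings document Thread.name, Thread.ident, Thread.native_id, Thread.is_alive() and the daemon flag. A short sketch of the daemon semantics they describe (the background loop is illustrative; native_id requires Python 3.8+):

import threading
import time

def background():
    while True:
        time.sleep(0.1)     # runs until the interpreter exits

t = threading.Thread(target=background, name="keepalive")
t.daemon = True             # must be set before start(), otherwise RuntimeError
t.start()

print(t.ident)              # non-None once the thread has started
print(t.native_id)          # thread id as reported by the kernel
print(t.is_alive())         # True while run() is executing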
+ 'b'Exception in thread 'u'Exception in thread 'b'threading.excepthook is None'u'threading.excepthook is None'b'sys.excepthook is None'u'sys.excepthook is None'b'Exception in threading.excepthook:'u'Exception in threading.excepthook:'b'Call a function after a specified number of seconds: + + t = Timer(30.0, f, args=None, kwargs=None) + t.start() + t.cancel() # stop the timer's action if it's still waiting + + 'u'Call a function after a specified number of seconds: + + t = Timer(30.0, f, args=None, kwargs=None) + t.start() + t.cancel() # stop the timer's action if it's still waiting + + 'b'Stop the timer if it hasn't finished yet.'u'Stop the timer if it hasn't finished yet.'b'MainThread'u'MainThread'b'Dummy-%d'u'Dummy-%d'b'cannot join a dummy thread'u'cannot join a dummy thread'b'Return the current Thread object, corresponding to the caller's thread of control. + + If the caller's thread of control was not created through the threading + module, a dummy thread object with limited functionality is returned. + + 'u'Return the current Thread object, corresponding to the caller's thread of control. + + If the caller's thread of control was not created through the threading + module, a dummy thread object with limited functionality is returned. + + 'b'Return the number of Thread objects currently alive. + + The returned count is equal to the length of the list returned by + enumerate(). + + 'u'Return the number of Thread objects currently alive. + + The returned count is equal to the length of the list returned by + enumerate(). + + 'b'Return a list of all Thread objects currently alive. + + The list includes daemonic threads, dummy thread objects created by + current_thread(), and the main thread. It excludes terminated threads and + threads that have not yet been started. + + 'u'Return a list of all Thread objects currently alive. + + The list includes daemonic threads, dummy thread objects created by + current_thread(), and the main thread. It excludes terminated threads and + threads that have not yet been started. + + 'b' + Wait until the Python thread state of all non-daemon threads get deleted. + 'u' + Wait until the Python thread state of all non-daemon threads get deleted. + 'b'Return the main thread object. + + In normal conditions, the main thread is the thread from which the + Python interpreter was started. + 'u'Return the main thread object. + + In normal conditions, the main thread is the thread from which the + Python interpreter was started. + 'b' + Cleanup threading module state that should not exist after a fork. + 'u' + Cleanup threading module state that should not exist after a fork. + 'u'threading'CLOCK_MONOTONICCLOCK_MONOTONIC_RAWCLOCK_PROCESS_CPUTIME_IDCLOCK_REALTIMECLOCK_THREAD_CPUTIME_IDCLOCK_UPTIME_RAW_STRUCT_TM_ITEMSu'This module provides various functions to manipulate time values. + +There are two standard representations of time. One is the number +of seconds since the Epoch, in UTC (a.k.a. GMT). It may be an integer +or a floating point number (to represent fractions of seconds). +The Epoch is system-defined; on Unix, it is generally January 1st, 1970. +The actual value can be retrieved by calling gmtime(0). + +The other representation is a tuple of 9 integers giving local time. +The tuple items are: + year (including century, e.g. 
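This span carries the threading.excepthook messages, the threading.Timer docstring and the module-level helpers (current_thread, active_count, enumerate, main_thread). A sketch of the Timer usage shown in its docstring, with an illustrative callback:

import threading

def fire():
    print("timer fired in", threading.current_thread().name)

t = threading.Timer(30.0, fire)
t.start()
print(threading.active_count() == len(threading.enumerate()))  # True by definition
t.cancel()                                  # stop the timer if it hasn't fired yet
print(threading.main_thread().name)         # normally "MainThread"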
1998) + month (1-12) + day (1-31) + hours (0-23) + minutes (0-59) + seconds (0-59) + weekday (0-6, Monday is 0) + Julian day (day in the year, 1-366) + DST (Daylight Savings Time) flag (-1, 0 or 1) +If the DST flag is 0, the time is given in the regular time zone; +if it is 1, the time is given in the DST time zone; +if it is -1, mktime() should guess based on the date and time. +'-7200altzoneclock_getresclock_gettimeclock_gettime_nsclock_settimeclock_settime_nsdaylightmonotonic_nsperf_counter_nsprocess_timeprocess_time_nsu'The time value as returned by gmtime(), localtime(), and strptime(), and + accepted by asctime(), mktime() and strftime(). May be considered as a + sequence of 9 integers. + + Note that several fields' values are not the same as those defined by + the C language standard for struct tm. For example, the value of the + field tm_year is the actual year, not year - 1900. See individual + fields' descriptions for details.'tm_hourtm_isdsttm_mdaytm_mintm_montm_sectm_wdaytm_ydaytm_yeartime.struct_timethread_timethread_time_nstime_ns-3600u'CET'u'CEST'Token constants (from "token.h").LSQBRSQBSEMIMINUSSLASHVBARAMPERLESSGREATERPERCENTBACKQUOTEEQEQUALNOTEQUALLESSEQUALGREATEREQUALTILDECIRCUMFLEXLEFTSHIFTRIGHTSHIFTDOUBLESTARPLUSEQUALMINEQUALSTAREQUALSLASHEQUALPERCENTEQUALAMPEREQUALVBAREQUALCIRCUMFLEXEQUALLEFTSHIFTEQUALRIGHTSHIFTEQUALDOUBLESTAREQUALDOUBLESLASHDOUBLESLASHEQUALATEQUALRARROWAWAITASYNCCOLONEQUALN_TOKENSNT_OFFSETISTERMINALISNONTERMINALISEOF# Taken from Python (r53757) and modified to include some tokens# originally monkeypatched in by pgen2.tokenize#--start constants--#--end constants--b'Token constants (from "token.h").'u'Token constants (from "token.h").'u'lib2to3.pgen2.token'u'pgen2.token'u'token'Token constants.TYPE_IGNORETYPE_COMMENT:=@=EXACT_TOKEN_TYPES# Auto-generated by Tools/scripts/generate_token.py# These aren't used by the C tokenizer but are needed for tokenize.py# Special definitions for cooperation with parserb'Token constants.'u'Token constants.'b'tok_name'u'tok_name'b'ISTERMINAL'u'ISTERMINAL'b'ISNONTERMINAL'u'ISNONTERMINAL'b'ISEOF'u'ISEOF'b':='u':='b'@='u'@='Tokenization help for Python programs. + +tokenize(readline) is a generator that breaks a stream of bytes into +Python tokens. It decodes the bytes according to PEP-0263 for +determining source file encoding. + +It accepts a readline-like method which is called repeatedly to get the +next line of input (or b"" for EOF). It generates 5-tuples with these +members: + + the token type (see token.py) + the token (a string) + the starting (row, column) indices of the token (a 2-tuple of ints) + the ending (row, column) indices of the token (a 2-tuple of ints) + the original line (string) + +It is designed to match the working of the Python tokenizer exactly, except +that it produces COMMENT tokens for comments and gives type OP for all +operators. Additionally, all token lists start with an ENCODING token +which tells you which encoding was used to decode the bytes stream. 
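Here the cached strings switch to the time module docstring (struct_time fields, clock constants) and then the token constants. A small sketch of the struct_time layout that docstring lists (values shown are whatever the local clock returns):

import time

now = time.localtime()                         # struct_time in local time
print(now.tm_year, now.tm_mon, now.tm_mday)    # tm_year is the real year, not year - 1900
print(now.tm_wday, now.tm_yday, now.tm_isdst)  # Monday is 0; DST flag is -1, 0 or 1

print(time.gmtime(0))                          # the epoch, normally 1970-01-01 UTC
stamp = time.mktime(now)                       # local struct_time -> seconds since the epoch
print(time.strftime("%Y-%m-%d %H:%M:%S", now))
print(time.monotonic_ns(), time.perf_counter_ns())   # nanosecond-resolution clocks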
+GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro, Raymond Hettinger, Trent Nelson, Michael Foord'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, ''Skip Montanaro, Raymond Hettinger, Trent Nelson, ''Michael Foord'_itertools^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)cookie_re^[ \t\f]*(?:[#\r\n]|$)blank_reuntokenizeTokenInfotype string start end line%d (%s)annotated_typeTokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)exact_type[ \f\t]*Whitespace#[^\r\n]*\\\r?\nIgnore\w+0[xX](?:_?[0-9a-fA-F])+Hexnumber0[bB](?:_?[01])+Binnumber0[oO](?:_?[0-7])+Octnumber(?:0(?:_?0)*|[1-9](?:_?[0-9])*)DecnumberIntnumber[eE][-+]?[0-9](?:_?[0-9])*Exponent[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?\.[0-9](?:_?[0-9])*Pointfloat[0-9](?:_?[0-9])*ExpfloatFloatnumber[0-9](?:_?[0-9])*[jJ][jJ]Imagnumber_all_string_prefixes_valid_string_prefixesStringPrefix[^'\\]*(?:\\.[^'\\]*)*'Single[^"\\]*(?:\\.[^"\\]*)*"Double[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''Single3[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""Double3Triple'[^\n'\\]*(?:\\.[^\n'\\]*)*'"[^\n"\\]*(?:\\.[^\n"\\]*)*"Special\r?\nFunnyPlainToken'[^\n'\\]*(?:\\.[^\n'\\]*)*"[^\n"\\]*(?:\\.[^\n"\\]*)*ContStr\\\r?\n|\ZPseudoExtrasPseudoTokenendpatssingle_quotedtriple_quotedTokenErrorStopTokenizingUntokenizerprev_rowprev_coladd_whitespacestart ({},{}) precedes previous end ({},{})row_offsetstartlinecompattok_typetoks_appendprevstringtoknumtokvalTransform tokens back into Python source code. + It returns a bytes object, encoded using the ENCODING + token, which is the first token sequence output by tokenize. + + Each element returned by the iterable must be a token sequence + with at least two elements, a token number and token value. If + only two tokens are passed, the resulting output is poor. + + Round-trip invariant for full input: + Untokenized source will match input source exactly + + Round-trip invariant for limited input: + # Output bytes will tokenize back to the input + t1 = [tok[:2] for tok in tokenize(f.readline)] + newcode = untokenize(t1) + readline = BytesIO(newcode).readline + t2 = [tok[:2] for tok in tokenize(readline)] + assert t1 == t2 + ut_get_normal_nameorig_encImitates get_normal_name in tokenizer.c.utf-8-iso-latin-1latin-1-iso-8859-1-iso-latin-1- + The detect_encoding() function is used to detect the encoding that should + be used to decode a Python source file. It requires one argument, readline, + in the same way as the tokenize() generator. + + It will call readline a maximum of twice, and return the encoding used + (as a string) and a list of any lines (left as bytes) it has read in. + + It detects the encoding from the presence of a utf-8 bom or an encoding + cookie as specified in pep-0263. If both a bom and a cookie are present, + but disagree, a SyntaxError will be raised. If the encoding cookie is an + invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, + 'utf-8-sig' is returned. + + If no encoding is specified, then the default of 'utf-8' will be returned. + bom_foundread_or_stopfind_cookieline_stringinvalid or missing encoding declaration{} for {!r}unknown encoding: unknown encoding for {!r}: {}encoding problem: utf-8encoding problem for {!r}: utf-8-sigutf-8-sigOpen a file in read only mode using the encoding detected by + detect_encoding(). + + The tokenize() generator requires one argument, readline, which + must be a callable object which provides the same interface as the + readline() method of built-in file objects. Each call to the function + should return one line of input as bytes. 
Alternatively, readline + can be a callable function terminating with StopIteration: + readline = open(myfile, 'rb').__next__ # Example of alternate readline + + The generator produces 5-tuples with these members: the token type; the + token string; a 2-tuple (srow, scol) of ints specifying the row and + column where the token begins in the source; a 2-tuple (erow, ecol) of + ints specifying the row and column where the token ends in the source; + and the line on which the token was found. The line passed is the + physical line. + + The first token sequence will always be an ENCODING token + which tells you which encoding was used to decode the bytes stream. + rl_genparenlevcontinuednumcharscontstrneedcontcontlineEOF in multi-line stringstrstartendprogendmatch\ +# +comment_tokenunindent does not match any outer indentation levelEOF in multi-line statementpseudomatchsposepos([{)]}Tokenize a source reading Python code as unicode strings. + + This has the same API as tokenize(), except that it expects the *readline* + callable to return str objects instead of bytes. + perror%s:%d:%d: error: %s%s: error: %serror: %spython -m tokenizefilename.pythe file to tokenize; defaults to stdin--exactdisplay token names using the exact typetoken_type%d,%d-%d,%d:token_range%-20s%-15s%-15rinterrupted +unexpected error: %s# Note: we use unicode matching for names ("\w") but ascii matching for# number literals.# Return the empty string, plus all of the valid string prefixes.# The valid string prefixes. Only contain the lower case versions,# and don't contain any permutations (include 'fr', but not# 'rf'). The various permutations will be generated.# if we add binary f-strings, add: ['fb', 'fbr']# create a list with upper and lower versions of each# character# Note that since _all_string_prefixes includes the empty string,# StringPrefix can be the empty string (making it optional).# Tail end of ' string.# Tail end of " string.# Tail end of ''' string.# Tail end of """ string.# Single-line ' or " string.# Sorting in reverse order puts the long operators before their prefixes.# Otherwise if = came before ==, == would get recognized as two instances# of =.# First (or only) line of ' or " string.# For a given string prefix plus quotes, endpats maps it to a regex# to match the remainder of that string. _prefix can be empty, for# a normal single or triple quoted string (with no prefix).# A set of all of the single and triple quoted string prefixes,# including the opening quotes.# Insert a space between two consecutive strings# Only care about the first 12 characters.# Decode as UTF-8. Either the line is an encoding declaration,# in which case it should be pure ASCII, or it must be UTF-8# per default encoding.# This behaviour mimics the Python interpreter# BOM will already have been stripped.# loop over lines in stream# We capture the value of the line variable here because# readline uses the empty string '' to signal end of input,# hence `line` itself will always be overwritten at the end# of this loop.# continued string# new statement# measure leading whitespace# skip comments or blank lines# count indents or dedents# continued statement# scan for tokens# ordinary number# all on one line# multiple lines# Check up to the first 3 chars of the token to see if# they're in the single_quoted set. If so, they start# a string.# We're using the first 3, because we're looking for# "rb'" (for example) at the start of the token. 
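The preceding strings are the tokenize module: its regex fragments, detect_encoding(), and the tokenize(readline) generator described just above. A minimal sketch of driving that generator from an in-memory bytes buffer (the sample source is illustrative):

import io
import tokenize

source = b"x = 1\nprint(x)  # trailing comment\n"
readline = io.BytesIO(source).readline

for tok in tokenize.tokenize(readline):
    # every TokenInfo carries: type, string, start, end, line;
    # the first token is always ENCODING ('utf-8' here).
    print(tokenize.tok_name[tok.exact_type], repr(tok.string), tok.start, tok.end)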
If# we switch to longer prefixes, this needs to be# adjusted.# Note that initial == token[:1].# Also note that single quote checking must come after# triple quote checking (above).# Again, using the first 3 chars of the# token. This is looking for the matching end# regex for the correct type of quote# character. So it's really looking for# endpats["'"] or endpats['"'], by trying to# skip string prefix characters, if any.# ordinary string# ordinary name# continued stmt# Add an implicit NEWLINE if the input doesn't end in one# pop remaining indent levels# Helper error handling routines# Parse the arguments and options# Tokenize the input# Output the tokenizationb'Tokenization help for Python programs. + +tokenize(readline) is a generator that breaks a stream of bytes into +Python tokens. It decodes the bytes according to PEP-0263 for +determining source file encoding. + +It accepts a readline-like method which is called repeatedly to get the +next line of input (or b"" for EOF). It generates 5-tuples with these +members: + + the token type (see token.py) + the token (a string) + the starting (row, column) indices of the token (a 2-tuple of ints) + the ending (row, column) indices of the token (a 2-tuple of ints) + the original line (string) + +It is designed to match the working of the Python tokenizer exactly, except +that it produces COMMENT tokens for comments and gives type OP for all +operators. Additionally, all token lists start with an ENCODING token +which tells you which encoding was used to decode the bytes stream. +'u'Tokenization help for Python programs. + +tokenize(readline) is a generator that breaks a stream of bytes into +Python tokens. It decodes the bytes according to PEP-0263 for +determining source file encoding. + +It accepts a readline-like method which is called repeatedly to get the +next line of input (or b"" for EOF). It generates 5-tuples with these +members: + + the token type (see token.py) + the token (a string) + the starting (row, column) indices of the token (a 2-tuple of ints) + the ending (row, column) indices of the token (a 2-tuple of ints) + the original line (string) + +It is designed to match the working of the Python tokenizer exactly, except +that it produces COMMENT tokens for comments and gives type OP for all +operators. Additionally, all token lists start with an ENCODING token +which tells you which encoding was used to decode the bytes stream. 
+'b'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro, Raymond Hettinger, Trent Nelson, Michael Foord'u'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro, Raymond Hettinger, Trent Nelson, Michael Foord'b'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)'u'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)'b'^[ \t\f]*(?:[#\r\n]|$)'b'tokenize'u'tokenize'b'generate_tokens'u'generate_tokens'b'detect_encoding'u'detect_encoding'b'untokenize'u'untokenize'b'TokenInfo'u'TokenInfo'b'type string start end line'u'type string start end line'b'%d (%s)'u'%d (%s)'b'TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)'u'TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)'b'[ \f\t]*'u'[ \f\t]*'b'#[^\r\n]*'u'#[^\r\n]*'b'\\\r?\n'u'\\\r?\n'b'\w+'u'\w+'b'0[xX](?:_?[0-9a-fA-F])+'u'0[xX](?:_?[0-9a-fA-F])+'b'0[bB](?:_?[01])+'u'0[bB](?:_?[01])+'b'0[oO](?:_?[0-7])+'u'0[oO](?:_?[0-7])+'b'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'u'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'b'[eE][-+]?[0-9](?:_?[0-9])*'u'[eE][-+]?[0-9](?:_?[0-9])*'b'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?'u'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?'b'\.[0-9](?:_?[0-9])*'u'\.[0-9](?:_?[0-9])*'b'[0-9](?:_?[0-9])*'u'[0-9](?:_?[0-9])*'b'[0-9](?:_?[0-9])*[jJ]'u'[0-9](?:_?[0-9])*[jJ]'b'[jJ]'u'[jJ]'b'[^'\\]*(?:\\.[^'\\]*)*''u'[^'\\]*(?:\\.[^'\\]*)*''b'[^"\\]*(?:\\.[^"\\]*)*"'u'[^"\\]*(?:\\.[^"\\]*)*"'b'[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*''''u'[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*''''b'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'u'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'b''[^\n'\\]*(?:\\.[^\n'\\]*)*''u''[^\n'\\]*(?:\\.[^\n'\\]*)*''b'"[^\n"\\]*(?:\\.[^\n"\\]*)*"'u'"[^\n"\\]*(?:\\.[^\n"\\]*)*"'b'\r?\n'u'\r?\n'b''[^\n'\\]*(?:\\.[^\n'\\]*)*'u''[^\n'\\]*(?:\\.[^\n'\\]*)*'b'"[^\n"\\]*(?:\\.[^\n"\\]*)*'u'"[^\n"\\]*(?:\\.[^\n"\\]*)*'b'\\\r?\n|\Z'u'\\\r?\n|\Z'b'start ({},{}) precedes previous end ({},{})'u'start ({},{}) precedes previous end ({},{})'b'Transform tokens back into Python source code. + It returns a bytes object, encoded using the ENCODING + token, which is the first token sequence output by tokenize. + + Each element returned by the iterable must be a token sequence + with at least two elements, a token number and token value. If + only two tokens are passed, the resulting output is poor. + + Round-trip invariant for full input: + Untokenized source will match input source exactly + + Round-trip invariant for limited input: + # Output bytes will tokenize back to the input + t1 = [tok[:2] for tok in tokenize(f.readline)] + newcode = untokenize(t1) + readline = BytesIO(newcode).readline + t2 = [tok[:2] for tok in tokenize(readline)] + assert t1 == t2 + 'u'Transform tokens back into Python source code. + It returns a bytes object, encoded using the ENCODING + token, which is the first token sequence output by tokenize. + + Each element returned by the iterable must be a token sequence + with at least two elements, a token number and token value. If + only two tokens are passed, the resulting output is poor. 
+ + Round-trip invariant for full input: + Untokenized source will match input source exactly + + Round-trip invariant for limited input: + # Output bytes will tokenize back to the input + t1 = [tok[:2] for tok in tokenize(f.readline)] + newcode = untokenize(t1) + readline = BytesIO(newcode).readline + t2 = [tok[:2] for tok in tokenize(readline)] + assert t1 == t2 + 'b'Imitates get_normal_name in tokenizer.c.'u'Imitates get_normal_name in tokenizer.c.'b'utf-8-'u'utf-8-'b'iso-latin-1'u'iso-latin-1'b'latin-1-'u'latin-1-'b'iso-8859-1-'u'iso-8859-1-'b'iso-latin-1-'u'iso-latin-1-'b' + The detect_encoding() function is used to detect the encoding that should + be used to decode a Python source file. It requires one argument, readline, + in the same way as the tokenize() generator. + + It will call readline a maximum of twice, and return the encoding used + (as a string) and a list of any lines (left as bytes) it has read in. + + It detects the encoding from the presence of a utf-8 bom or an encoding + cookie as specified in pep-0263. If both a bom and a cookie are present, + but disagree, a SyntaxError will be raised. If the encoding cookie is an + invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, + 'utf-8-sig' is returned. + + If no encoding is specified, then the default of 'utf-8' will be returned. + 'u' + The detect_encoding() function is used to detect the encoding that should + be used to decode a Python source file. It requires one argument, readline, + in the same way as the tokenize() generator. + + It will call readline a maximum of twice, and return the encoding used + (as a string) and a list of any lines (left as bytes) it has read in. + + It detects the encoding from the presence of a utf-8 bom or an encoding + cookie as specified in pep-0263. If both a bom and a cookie are present, + but disagree, a SyntaxError will be raised. If the encoding cookie is an + invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, + 'utf-8-sig' is returned. + + If no encoding is specified, then the default of 'utf-8' will be returned. + 'b'invalid or missing encoding declaration'u'invalid or missing encoding declaration'b'{} for {!r}'u'{} for {!r}'b'unknown encoding: 'u'unknown encoding: 'b'unknown encoding for {!r}: {}'u'unknown encoding for {!r}: {}'b'encoding problem: utf-8'u'encoding problem: utf-8'b'encoding problem for {!r}: utf-8'u'encoding problem for {!r}: utf-8'b'-sig'u'-sig'b'utf-8-sig'u'utf-8-sig'b'Open a file in read only mode using the encoding detected by + detect_encoding(). + 'u'Open a file in read only mode using the encoding detected by + detect_encoding(). + 'b' + The tokenize() generator requires one argument, readline, which + must be a callable object which provides the same interface as the + readline() method of built-in file objects. Each call to the function + should return one line of input as bytes. Alternatively, readline + can be a callable function terminating with StopIteration: + readline = open(myfile, 'rb').__next__ # Example of alternate readline + + The generator produces 5-tuples with these members: the token type; the + token string; a 2-tuple (srow, scol) of ints specifying the row and + column where the token begins in the source; a 2-tuple (erow, ecol) of + ints specifying the row and column where the token ends in the source; + and the line on which the token was found. The line passed is the + physical line. 
+ + The first token sequence will always be an ENCODING token + which tells you which encoding was used to decode the bytes stream. + 'u' + The tokenize() generator requires one argument, readline, which + must be a callable object which provides the same interface as the + readline() method of built-in file objects. Each call to the function + should return one line of input as bytes. Alternatively, readline + can be a callable function terminating with StopIteration: + readline = open(myfile, 'rb').__next__ # Example of alternate readline + + The generator produces 5-tuples with these members: the token type; the + token string; a 2-tuple (srow, scol) of ints specifying the row and + column where the token begins in the source; a 2-tuple (erow, ecol) of + ints specifying the row and column where the token ends in the source; + and the line on which the token was found. The line passed is the + physical line. + + The first token sequence will always be an ENCODING token + which tells you which encoding was used to decode the bytes stream. + 'b'EOF in multi-line string'u'EOF in multi-line string'b'\ +'u'\ +'b'# +'u'# +'b'unindent does not match any outer indentation level'u'unindent does not match any outer indentation level'b''u''b'EOF in multi-line statement'u'EOF in multi-line statement'b'([{'u'([{'b')]}'u')]}'b'Tokenize a source reading Python code as unicode strings. + + This has the same API as tokenize(), except that it expects the *readline* + callable to return str objects instead of bytes. + 'u'Tokenize a source reading Python code as unicode strings. + + This has the same API as tokenize(), except that it expects the *readline* + callable to return str objects instead of bytes. + 'b'%s:%d:%d: error: %s'u'%s:%d:%d: error: %s'b'%s: error: %s'u'%s: error: %s'b'error: %s'u'error: %s'b'python -m tokenize'u'python -m tokenize'b'filename.py'u'filename.py'b'the file to tokenize; defaults to stdin'u'the file to tokenize; defaults to stdin'b'--exact'u'--exact'b'exact'u'exact'b'display token names using the exact type'u'display token names using the exact type'b'%d,%d-%d,%d:'u'%d,%d-%d,%d:'b'%-20s%-15s%-15r'u'%-20s%-15s%-15r'b'interrupted +'u'interrupted +'b'unexpected error: %s'u'unexpected error: %s'Tokenization help for Python programs. + +generate_tokens(readline) is a generator that breaks a stream of +text into Python tokens. It accepts a readline-like method which is called +repeatedly to get the next line of input (or "" for EOF). 
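This span repeats the untokenize(), detect_encoding() and tokenize() docstrings in their b''/u'' forms. A sketch of the round-trip invariant and the encoding detection they describe (the coding-cookie sample is illustrative):

import io
import tokenize

source = b"# -*- coding: utf-8 -*-\nx = 1\n"

encoding, lines_read = tokenize.detect_encoding(io.BytesIO(source).readline)
print(encoding)     # 'utf-8', taken from the coding cookie

# limited-input round trip: (type, string) pairs back through untokenize()
t1 = [tok[:2] for tok in tokenize.tokenize(io.BytesIO(source).readline)]
newcode = tokenize.untokenize(t1)   # bytes, encoded per the leading ENCODING token
t2 = [tok[:2] for tok in tokenize.tokenize(io.BytesIO(newcode).readline)]
assert t1 == t2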
It generates +5-tuples with these members: + + the token type (see token.py) + the token (a string) + the starting (row, column) indices of the token (a 2-tuple of ints) + the ending (row, column) indices of the token (a 2-tuple of ints) + the original line (string) + +It is designed to match the working of the Python tokenizer exactly, except +that it produces COMMENT tokens for comments and gives type OP for all +operators + +Older entry points + tokenize_loop(readline, tokeneater) + tokenize(readline, tokeneater=printtoken) +are the same, except instead of generating tokens, tokeneater is a callback +function to which the 5 fields described above are passed as 5 arguments, +each time a new token is found.GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanarolib2to3.pgen2.token_combinations0[bB]_?[01]+(?:_[01]+)*0[xX]_?[\da-fA-F]+(?:_[\da-fA-F]+)*[lL]?0[oO]?_?[0-7]+(?:_[0-7]+)*[lL]?[1-9]\d*(?:_\d+)*[lL]?0[lL]?[eE][-+]?\d+(?:_\d+)*\d+(?:_\d+)*\.(?:\d+(?:_\d+)*)?\.\d+(?:_\d+)*\d+(?:_\d+)*\d+(?:_\d+)*[jJ](?:[uUrRbBfF]|[rR][fFbB]|[fFbBuU][rR])?_litprefix\*\*=?>>=?<<=?//=?[+\-*/%&@|^=<>]=?Operator[][(){}]Bracket[:;.,`@]tokenprogpseudoprogsingle3progdouble3proguRUrURendprogsprinttokenxxx_todo_changemexxx_todo_changeme1srowscolerowecol%d,%d-%d,%d: %s %s + The tokenize() function accepts two parameters: one representing the + input stream, and one providing an output mechanism for tokenize(). + + The first parameter, readline, must be a callable object which provides + the same interface as the readline() method of built-in file objects. + Each call to the function should return one line of input as a string. + + The second parameter, tokeneater, must also be a callable object. It is + called once for each token, with five arguments, corresponding to the + tuples generated by generate_tokens(). + tokenize_looptoken_info + The detect_encoding() function is used to detect the encoding that should + be used to decode a Python source file. It requires one argument, readline, + in the same way as the tokenize() generator. + + It will call readline a maximum of twice, and return the encoding used + (as a string) and a list of any lines (left as bytes) it has read + in. + + It detects the encoding from the presence of a utf-8 bom or an encoding + cookie as specified in pep-0263. If both a bom and a cookie are present, but + disagree, a SyntaxError will be raised. If the encoding cookie is an invalid + charset, raise a SyntaxError. Note that if a utf-8 bom is found, + 'utf-8-sig' is returned. + + If no encoding is specified, then the default of 'utf-8' will be returned. + Transform tokens back into Python source code. + + Each element returned by the iterable must be a token sequence + with at least two elements, a token number and token value. If + only two tokens are passed, the resulting output is poor. + + Round-trip invariant for full input: + Untokenized source will match input source exactly + + Round-trip invariant for limited input: + # Output text will tokenize the back to the input + t1 = [tok[:2] for tok in generate_tokens(f.readline)] + newcode = untokenize(t1) + readline = iter(newcode.splitlines(1)).next + t2 = [tok[:2] for tokin generate_tokens(readline)] + assert t1 == t2 + + The generate_tokens() generator requires one argument, readline, which + must be a callable object which provides the same interface as the + readline() method of built-in file objects. Each call to the function + should return one line of input as a string. 
Alternately, readline + can be a callable function terminating with StopIteration: + readline = open(myfile).next # Example of alternate readline + + The generator produces 5-tuples with these members: the token type; the + token string; a 2-tuple (srow, scol) of ints specifying the row and + column where the token begins in the source; a 2-tuple (erow, ecol) of + ints specifying the row and column where the token ends in the source; + and the line on which the token was found. The line passed is the + physical line. + stashedasync_defasync_def_indentasync_def_nlnl_pos# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.# Support bytes type in Python <= 2.5, so 2to3 turns itself into# valid Python 3 code.# Because of leftmost-then-longest match semantics, be sure to put the# longest operators first (e.g., if = came before ==, == would get# recognized as two instances of =).# backwards compatible interface# 'stashed' and 'async_*' are used for async/await parsing# This yield is new; needed for better idempotency:# testingb'Tokenization help for Python programs. + +generate_tokens(readline) is a generator that breaks a stream of +text into Python tokens. It accepts a readline-like method which is called +repeatedly to get the next line of input (or "" for EOF). It generates +5-tuples with these members: + + the token type (see token.py) + the token (a string) + the starting (row, column) indices of the token (a 2-tuple of ints) + the ending (row, column) indices of the token (a 2-tuple of ints) + the original line (string) + +It is designed to match the working of the Python tokenizer exactly, except +that it produces COMMENT tokens for comments and gives type OP for all +operators + +Older entry points + tokenize_loop(readline, tokeneater) + tokenize(readline, tokeneater=printtoken) +are the same, except instead of generating tokens, tokeneater is a callback +function to which the 5 fields described above are passed as 5 arguments, +each time a new token is found.'u'Tokenization help for Python programs. + +generate_tokens(readline) is a generator that breaks a stream of +text into Python tokens. It accepts a readline-like method which is called +repeatedly to get the next line of input (or "" for EOF). 
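From here the cached strings come from lib2to3.pgen2.tokenize, whose generate_tokens() takes a str-returning readline. The standard-library tokenize.generate_tokens() has the same shape, so a sketch using it (the sample code and format string are illustrative):

import io
import tokenize

code = "def add(a, b):\n    return a + b\n"

# generate_tokens() is the str-based twin of tokenize(): readline returns str
# lines and no ENCODING token is emitted.
for tok_type, tok_string, (srow, scol), (erow, ecol), line in \
        tokenize.generate_tokens(io.StringIO(code).readline):
    print("%d,%d-%d,%d:\t%s\t%r"
          % (srow, scol, erow, ecol, tokenize.tok_name[tok_type], tok_string))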
It generates +5-tuples with these members: + + the token type (see token.py) + the token (a string) + the starting (row, column) indices of the token (a 2-tuple of ints) + the ending (row, column) indices of the token (a 2-tuple of ints) + the original line (string) + +It is designed to match the working of the Python tokenizer exactly, except +that it produces COMMENT tokens for comments and gives type OP for all +operators + +Older entry points + tokenize_loop(readline, tokeneater) + tokenize(readline, tokeneater=printtoken) +are the same, except instead of generating tokens, tokeneater is a callback +function to which the 5 fields described above are passed as 5 arguments, +each time a new token is found.'b'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'u'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'b'0[bB]_?[01]+(?:_[01]+)*'u'0[bB]_?[01]+(?:_[01]+)*'b'0[xX]_?[\da-fA-F]+(?:_[\da-fA-F]+)*[lL]?'u'0[xX]_?[\da-fA-F]+(?:_[\da-fA-F]+)*[lL]?'b'0[oO]?_?[0-7]+(?:_[0-7]+)*[lL]?'u'0[oO]?_?[0-7]+(?:_[0-7]+)*[lL]?'b'[1-9]\d*(?:_\d+)*[lL]?'u'[1-9]\d*(?:_\d+)*[lL]?'b'0[lL]?'u'0[lL]?'b'[eE][-+]?\d+(?:_\d+)*'u'[eE][-+]?\d+(?:_\d+)*'b'\d+(?:_\d+)*\.(?:\d+(?:_\d+)*)?'u'\d+(?:_\d+)*\.(?:\d+(?:_\d+)*)?'b'\.\d+(?:_\d+)*'u'\.\d+(?:_\d+)*'b'\d+(?:_\d+)*'u'\d+(?:_\d+)*'b'\d+(?:_\d+)*[jJ]'u'\d+(?:_\d+)*[jJ]'b'(?:[uUrRbBfF]|[rR][fFbB]|[fFbBuU][rR])?'u'(?:[uUrRbBfF]|[rR][fFbB]|[fFbBuU][rR])?'b'\*\*=?'u'\*\*=?'b'>>=?'u'>>=?'b'<<=?'u'<<=?'b'//=?'u'//=?'b'[+\-*/%&@|^=<>]=?'u'[+\-*/%&@|^=<>]=?'b'[][(){}]'u'[][(){}]'b'[:;.,`@]'u'[:;.,`@]'u'R'b'uR'u'uR'b'Ur'u'Ur'b'UR'u'UR'b'%d,%d-%d,%d: %s %s'u'%d,%d-%d,%d: %s %s'b' + The tokenize() function accepts two parameters: one representing the + input stream, and one providing an output mechanism for tokenize(). + + The first parameter, readline, must be a callable object which provides + the same interface as the readline() method of built-in file objects. + Each call to the function should return one line of input as a string. + + The second parameter, tokeneater, must also be a callable object. It is + called once for each token, with five arguments, corresponding to the + tuples generated by generate_tokens(). + 'u' + The tokenize() function accepts two parameters: one representing the + input stream, and one providing an output mechanism for tokenize(). + + The first parameter, readline, must be a callable object which provides + the same interface as the readline() method of built-in file objects. + Each call to the function should return one line of input as a string. + + The second parameter, tokeneater, must also be a callable object. It is + called once for each token, with five arguments, corresponding to the + tuples generated by generate_tokens(). + 'b' + The detect_encoding() function is used to detect the encoding that should + be used to decode a Python source file. It requires one argument, readline, + in the same way as the tokenize() generator. + + It will call readline a maximum of twice, and return the encoding used + (as a string) and a list of any lines (left as bytes) it has read + in. + + It detects the encoding from the presence of a utf-8 bom or an encoding + cookie as specified in pep-0263. If both a bom and a cookie are present, but + disagree, a SyntaxError will be raised. If the encoding cookie is an invalid + charset, raise a SyntaxError. Note that if a utf-8 bom is found, + 'utf-8-sig' is returned. + + If no encoding is specified, then the default of 'utf-8' will be returned. 
+ 'u' + The detect_encoding() function is used to detect the encoding that should + be used to decode a Python source file. It requires one argument, readline, + in the same way as the tokenize() generator. + + It will call readline a maximum of twice, and return the encoding used + (as a string) and a list of any lines (left as bytes) it has read + in. + + It detects the encoding from the presence of a utf-8 bom or an encoding + cookie as specified in pep-0263. If both a bom and a cookie are present, but + disagree, a SyntaxError will be raised. If the encoding cookie is an invalid + charset, raise a SyntaxError. Note that if a utf-8 bom is found, + 'utf-8-sig' is returned. + + If no encoding is specified, then the default of 'utf-8' will be returned. + 'b'Transform tokens back into Python source code. + + Each element returned by the iterable must be a token sequence + with at least two elements, a token number and token value. If + only two tokens are passed, the resulting output is poor. + + Round-trip invariant for full input: + Untokenized source will match input source exactly + + Round-trip invariant for limited input: + # Output text will tokenize the back to the input + t1 = [tok[:2] for tok in generate_tokens(f.readline)] + newcode = untokenize(t1) + readline = iter(newcode.splitlines(1)).next + t2 = [tok[:2] for tokin generate_tokens(readline)] + assert t1 == t2 + 'u'Transform tokens back into Python source code. + + Each element returned by the iterable must be a token sequence + with at least two elements, a token number and token value. If + only two tokens are passed, the resulting output is poor. + + Round-trip invariant for full input: + Untokenized source will match input source exactly + + Round-trip invariant for limited input: + # Output text will tokenize the back to the input + t1 = [tok[:2] for tok in generate_tokens(f.readline)] + newcode = untokenize(t1) + readline = iter(newcode.splitlines(1)).next + t2 = [tok[:2] for tokin generate_tokens(readline)] + assert t1 == t2 + 'b' + The generate_tokens() generator requires one argument, readline, which + must be a callable object which provides the same interface as the + readline() method of built-in file objects. Each call to the function + should return one line of input as a string. Alternately, readline + can be a callable function terminating with StopIteration: + readline = open(myfile).next # Example of alternate readline + + The generator produces 5-tuples with these members: the token type; the + token string; a 2-tuple (srow, scol) of ints specifying the row and + column where the token begins in the source; a 2-tuple (erow, ecol) of + ints specifying the row and column where the token ends in the source; + and the line on which the token was found. The line passed is the + physical line. + 'u' + The generate_tokens() generator requires one argument, readline, which + must be a callable object which provides the same interface as the + readline() method of built-in file objects. Each call to the function + should return one line of input as a string. 
Alternately, readline + can be a callable function terminating with StopIteration: + readline = open(myfile).next # Example of alternate readline + + The generator produces 5-tuples with these members: the token type; the + token string; a 2-tuple (srow, scol) of ints specifying the row and + column where the token begins in the source; a 2-tuple (erow, ecol) of + ints specifying the row and column where the token ends in the source; + and the line on which the token was found. The line passed is the + physical line. + 'u'lib2to3.pgen2.tokenize'u'pgen2.tokenize'Extract, format and print information about Python stack traces.extract_tbformat_stackformat_tbprint_lastprint_tbFrameSummarywalk_tbPrint the list of tuples as returned by extract_tb() or + extract_stack() as a formatted stack trace to the given file.from_listFormat a list of tuples or FrameSummary objects for printing. + + Given a list of tuples or FrameSummary objects as returned by + extract_tb() or extract_stack(), return a list of strings ready + for printing. + + Each string in the resulting list corresponds to the item with the + same index in the argument list. Each string ends in a newline; + the strings may contain internal newlines as well, for those items + whose source text line is not None. + Print up to 'limit' stack trace entries from the traceback 'tb'. + + If 'limit' is omitted or None, all entries are printed. If 'file' + is omitted or None, the output goes to sys.stderr; otherwise + 'file' should be an open file or file-like object with a write() + method. + A shorthand for 'format_list(extract_tb(tb, limit))'. + Return a StackSummary object representing a list of + pre-processed entries from traceback. + + This is useful for alternate formatting of stack traces. If + 'limit' is omitted or None, all entries are extracted. A + pre-processed stack trace entry is a FrameSummary object + containing attributes filename, lineno, name, and line + representing the information that is usually printed for a stack + trace. The line is a string with leading and trailing + whitespace stripped; if the source is not available it is None. + +The above exception was the direct cause of the following exception: + +"\nThe above exception was the direct cause ""of the following exception:\n\n"_cause_message +During handling of the above exception, another exception occurred: + +"\nDuring handling of the above exception, ""another exception occurred:\n\n"_context_messageetypePrint exception up to 'limit' stack trace entries from 'tb' to 'file'. + + This differs from print_tb() in the following ways: (1) if + traceback is not None, it prints a header "Traceback (most recent + call last):"; (2) it prints the exception type and value after the + stack trace; (3) if type is SyntaxError and value has the + appropriate format, it prints the line where the syntax error + occurred with a caret on the next line indicating the approximate + position of the error. + Format a stack trace and the exception information. + + The arguments have the same meaning as the corresponding arguments + to print_exception(). The return value is a list of strings, each + ending in a newline and some containing internal newlines. When + these lines are concatenated and printed, exactly the same text is + printed as does print_exception(). + Format the exception part of a traceback. + + The arguments are the exception type and value such as given by + sys.last_type and sys.last_value. The return value is a list of + strings, each ending in a newline. 
+ + Normally, the list contains a single string; however, for + SyntaxError exceptions, it contains several lines that (when + printed) display detailed information about where the syntax + error occurred. + + The message indicating which exception occurred is always the last + string in the list. + + _format_final_exc_line_some_strvaluestr%s: %s +Shorthand for 'print_exception(*sys.exc_info(), limit, file)'.Like print_exc() but return a string.This is a shorthand for 'print_exception(sys.last_type, + sys.last_value, sys.last_traceback, limit, file)'.no last exceptionPrint a stack trace from its invocation point. + + The optional 'f' argument can be used to specify an alternate + stack frame at which to start. The optional 'limit' and 'file' + arguments have the same meaning as for print_exception(). + Shorthand for 'format_list(extract_stack(f, limit))'.Extract the raw traceback from the current stack frame. + + The return value has the same format as for extract_tb(). The + optional 'f' and 'limit' arguments have the same meaning as for + print_stack(). Each item in the list is a quadruple (filename, + line number, function name, text), and the entries are in order + from oldest to newest stack frame. + Clear all references to local variables in the frames of a traceback.A single frame from a traceback. + + - :attr:`filename` The filename for the frame. + - :attr:`lineno` The line within filename for the frame that was + active when the frame was captured. + - :attr:`name` The name of the function or method that was executing + when the frame was captured. + - :attr:`line` The text from the linecache module for the + of code that was running when the frame was captured. + - :attr:`locals` Either None if locals were not supplied, or a dict + mapping the name to the repr() of the variable. + _linelookup_lineConstruct a FrameSummary. + + :param lookup_line: If True, `linecache` is consulted for the source + code line. Otherwise, the line will be looked up when first needed. + :param locals: If supplied the frame locals, which will be captured as + object representations. + :param line: If provided, use this instead of looking up the line in + the linecache. + Walk a stack yielding the frame and line number for each frame. + + This will follow f.f_back from the given frame. If no frame is given, the + current stack is used. Usually used with StackSummary.extract. + Walk a traceback yielding the frame and line number for each frame. + + This will follow tb.tb_next (and thus is in the opposite order to + walk_stack). Usually used with StackSummary.extract. + _RECURSIVE_CUTOFFA stack of frames.frame_genCreate a StackSummary from a traceback or stack object. + + :param frame_gen: A generator that yields (frame, lineno) tuples to + include in the stack. + :param limit: None to include all frames or the number of frames to + include. + :param lookup_lines: If True, lookup lines for each frame immediately, + otherwise lookup is deferred until the frame is rendered. + :param capture_locals: If True, the local variables from each frame will + be captured as object representations into the FrameSummary. + tracebacklimitfnamesa_list + Create a StackSummary object from a supplied list of + FrameSummary objects or old-style list of tuples. + Format the stack ready for printing. + + Returns a list of strings ready for printing. Each string in the + resulting list corresponds to a single frame from the stack. 
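The strings have now moved to the traceback module: print_exception/format_exception, format_exception_only, print_exc/format_exc, extract_stack and the FrameSummary attributes. A sketch of those helpers around a caught exception (fail() is an illustrative function):

import sys
import traceback

def fail():
    return 1 / 0

try:
    fail()
except ZeroDivisionError:
    traceback.print_exc(limit=2, file=sys.stderr)   # shorthand for print_exception(*sys.exc_info())
    text = traceback.format_exc()                   # same output as a single string
    last = traceback.format_exception_only(*sys.exc_info()[:2])[-1]
    print(last, end="")                             # "ZeroDivisionError: division by zero"

for frame in traceback.extract_stack(limit=5):      # StackSummary of FrameSummary objects
    print(frame.filename, frame.lineno, frame.name)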
+ Each string ends in a newline; the strings may contain internal + newlines as well, for those items with source text lines. + + For long sequences of the same frame and line, the first few + repetitions are shown, followed by a summary line stating the exact + number of further repetitions. + last_filelast_name [Previous line repeated more time' more ''time'] + File "{}", line {}, in {} + {} + {name} = {value} +An exception ready for rendering. + + The traceback module captures enough attributes from the original exception + to this intermediary form to ensure that no references are held, while + still being able to fully print or format it. + + Use `from_exception` to create TracebackException instances from exception + objects, or the constructor to create TracebackException instances from + individual components. + + - :attr:`__cause__` A TracebackException of the original *__cause__*. + - :attr:`__context__` A TracebackException of the original *__context__*. + - :attr:`__suppress_context__` The *__suppress_context__* value from the + original exception. + - :attr:`stack` A `StackSummary` representing the traceback. + - :attr:`exc_type` The class of the original traceback. + - :attr:`filename` For syntax errors - the filename where the error + occurred. + - :attr:`lineno` For syntax errors - the linenumber where the error + occurred. + - :attr:`text` For syntax errors - the text where the error + occurred. + - :attr:`offset` For syntax errors - the offset into the text where the + error occurred. + - :attr:`msg` For syntax errors - the compiler error message. + _seen_load_linesfrom_exceptionCreate a TracebackException from an exception.Private API. force all lines in the stack to be loaded.Format the exception part of the traceback. + + The return value is a generator of strings, each ending in a newline. + + Normally, the generator emits a single string; however, for + SyntaxError exceptions, it emits several lines that (when + printed) display detailed information about where the syntax + error occurred. + + The message indicating which exception occurred is always the last + string in the output. + stypesmodfilename_suffix File "{}", line {} + ({})badlinecaretspace {}^ +{}: {}{} +Format the exception. + + If chain is not *True*, *__cause__* and *__context__* will not be formatted. + + The return value is a generator of strings, each ending in a newline and + some containing internal newlines. `print_exception` is a wrapper around + this method which just prints the lines to a file. + + The message indicating which exception occurred is always the last + string in the output. + Traceback (most recent call last): +# Formatting and printing lists of traceback lines.# Printing and Extracting Tracebacks.# Exception formatting and output.# format_exception has ignored etype for some time, and code such as cgitb# passes in bogus values as a result. 
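These strings are the StackSummary.format() repetition-folding text and the TracebackException docstring. A sketch of capturing an exception into a TracebackException and formatting it later, as that docstring describes (the KeyError is illustrative):

import traceback

try:
    {}["missing"]
except KeyError as exc:
    te = traceback.TracebackException.from_exception(exc, capture_locals=True)

# te holds no references to the live frames; rendering is deferred.
print(te.exc_type.__name__)                          # 'KeyError'
print("".join(te.format_exception_only()), end="")   # last line: "KeyError: 'missing'"
print("".join(te.format(chain=True)), end="")        # full "Traceback (most recent call last):" block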
For compatibility with such code we# ignore it here (rather than in the new TracebackException API).# -- not official API but folk probably use these two functions.# --# Printing and Extracting Stacks.# Ignore the exception raised if the frame is still executing.# Also hardcoded in traceback.c.# Must defer line lookups until we have called checkcache.# If immediate lookup was desired, trigger lookups now.# While doing a fast-path check for isinstance(a_list, StackSummary) is# appealing, idlelib.run.cleanup_traceback and other similar code may# break this by making arbitrary frames plain tuples, so we need to# check on a frame by frame basis.# NB: we need to accept exc_traceback, exc_value, exc_traceback to# permit backwards compat with the existing API, otherwise we# need stub thunk objects just to glue it together.# Handle loops in __cause__ or __context__.# Gracefully handle (the way Python 2.4 and earlier did) the case of# being called with no type or value (None, None, None).# TODO: locals.# Capture now to permit freeing resources: only complication is in the# unofficial API _format_final_exc_line# Handle SyntaxError's specially# It was a syntax error; show exactly where the problem was found.# non-space whitespace (likes tabs) must be kept for alignmentb'Extract, format and print information about Python stack traces.'u'Extract, format and print information about Python stack traces.'b'extract_stack'u'extract_stack'b'extract_tb'u'extract_tb'b'format_exception'u'format_exception'b'format_exception_only'u'format_exception_only'b'format_list'u'format_list'b'format_stack'u'format_stack'b'format_tb'u'format_tb'b'print_exc'u'print_exc'b'format_exc'u'format_exc'b'print_exception'u'print_exception'b'print_last'u'print_last'b'print_stack'u'print_stack'b'print_tb'u'print_tb'b'clear_frames'u'clear_frames'b'FrameSummary'u'FrameSummary'b'StackSummary'u'StackSummary'b'TracebackException'u'TracebackException'b'walk_stack'u'walk_stack'b'walk_tb'u'walk_tb'b'Print the list of tuples as returned by extract_tb() or + extract_stack() as a formatted stack trace to the given file.'u'Print the list of tuples as returned by extract_tb() or + extract_stack() as a formatted stack trace to the given file.'b'Format a list of tuples or FrameSummary objects for printing. + + Given a list of tuples or FrameSummary objects as returned by + extract_tb() or extract_stack(), return a list of strings ready + for printing. + + Each string in the resulting list corresponds to the item with the + same index in the argument list. Each string ends in a newline; + the strings may contain internal newlines as well, for those items + whose source text line is not None. + 'u'Format a list of tuples or FrameSummary objects for printing. + + Given a list of tuples or FrameSummary objects as returned by + extract_tb() or extract_stack(), return a list of strings ready + for printing. + + Each string in the resulting list corresponds to the item with the + same index in the argument list. Each string ends in a newline; + the strings may contain internal newlines as well, for those items + whose source text line is not None. + 'b'Print up to 'limit' stack trace entries from the traceback 'tb'. + + If 'limit' is omitted or None, all entries are printed. If 'file' + is omitted or None, the output goes to sys.stderr; otherwise + 'file' should be an open file or file-like object with a write() + method. + 'u'Print up to 'limit' stack trace entries from the traceback 'tb'. + + If 'limit' is omitted or None, all entries are printed. 
If 'file' + is omitted or None, the output goes to sys.stderr; otherwise + 'file' should be an open file or file-like object with a write() + method. + 'b'A shorthand for 'format_list(extract_tb(tb, limit))'.'u'A shorthand for 'format_list(extract_tb(tb, limit))'.'b' + Return a StackSummary object representing a list of + pre-processed entries from traceback. + + This is useful for alternate formatting of stack traces. If + 'limit' is omitted or None, all entries are extracted. A + pre-processed stack trace entry is a FrameSummary object + containing attributes filename, lineno, name, and line + representing the information that is usually printed for a stack + trace. The line is a string with leading and trailing + whitespace stripped; if the source is not available it is None. + 'u' + Return a StackSummary object representing a list of + pre-processed entries from traceback. + + This is useful for alternate formatting of stack traces. If + 'limit' is omitted or None, all entries are extracted. A + pre-processed stack trace entry is a FrameSummary object + containing attributes filename, lineno, name, and line + representing the information that is usually printed for a stack + trace. The line is a string with leading and trailing + whitespace stripped; if the source is not available it is None. + 'b' +The above exception was the direct cause of the following exception: + +'u' +The above exception was the direct cause of the following exception: + +'b' +During handling of the above exception, another exception occurred: + +'u' +During handling of the above exception, another exception occurred: + +'b'Print exception up to 'limit' stack trace entries from 'tb' to 'file'. + + This differs from print_tb() in the following ways: (1) if + traceback is not None, it prints a header "Traceback (most recent + call last):"; (2) it prints the exception type and value after the + stack trace; (3) if type is SyntaxError and value has the + appropriate format, it prints the line where the syntax error + occurred with a caret on the next line indicating the approximate + position of the error. + 'u'Print exception up to 'limit' stack trace entries from 'tb' to 'file'. + + This differs from print_tb() in the following ways: (1) if + traceback is not None, it prints a header "Traceback (most recent + call last):"; (2) it prints the exception type and value after the + stack trace; (3) if type is SyntaxError and value has the + appropriate format, it prints the line where the syntax error + occurred with a caret on the next line indicating the approximate + position of the error. + 'b'Format a stack trace and the exception information. + + The arguments have the same meaning as the corresponding arguments + to print_exception(). The return value is a list of strings, each + ending in a newline and some containing internal newlines. When + these lines are concatenated and printed, exactly the same text is + printed as does print_exception(). + 'u'Format a stack trace and the exception information. + + The arguments have the same meaning as the corresponding arguments + to print_exception(). The return value is a list of strings, each + ending in a newline and some containing internal newlines. When + these lines are concatenated and printed, exactly the same text is + printed as does print_exception(). + 'b'Format the exception part of a traceback. + + The arguments are the exception type and value such as given by + sys.last_type and sys.last_value. 
The return value is a list of + strings, each ending in a newline. + + Normally, the list contains a single string; however, for + SyntaxError exceptions, it contains several lines that (when + printed) display detailed information about where the syntax + error occurred. + + The message indicating which exception occurred is always the last + string in the list. + + 'u'Format the exception part of a traceback. + + The arguments are the exception type and value such as given by + sys.last_type and sys.last_value. The return value is a list of + strings, each ending in a newline. + + Normally, the list contains a single string; however, for + SyntaxError exceptions, it contains several lines that (when + printed) display detailed information about where the syntax + error occurred. + + The message indicating which exception occurred is always the last + string in the list. + + 'b'%s: %s +'u'%s: %s +'b''u''b'Shorthand for 'print_exception(*sys.exc_info(), limit, file)'.'u'Shorthand for 'print_exception(*sys.exc_info(), limit, file)'.'b'Like print_exc() but return a string.'u'Like print_exc() but return a string.'b'This is a shorthand for 'print_exception(sys.last_type, + sys.last_value, sys.last_traceback, limit, file)'.'u'This is a shorthand for 'print_exception(sys.last_type, + sys.last_value, sys.last_traceback, limit, file)'.'b'last_type'u'last_type'b'no last exception'u'no last exception'b'Print a stack trace from its invocation point. + + The optional 'f' argument can be used to specify an alternate + stack frame at which to start. The optional 'limit' and 'file' + arguments have the same meaning as for print_exception(). + 'u'Print a stack trace from its invocation point. + + The optional 'f' argument can be used to specify an alternate + stack frame at which to start. The optional 'limit' and 'file' + arguments have the same meaning as for print_exception(). + 'b'Shorthand for 'format_list(extract_stack(f, limit))'.'u'Shorthand for 'format_list(extract_stack(f, limit))'.'b'Extract the raw traceback from the current stack frame. + + The return value has the same format as for extract_tb(). The + optional 'f' and 'limit' arguments have the same meaning as for + print_stack(). Each item in the list is a quadruple (filename, + line number, function name, text), and the entries are in order + from oldest to newest stack frame. + 'u'Extract the raw traceback from the current stack frame. + + The return value has the same format as for extract_tb(). The + optional 'f' and 'limit' arguments have the same meaning as for + print_stack(). Each item in the list is a quadruple (filename, + line number, function name, text), and the entries are in order + from oldest to newest stack frame. + 'b'Clear all references to local variables in the frames of a traceback.'u'Clear all references to local variables in the frames of a traceback.'b'A single frame from a traceback. + + - :attr:`filename` The filename for the frame. + - :attr:`lineno` The line within filename for the frame that was + active when the frame was captured. + - :attr:`name` The name of the function or method that was executing + when the frame was captured. + - :attr:`line` The text from the linecache module for the + of code that was running when the frame was captured. + - :attr:`locals` Either None if locals were not supplied, or a dict + mapping the name to the repr() of the variable. + 'u'A single frame from a traceback. + + - :attr:`filename` The filename for the frame. 
+ - :attr:`lineno` The line within filename for the frame that was + active when the frame was captured. + - :attr:`name` The name of the function or method that was executing + when the frame was captured. + - :attr:`line` The text from the linecache module for the + of code that was running when the frame was captured. + - :attr:`locals` Either None if locals were not supplied, or a dict + mapping the name to the repr() of the variable. + 'b'_line'u'_line'b'locals'u'locals'b'Construct a FrameSummary. + + :param lookup_line: If True, `linecache` is consulted for the source + code line. Otherwise, the line will be looked up when first needed. + :param locals: If supplied the frame locals, which will be captured as + object representations. + :param line: If provided, use this instead of looking up the line in + the linecache. + 'u'Construct a FrameSummary. + + :param lookup_line: If True, `linecache` is consulted for the source + code line. Otherwise, the line will be looked up when first needed. + :param locals: If supplied the frame locals, which will be captured as + object representations. + :param line: If provided, use this instead of looking up the line in + the linecache. + 'b''u''b'Walk a stack yielding the frame and line number for each frame. + + This will follow f.f_back from the given frame. If no frame is given, the + current stack is used. Usually used with StackSummary.extract. + 'u'Walk a stack yielding the frame and line number for each frame. + + This will follow f.f_back from the given frame. If no frame is given, the + current stack is used. Usually used with StackSummary.extract. + 'b'Walk a traceback yielding the frame and line number for each frame. + + This will follow tb.tb_next (and thus is in the opposite order to + walk_stack). Usually used with StackSummary.extract. + 'u'Walk a traceback yielding the frame and line number for each frame. + + This will follow tb.tb_next (and thus is in the opposite order to + walk_stack). Usually used with StackSummary.extract. + 'b'A stack of frames.'u'A stack of frames.'b'Create a StackSummary from a traceback or stack object. + + :param frame_gen: A generator that yields (frame, lineno) tuples to + include in the stack. + :param limit: None to include all frames or the number of frames to + include. + :param lookup_lines: If True, lookup lines for each frame immediately, + otherwise lookup is deferred until the frame is rendered. + :param capture_locals: If True, the local variables from each frame will + be captured as object representations into the FrameSummary. + 'u'Create a StackSummary from a traceback or stack object. + + :param frame_gen: A generator that yields (frame, lineno) tuples to + include in the stack. + :param limit: None to include all frames or the number of frames to + include. + :param lookup_lines: If True, lookup lines for each frame immediately, + otherwise lookup is deferred until the frame is rendered. + :param capture_locals: If True, the local variables from each frame will + be captured as object representations into the FrameSummary. + 'b'tracebacklimit'u'tracebacklimit'b' + Create a StackSummary object from a supplied list of + FrameSummary objects or old-style list of tuples. + 'u' + Create a StackSummary object from a supplied list of + FrameSummary objects or old-style list of tuples. + 'b'Format the stack ready for printing. + + Returns a list of strings ready for printing. Each string in the + resulting list corresponds to a single frame from the stack. 
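The `FrameSummary`, `walk_stack()` and `StackSummary.extract()` docstrings above combine as in this small sketch, which uses only the documented attributes (`filename`, `lineno`, `name`, `line`):

```python
import traceback

def current_frames(limit=5):
    # walk_stack(None) yields (frame, lineno) pairs for the stack;
    # StackSummary.extract turns them into FrameSummary objects.
    return traceback.StackSummary.extract(traceback.walk_stack(None), limit=limit)

for frame in current_frames():
    print(f"{frame.filename}:{frame.lineno} in {frame.name}")
    if frame.line:
        print(f"    {frame.line}")
```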
+ Each string ends in a newline; the strings may contain internal + newlines as well, for those items with source text lines. + + For long sequences of the same frame and line, the first few + repetitions are shown, followed by a summary line stating the exact + number of further repetitions. + 'u'Format the stack ready for printing. + + Returns a list of strings ready for printing. Each string in the + resulting list corresponds to a single frame from the stack. + Each string ends in a newline; the strings may contain internal + newlines as well, for those items with source text lines. + + For long sequences of the same frame and line, the first few + repetitions are shown, followed by a summary line stating the exact + number of further repetitions. + 'b' [Previous line repeated 'u' [Previous line repeated 'b' more time'u' more time'b'] +'u'] +'b' File "{}", line {}, in {} +'u' File "{}", line {}, in {} +'b' {} +'u' {} +'b' {name} = {value} +'u' {name} = {value} +'b'An exception ready for rendering. + + The traceback module captures enough attributes from the original exception + to this intermediary form to ensure that no references are held, while + still being able to fully print or format it. + + Use `from_exception` to create TracebackException instances from exception + objects, or the constructor to create TracebackException instances from + individual components. + + - :attr:`__cause__` A TracebackException of the original *__cause__*. + - :attr:`__context__` A TracebackException of the original *__context__*. + - :attr:`__suppress_context__` The *__suppress_context__* value from the + original exception. + - :attr:`stack` A `StackSummary` representing the traceback. + - :attr:`exc_type` The class of the original traceback. + - :attr:`filename` For syntax errors - the filename where the error + occurred. + - :attr:`lineno` For syntax errors - the linenumber where the error + occurred. + - :attr:`text` For syntax errors - the text where the error + occurred. + - :attr:`offset` For syntax errors - the offset into the text where the + error occurred. + - :attr:`msg` For syntax errors - the compiler error message. + 'u'An exception ready for rendering. + + The traceback module captures enough attributes from the original exception + to this intermediary form to ensure that no references are held, while + still being able to fully print or format it. + + Use `from_exception` to create TracebackException instances from exception + objects, or the constructor to create TracebackException instances from + individual components. + + - :attr:`__cause__` A TracebackException of the original *__cause__*. + - :attr:`__context__` A TracebackException of the original *__context__*. + - :attr:`__suppress_context__` The *__suppress_context__* value from the + original exception. + - :attr:`stack` A `StackSummary` representing the traceback. + - :attr:`exc_type` The class of the original traceback. + - :attr:`filename` For syntax errors - the filename where the error + occurred. + - :attr:`lineno` For syntax errors - the linenumber where the error + occurred. + - :attr:`text` For syntax errors - the text where the error + occurred. + - :attr:`offset` For syntax errors - the offset into the text where the + error occurred. + - :attr:`msg` For syntax errors - the compiler error message. + 'b'Create a TracebackException from an exception.'u'Create a TracebackException from an exception.'b'Private API. force all lines in the stack to be loaded.'u'Private API. 
force all lines in the stack to be loaded.'b'Format the exception part of the traceback. + + The return value is a generator of strings, each ending in a newline. + + Normally, the generator emits a single string; however, for + SyntaxError exceptions, it emits several lines that (when + printed) display detailed information about where the syntax + error occurred. + + The message indicating which exception occurred is always the last + string in the output. + 'u'Format the exception part of the traceback. + + The return value is a generator of strings, each ending in a newline. + + Normally, the generator emits a single string; however, for + SyntaxError exceptions, it emits several lines that (when + printed) display detailed information about where the syntax + error occurred. + + The message indicating which exception occurred is always the last + string in the output. + 'b' File "{}", line {} +'u' File "{}", line {} +'b' ({})'u' ({})'b' {}^ +'u' {}^ +'b''u''b'{}: {}{} +'u'{}: {}{} +'b'Format the exception. + + If chain is not *True*, *__cause__* and *__context__* will not be formatted. + + The return value is a generator of strings, each ending in a newline and + some containing internal newlines. `print_exception` is a wrapper around + this method which just prints the lines to a file. + + The message indicating which exception occurred is always the last + string in the output. + 'u'Format the exception. + + If chain is not *True*, *__cause__* and *__context__* will not be formatted. + + The return value is a generator of strings, each ending in a newline and + some containing internal newlines. `print_exception` is a wrapper around + this method which just prints the lines to a file. + + The message indicating which exception occurred is always the last + string in the output. + 'b'Traceback (most recent call last): +'u'Traceback (most recent call last): +'u'traceback'_format_sizeunitKiBMiBGiBTiB%+.1f %s%.1f %s%+.0f %s%.0f %sStatistic + Statistic difference on memory allocations between two Snapshot instance. + %s: size=%s, count=%iaverage, average=%s_sort_keyStatisticDiff + Statistic difference on memory allocations between an old and a new + Snapshot instance. + size_diffcount_diff%s: size=%s (%s), count=%i (%+i)_compare_grouped_statsold_groupnew_groupstatistics + Frame of a traceback. + _frame + Sequence of Frame instances sorted from the oldest frame + to the most recent frame. + _framesmost_recent_firstframe_slice File "%s", line %s %sget_object_traceback + Get the traceback where the Python object *obj* was allocated. + Return a Traceback instance. + + Return None if the tracemalloc module is not tracing memory allocations or + did not trace the allocation of the object. + Trace + Trace of a memory block. + _trace_Tracestraces_traces_normalize_filenameBaseFilterinclusive_matchfilename_patternall_frames_filename_pattern_match_frame_impl_match_frame_match_tracebackDomainFilter_domainSnapshot + Snapshot of traces of memory blocks allocated by Python. + traceback_limit + Write the snapshot into a file. + + Load a snapshot from a file. + _filter_traceinclude_filtersexclude_filterstrace_filterfilter_traces + Create a new Snapshot instance with a filtered traces sequence, filters + is a list of Filter or DomainFilter instances. If filters is an empty + list, return a new Snapshot instance with a copy of the traces. 
+ filters must be a list of filters, not %snew_traces_group_bykey_typecumulativeunknown key_type: %rcumulative mode cannot by used with key type %r"cumulative mode cannot by used ""with key type %r"tracebackstrace_traceback + Group statistics by key_type. Return a sorted list of Statistic + instances. + groupedcompare_toold_snapshot + Compute the differences with an old snapshot old_snapshot. Get + statistics as a sorted list of StatisticDiff instances, grouped by + group_by. + take_snapshot + Take a snapshot of traces of memory blocks allocated by Python. + the tracemalloc module must be tracing memory allocations to take a snapshot"the tracemalloc module must be tracing memory ""allocations to take a snapshot"# Import types and functions implemented in C# 3 digits (xx.x UNIT)# 4 or 5 digits (xxxx UNIT)# frame is a tuple: (filename: str, lineno: int)# frames is a tuple of frame tuples: see Frame constructor for the# format of a frame tuple; it is reversed, because _tracemalloc# returns frames sorted from most recent to oldest, but the# Python API expects oldest to most recent# trace is a tuple: (domain: int, size: int, traceback: tuple).# See Traceback constructor for the format of the traceback tuple.# traces is a tuple of trace tuples: see Trace constructor# traces is a tuple of trace tuples: see _Traces constructor for# the exact format# key_type == 'filename':# cumulative statisticsb'KiB'u'KiB'b'MiB'u'MiB'b'GiB'u'GiB'b'TiB'u'TiB'b'%+.1f %s'u'%+.1f %s'b'%.1f %s'u'%.1f %s'b'%+.0f %s'u'%+.0f %s'b'%.0f %s'u'%.0f %s'b' + Statistic difference on memory allocations between two Snapshot instance. + 'u' + Statistic difference on memory allocations between two Snapshot instance. + 'b'traceback'b'%s: size=%s, count=%i'u'%s: size=%s, count=%i'b', average=%s'u', average=%s'b''u''b' + Statistic difference on memory allocations between an old and a new + Snapshot instance. + 'u' + Statistic difference on memory allocations between an old and a new + Snapshot instance. + 'b'size_diff'u'size_diff'b'count_diff'u'count_diff'b'%s: size=%s (%s), count=%i (%+i)'u'%s: size=%s (%s), count=%i (%+i)'b''u''b' + Frame of a traceback. + 'u' + Frame of a traceback. + 'b'_frame'u'_frame'b''u''b' + Sequence of Frame instances sorted from the oldest frame + to the most recent frame. + 'u' + Sequence of Frame instances sorted from the oldest frame + to the most recent frame. + 'b'_frames'u'_frames'b''u''b' File "%s", line %s'u' File "%s", line %s'b' %s'u' %s'b' + Get the traceback where the Python object *obj* was allocated. + Return a Traceback instance. + + Return None if the tracemalloc module is not tracing memory allocations or + did not trace the allocation of the object. + 'u' + Get the traceback where the Python object *obj* was allocated. + Return a Traceback instance. + + Return None if the tracemalloc module is not tracing memory allocations or + did not trace the allocation of the object. + 'b' + Trace of a memory block. + 'u' + Trace of a memory block. + 'b'_trace'u'_trace'b''u''b''u''b' + Snapshot of traces of memory blocks allocated by Python. + 'u' + Snapshot of traces of memory blocks allocated by Python. + 'b' + Write the snapshot into a file. + 'u' + Write the snapshot into a file. + 'b' + Load a snapshot from a file. + 'u' + Load a snapshot from a file. + 'b' + Create a new Snapshot instance with a filtered traces sequence, filters + is a list of Filter or DomainFilter instances. If filters is an empty + list, return a new Snapshot instance with a copy of the traces. 
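The `take_snapshot()`, `filter_traces()` and `statistics()` docstrings above are normally used as a single pipeline. A sketch of that flow; the filter pattern and the allocation being traced are only examples, not part of the database content:

```python
import tracemalloc

tracemalloc.start()
payload = [bytes(1000) for _ in range(100)]   # example allocation to trace

snapshot = tracemalloc.take_snapshot()
# Exclude tracemalloc's own bookkeeping, then group traces by source line.
snapshot = snapshot.filter_traces([tracemalloc.Filter(False, tracemalloc.__file__)])
for stat in snapshot.statistics("lineno")[:3]:
    print(stat)
```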
+ 'u' + Create a new Snapshot instance with a filtered traces sequence, filters + is a list of Filter or DomainFilter instances. If filters is an empty + list, return a new Snapshot instance with a copy of the traces. + 'b'filters must be a list of filters, not %s'u'filters must be a list of filters, not %s'b'unknown key_type: %r'u'unknown key_type: %r'b'cumulative mode cannot by used with key type %r'u'cumulative mode cannot by used with key type %r'b' + Group statistics by key_type. Return a sorted list of Statistic + instances. + 'u' + Group statistics by key_type. Return a sorted list of Statistic + instances. + 'b' + Compute the differences with an old snapshot old_snapshot. Get + statistics as a sorted list of StatisticDiff instances, grouped by + group_by. + 'u' + Compute the differences with an old snapshot old_snapshot. Get + statistics as a sorted list of StatisticDiff instances, grouped by + group_by. + 'b' + Take a snapshot of traces of memory blocks allocated by Python. + 'u' + Take a snapshot of traces of memory blocks allocated by Python. + 'b'the tracemalloc module must be tracing memory allocations to take a snapshot'u'the tracemalloc module must be tracing memory allocations to take a snapshot'Abstract Transport class.DatagramTransportBase class for transports.Return True if the transport is closing or closed.Close the transport. + + Buffered data will be flushed asynchronously. No more data + will be received. After all buffered data is flushed, the + protocol's connection_lost() method will (eventually) be + called with None as its argument. + Set a new protocol.Return the current protocol.Interface for read-only transports.Return True if the transport is receiving.Interface for write-only transports.list_of_dataWrite a list (or any iterable) of data bytes to the transport. + + The default implementation concatenates the arguments and + calls write() on the result. + Close the write end after flushing buffered data. + + (This is like typing ^D into a UNIX program reading from stdin.) + + Data may still be received. + Interface representing a bidirectional transport. + + There may be several implementations, but typically, the user does + not implement new transports; rather, the platform provides some + useful transports that are implemented using the platform's best + practices. + + The user never instantiates a transport directly; they call a + utility function, passing it a protocol factory and other + information necessary to create the transport and protocol. (E.g. + EventLoop.create_connection() or EventLoop.create_server().) + + The utility function will asynchronously create a transport and a + protocol and hook them up by calling the protocol's + connection_made() method, passing it the transport. + + The implementation here raises NotImplemented for every method + except writelines(), which calls write() in a loop. + Interface for datagram (UDP) transports.Send data to the transport. + + This does not block; it buffers the data and arranges for it + to be sent out asynchronously. + addr is target socket address. + If addr is None use target address pointed on transport creation. + Get subprocess id.Get subprocess returncode. + + See also + http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode + Get transport for pipe with number fd.Send signal to subprocess. + + See also: + docs.python.org/3/library/subprocess#subprocess.Popen.send_signal + Stop the subprocess. + + Alias for close() method. 
+ + On Posix OSs the method sends SIGTERM to the subprocess. + On Windows the Win32 API function TerminateProcess() + is called to stop the subprocess. + + See also: + http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate + Kill the subprocess. + + On Posix OSs the function sends SIGKILL to the subprocess. + On Windows kill() is an alias for terminate(). + + See also: + http://docs.python.org/3/library/subprocess#subprocess.Popen.kill + All the logic for (write) flow control in a mix-in base class. + + The subclass must implement get_write_buffer_size(). It must call + _maybe_pause_protocol() whenever the write buffer size increases, + and _maybe_resume_protocol() whenever it decreases. It may also + override set_write_buffer_limits() (e.g. to specify different + defaults). + + The subclass constructor must call super().__init__(extra). This + will call set_write_buffer_limits(). + + The user may call set_write_buffer_limits() and + get_write_buffer_size(), and their protocol's pause_writing() and + resume_writing() may be called. + _high_water_low_water_set_write_buffer_limitsprotocol.pause_writing() failedprotocol.resume_writing() failedget_write_buffer_limitshigh () must be >= low () must be >= 0b'Abstract Transport class.'u'Abstract Transport class.'b'BaseTransport'u'BaseTransport'b'ReadTransport'u'ReadTransport'b'WriteTransport'u'WriteTransport'b'Transport'u'Transport'b'DatagramTransport'u'DatagramTransport'b'SubprocessTransport'u'SubprocessTransport'b'Base class for transports.'u'Base class for transports.'b'_extra'u'_extra'b'Return True if the transport is closing or closed.'u'Return True if the transport is closing or closed.'b'Close the transport. + + Buffered data will be flushed asynchronously. No more data + will be received. After all buffered data is flushed, the + protocol's connection_lost() method will (eventually) be + called with None as its argument. + 'u'Close the transport. + + Buffered data will be flushed asynchronously. No more data + will be received. After all buffered data is flushed, the + protocol's connection_lost() method will (eventually) be + called with None as its argument. + 'b'Set a new protocol.'u'Set a new protocol.'b'Return the current protocol.'u'Return the current protocol.'b'Interface for read-only transports.'u'Interface for read-only transports.'b'Return True if the transport is receiving.'u'Return True if the transport is receiving.'b'Interface for write-only transports.'u'Interface for write-only transports.'b'Write a list (or any iterable) of data bytes to the transport. + + The default implementation concatenates the arguments and + calls write() on the result. + 'u'Write a list (or any iterable) of data bytes to the transport. + + The default implementation concatenates the arguments and + calls write() on the result. + 'b'Close the write end after flushing buffered data. + + (This is like typing ^D into a UNIX program reading from stdin.) + + Data may still be received. + 'u'Close the write end after flushing buffered data. + + (This is like typing ^D into a UNIX program reading from stdin.) + + Data may still be received. + 'b'Interface representing a bidirectional transport. + + There may be several implementations, but typically, the user does + not implement new transports; rather, the platform provides some + useful transports that are implemented using the platform's best + practices. 
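The transport docstrings above (buffered `write()`, `write_eof()`, `connection_made()`/`connection_lost()`) are exercised through a protocol class rather than directly. A minimal client sketch; it assumes an echo server is already listening on 127.0.0.1:8888, which is an assumption made only for the example:

```python
import asyncio

class EchoClient(asyncio.Protocol):
    def __init__(self, message, done):
        self.message = message
        self.done = done

    def connection_made(self, transport):
        # write() only buffers; the event loop flushes it asynchronously.
        transport.write(self.message.encode())
        transport.write_eof()

    def data_received(self, data):
        print("received:", data.decode())

    def connection_lost(self, exc):
        self.done.set_result(True)

async def main():
    loop = asyncio.get_running_loop()
    done = loop.create_future()
    transport, _ = await loop.create_connection(
        lambda: EchoClient("hello", done), "127.0.0.1", 8888)
    await done
    transport.close()

asyncio.run(main())
```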
+ + The user never instantiates a transport directly; they call a + utility function, passing it a protocol factory and other + information necessary to create the transport and protocol. (E.g. + EventLoop.create_connection() or EventLoop.create_server().) + + The utility function will asynchronously create a transport and a + protocol and hook them up by calling the protocol's + connection_made() method, passing it the transport. + + The implementation here raises NotImplemented for every method + except writelines(), which calls write() in a loop. + 'u'Interface representing a bidirectional transport. + + There may be several implementations, but typically, the user does + not implement new transports; rather, the platform provides some + useful transports that are implemented using the platform's best + practices. + + The user never instantiates a transport directly; they call a + utility function, passing it a protocol factory and other + information necessary to create the transport and protocol. (E.g. + EventLoop.create_connection() or EventLoop.create_server().) + + The utility function will asynchronously create a transport and a + protocol and hook them up by calling the protocol's + connection_made() method, passing it the transport. + + The implementation here raises NotImplemented for every method + except writelines(), which calls write() in a loop. + 'b'Interface for datagram (UDP) transports.'u'Interface for datagram (UDP) transports.'b'Send data to the transport. + + This does not block; it buffers the data and arranges for it + to be sent out asynchronously. + addr is target socket address. + If addr is None use target address pointed on transport creation. + 'u'Send data to the transport. + + This does not block; it buffers the data and arranges for it + to be sent out asynchronously. + addr is target socket address. + If addr is None use target address pointed on transport creation. + 'b'Get subprocess id.'u'Get subprocess id.'b'Get subprocess returncode. + + See also + http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode + 'u'Get subprocess returncode. + + See also + http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode + 'b'Get transport for pipe with number fd.'u'Get transport for pipe with number fd.'b'Send signal to subprocess. + + See also: + docs.python.org/3/library/subprocess#subprocess.Popen.send_signal + 'u'Send signal to subprocess. + + See also: + docs.python.org/3/library/subprocess#subprocess.Popen.send_signal + 'b'Stop the subprocess. + + Alias for close() method. + + On Posix OSs the method sends SIGTERM to the subprocess. + On Windows the Win32 API function TerminateProcess() + is called to stop the subprocess. + + See also: + http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate + 'u'Stop the subprocess. + + Alias for close() method. + + On Posix OSs the method sends SIGTERM to the subprocess. + On Windows the Win32 API function TerminateProcess() + is called to stop the subprocess. + + See also: + http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate + 'b'Kill the subprocess. + + On Posix OSs the function sends SIGKILL to the subprocess. + On Windows kill() is an alias for terminate(). + + See also: + http://docs.python.org/3/library/subprocess#subprocess.Popen.kill + 'u'Kill the subprocess. + + On Posix OSs the function sends SIGKILL to the subprocess. + On Windows kill() is an alias for terminate(). 
+ + See also: + http://docs.python.org/3/library/subprocess#subprocess.Popen.kill + 'b'All the logic for (write) flow control in a mix-in base class. + + The subclass must implement get_write_buffer_size(). It must call + _maybe_pause_protocol() whenever the write buffer size increases, + and _maybe_resume_protocol() whenever it decreases. It may also + override set_write_buffer_limits() (e.g. to specify different + defaults). + + The subclass constructor must call super().__init__(extra). This + will call set_write_buffer_limits(). + + The user may call set_write_buffer_limits() and + get_write_buffer_size(), and their protocol's pause_writing() and + resume_writing() may be called. + 'u'All the logic for (write) flow control in a mix-in base class. + + The subclass must implement get_write_buffer_size(). It must call + _maybe_pause_protocol() whenever the write buffer size increases, + and _maybe_resume_protocol() whenever it decreases. It may also + override set_write_buffer_limits() (e.g. to specify different + defaults). + + The subclass constructor must call super().__init__(extra). This + will call set_write_buffer_limits(). + + The user may call set_write_buffer_limits() and + get_write_buffer_size(), and their protocol's pause_writing() and + resume_writing() may be called. + 'b'_protocol_paused'u'_protocol_paused'b'_high_water'u'_high_water'b'_low_water'u'_low_water'b'protocol.pause_writing() failed'u'protocol.pause_writing() failed'b'protocol.resume_writing() failed'u'protocol.resume_writing() failed'b'high ('u'high ('b') must be >= low ('u') must be >= low ('b') must be >= 0'u') must be >= 0'u'asyncio.transports'u'transports'A socket-like wrapper for exposing real transport sockets. + + These objects can be safely returned by APIs like + `transport.get_extra_info('socket')`. All potentially disruptive + operations (like "socket.close()") are banned. + _naUsing on sockets returned from get_extra_info('socket') will be prohibited in asyncio 3.9. Please report your use case to bugs.python.org." on sockets returned from get_extra_info('socket') ""will be prohibited in asyncio 3.9. Please report your use case ""to bugs.python.org." NoReturn: + raise Exception('no way') + + This type is invalid in other positions, e.g., ``List[NoReturn]`` + will fail in static type checkers. + Special type construct to mark class variables. + + An annotation wrapped in ClassVar indicates that a given + attribute is intended to be used as a class variable and + should not be set on instances of that class. Usage:: + + class Starship: + stats: ClassVar[Dict[str, int]] = {} # class variable + damage: int = 10 # instance variable + + ClassVar accepts only types and cannot be further subscribed. + + Note that ClassVar is not a class itself, and should not + be used with isinstance() or issubclass(). + Special typing construct to indicate final names to type checkers. + + A final name cannot be re-assigned or overridden in a subclass. + For example: + + MAX_SIZE: Final = 9000 + MAX_SIZE += 1 # Error reported by type checker + + class Connection: + TIMEOUT: Final[int] = 10 + + class FastConnector(Connection): + TIMEOUT = 1 # Error reported by type checker + + There is no runtime checking of these properties. + Union type; Union[X, Y] means either X or Y. + + To define a union, use e.g. Union[int, str]. Details: + - The arguments must be types and there must be at least one. + - None as an argument is a special case and is replaced by + type(None). 
+ - Unions of unions are flattened, e.g.:: + + Union[Union[int, str], float] == Union[int, str, float] + + - Unions of a single argument vanish, e.g.:: + + Union[int] == int # The constructor actually returns int + + - Redundant arguments are skipped, e.g.:: + + Union[int, str, int] == Union[int, str] + + - When comparing unions, the argument order is ignored, e.g.:: + + Union[int, str] == Union[str, int] + + - You cannot subclass or instantiate a union. + - You can use Optional[X] as a shorthand for Union[X, None]. + Optional type. + + Optional[X] is equivalent to Union[X, None]. + Special typing form to define literal types (a.k.a. value types). + + This form can be used to indicate to type checkers that the corresponding + variable or function parameter has a value equivalent to the provided + literal (or one of several literals): + + def validate_simple(data: Any) -> Literal[True]: # always returns True + ... + + MODE = Literal['r', 'rb', 'w', 'wb'] + def open_helper(file: str, mode: MODE) -> str: + ... + + open_helper('/some/path', 'r') # Passes type check + open_helper('/other/path', 'typo') # Error in type checker + + Literal[...] cannot be subclassed. At runtime, an arbitrary value + is allowed as type argument to Literal[...], but type checkers may + impose restrictions. + Internal wrapper to hold a forward reference.__forward_arg____forward_code____forward_evaluated____forward_value____forward_is_argument__Forward reference must be a string -- got Forward reference must be an expression -- got Forward references must evaluate to types.ForwardRef(Type variable. + + Usage:: + + T = TypeVar('T') # Can be anything + A = TypeVar('A', str, bytes) # Must be str or bytes + + Type variables exist primarily for the benefit of static type + checkers. They serve as the parameters for generic types as well + as for generic function definitions. See class Generic for more + information on generic types. Generic functions work as follows: + + def repeat(x: T, n: int) -> List[T]: + '''Return a list containing n references to x.''' + return [x]*n + + def longest(x: A, y: A) -> A: + '''Return the longest of two strings.''' + return x if len(x) >= len(y) else y + + The latter example's signature is essentially the overloading + of (str, str) -> str and (bytes, bytes) -> bytes. Also note + that if the arguments are instances of some subclass of str, + the return type is still plain str. + + At runtime, isinstance(x, T) and issubclass(C, T) will raise TypeError. + + Type variables defined with covariant=True or contravariant=True + can be used to declare covariant or contravariant generic types. + See PEP 484 for more details. By default generic types are invariant + in all type variables. + + Type variables can be introspected. e.g.: + + T.__name__ == 'T' + T.__constraints__ == () + T.__covariant__ == False + T.__contravariant__ = False + A.__constraints__ == (str, bytes) + + Note that only type variables defined in global scope can be pickled. + __bound____constraints____covariant____contravariant__constraintscovariantcontravariantBivariant types are not supported.Constraints cannot be combined with bound=...A single constraint is not allowedTypeVar(name, constraint, ...): constraints must be types.Bound must be a type.def_mod_normalize_aliasThe central part of internal API. + + This represents a generic version of type 'origin' with type arguments 'params'. + There are two kind of these aliases: user defined and special. 
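The `Union`, `Optional`, `Literal` and `TypeVar` docstrings above can be checked directly at the interpreter; a short sketch restating their documented behaviour:

```python
from typing import List, Literal, Optional, TypeVar, Union

T = TypeVar("T")                 # unconstrained type variable
A = TypeVar("A", str, bytes)     # constrained to str or bytes

def first(items: List[T]) -> Optional[T]:
    # Optional[T] is exactly Union[T, None].
    return items[0] if items else None

def longest(x: A, y: A) -> A:
    return x if len(x) >= len(y) else y

Mode = Literal["r", "rb", "w", "wb"]

# Redundant arguments are skipped and single-argument unions collapse.
assert Union[int, str, int] == Union[int, str]
assert Union[int] is int
assert Optional[str] == Union[str, None]
```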
The special ones + are wrappers around builtin collections and ABCs in collections.abc. These must + have 'name' always set. If 'inst' is False, then the alias can't be instantiated, + this is used by e.g. typing.List and typing.Dict. + _TypingEllipsis_TypingEmptyCannot subscript already-subscripted Parameters to generic types must be types.typing.Callabletyping.Callable[['typing.Callable''[['], '], 'Type cannot be instantiated; use " cannot be instantiated; ""use "() instead__orig_class__Subscripted generics cannot be used with class and instance checks"Subscripted generics cannot be used with"" class and instance checks"_VariadicGenericAliasSame as _GenericAlias above but for variadic aliases. Currently, + this is used only by special internal aliases: Tuple and Callable. + __getitem_inner__Callable must be used as Callable[[arg, ...], result]."Callable must be used as ""Callable[[arg, ...], result]."Callable[args, result]: args must be a list. Got "Callable[args, result]: args must be a list."" Got "Tuple[t, ...]: t must be a type.Tuple[t0, t1, ...]: each t must be a type.Callable[args, result]: result must be a type.Callable[[arg, ...], result]: each arg must be a type.Abstract base class for generic types. + + A generic type is typically declared by inheriting from + this class parameterized with one or more type variables. + For example, a generic mapping type might be defined as:: + + class Mapping(Generic[KT, VT]): + def __getitem__(self, key: KT) -> VT: + ... + # Etc. + + This class can then be used as follows:: + + def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT: + try: + return mapping[key] + except KeyError: + return default + _is_protocol cannot be instantiated; it can be used only as a base class"it can be used only as a base class"Parameter list to [...] cannot be emptyParameters to [...] must all be type variables[...] must all be uniqueCannot inherit from plain GenericgvarsCannot inherit from Generic[...] multiple types.tvarsetgvarsets_varss_argsSome type variables () are not listed in Generic[") are"" not listed in Generic["Internal placeholder for () or []. Used by TupleMeta and CallableMeta + to allow empty list/tuple in specific places, without allowing them + to sneak in where prohibited. + Internal placeholder for ... (ellipsis)._is_runtime_protocol_TYPING_INTERNALS_SPECIAL_NAMES_MutableMapping__markerEXCLUDED_ATTRIBUTES_get_protocol_attrsCollect protocol members from a protocol class objects. + + This includes names actually defined in the class dictionary, as well + as names that appear in annotations. Special names (above) are skipped. + _is_callable_members_only_no_initProtocols cannot be instantiated_allow_reckless_class_cheksAllow instnance and class checks for special stdlib modules. + + The abc and functools modules indiscriminately call isinstance() and + issubclass() on the whole MRO of a user class, which may contain protocols. + _PROTO_WHITELIST_ProtocolMetaBase class for protocol classes. + + Protocol classes are defined as:: + + class Proto(Protocol): + def meth(self) -> int: + ... + + Such classes are primarily used with static type checkers that recognize + structural subtyping (static duck-typing), for example:: + + class C: + def meth(self) -> int: + return 0 + + def func(x: Proto) -> int: + return x.meth() + + func(C()) # Passes static type check + + See PEP 544 for details. 
Protocol classes decorated with + @typing.runtime_checkable act as simple-minded runtime protocols that check + only the presence of given attributes, ignoring their type signatures. + Protocol classes can be generic, they are defined as:: + + class GenProto(Protocol[T]): + def meth(self) -> T: + ... + _proto_hookInstance and class checks can only be used with @runtime_checkable protocols"Instance and class checks can only be used with"" @runtime_checkable protocols"Protocols with non-method members don't support issubclass()"Protocols with non-method members"" don't support issubclass()"Protocols can only inherit from other protocols, got %r'Protocols can only inherit from other'' protocols, got %r'Mark a protocol class as a runtime protocol. + + Such protocol can be used with isinstance() and issubclass(). + Raise TypeError if applied to a non-protocol class. + This allows a simple-minded structural check very similar to + one trick ponies in collections.abc such as Iterable. + For example:: + + @runtime_checkable + class Closable(Protocol): + def close(self): ... + + assert isinstance(open('/some/file'), Closable) + + Warning: this will check only the presence of the required methods, + not their type signatures! + @runtime_checkable can be only applied to protocol classes, got %r'@runtime_checkable can be only applied to protocol classes,'' got %r'Cast a value to a type. + + This returns the value unchanged. To the type checker this + signals that the return value has the designated type, but at + runtime we intentionally don't check anything (we want this + to be as fast as possible). + _get_defaultsInternal helper to extract the default arguments, by name.pos_offset_allowed_typesReturn type hints for an object. + + This is often the same as obj.__annotations__, but it handles + forward references encoded as string literals, and if necessary + adds Optional[t] if a default value equal to None is set. + + The argument may be a module, class, method, or function. The annotations + are returned as a dictionary. For classes, annotations include also + inherited members. + + TypeError is raised if the argument is not of a type that can contain + annotations, and an empty dictionary is returned if no annotations are + present. + + BEWARE -- the behavior of globalns and localns is counterintuitive + (unless you are familiar with how eval() and exec() work). The + search order is locals first, then globals. + + - If no dict arguments are passed, an attempt is made to use the + globals from obj (or the respective module's globals for classes), + and these are also used as the locals. If the object does not appear + to have globals, an empty dictionary is used. + + - If one dict argument is passed, it is used for both globals and + locals. + + - If two dict arguments are passed, they specify globals and + locals, respectively. + __no_type_check__hintsbase_globalsnsobj{!r} is not a module, class, method, or function.'{!r} is not a module, class, method, ''or function.'Get the unsubscripted version of a type. + + This supports generic types, Callable, Tuple, Union, Literal, Final and ClassVar. + Return None for unsupported types. Examples:: + + get_origin(Literal[42]) is Literal + get_origin(int) is None + get_origin(ClassVar[int]) is ClassVar + get_origin(Generic) is Generic + get_origin(Generic[T]) is Generic + get_origin(Union[T, int]) is Union + get_origin(List[Tuple[T, T]][int]) == list + Get type arguments with all substitutions performed. 
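The `Protocol` and `runtime_checkable` docstrings above describe structural (duck-typed) checks; a sketch of the documented pattern:

```python
from typing import Protocol, runtime_checkable

@runtime_checkable
class Closable(Protocol):
    def close(self) -> None: ...

class Resource:
    def close(self) -> None:
        print("closed")

# Only the presence of the required method is checked at runtime,
# never its signature -- exactly as the docstring warns.
assert isinstance(Resource(), Closable)
```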
+ + For unions, basic simplifications used by Union constructor are performed. + Examples:: + get_args(Dict[str, int]) == (str, int) + get_args(int) == () + get_args(Union[int, Union[T, int], str][int]) == (int, str) + get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) + get_args(Callable[[], T][int]) == ([], int) + Decorator to indicate that annotations are not type hints. + + The argument must be a class or function; if it is a class, it + applies recursively to all methods and classes defined in that class + (but not to methods defined in its superclasses or subclasses). + + This mutates the function(s) or class(es) in place. + arg_attrsDecorator to give another decorator the @no_type_check effect. + + This wraps the decorator with something that wraps the decorated + function in @no_type_check. + wrapped_decorator_overload_dummyHelper for @overload to raise when called.You should not call an overloaded function. A series of @overload-decorated functions outside a stub module should always be followed by an implementation that is not @overload-ed."You should not call an overloaded function. ""A series of @overload-decorated functions ""outside a stub module should always be followed ""by an implementation that is not @overload-ed."Decorator for overloaded functions/methods. + + In a stub file, place two or more stub definitions for the same + function in a row, each decorated with @overload. For example: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + + In a non-stub file (i.e. a regular .py file), do the same but + follow it with an implementation. The implementation should *not* + be decorated with @overload. For example: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + def utf8(value): + # implementation goes here + A decorator to indicate final methods and final classes. + + Use this decorator to indicate to type checkers that the decorated + method cannot be overridden, and decorated class cannot be subclassed. + For example: + + class Base: + @final + def done(self) -> None: + ... + class Sub(Base): + def done(self) -> None: # Error reported by type checker + ... + + @final + class Leaf: + ... + class Other(Leaf): # Error reported by type checker + ... + + There is no runtime checking of these properties. + KTVTT_coV_coVT_coT_contraCT_co_aliasCallable type; Callable[[int], str] is a function of (int) -> str. + + The subscription syntax must always be used with exactly two + values: the argument list and the return type. The argument list + must be a list of types or ellipsis; the return type must be a single type. + + There is no syntax to indicate optional or keyword arguments, + such function types are rarely used as callback types. + Tuple type; Tuple[X, Y] is the cross-product type of X and Y. + + Example: Tuple[T1, T2] is a tuple of two elements corresponding + to type variables T1 and T2. Tuple[int, float, str] is a tuple + of an int, a float and a string. + + To specify a variable-length tuple of homogeneous type, use Tuple[T, ...]. + A special construct usable to annotate class objects. + + For example, suppose we have the following classes:: + + class User: ... # Abstract base for User classes + class BasicUser(User): ... + class ProUser(User): ... + class TeamUser(User): ... 
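`get_type_hints()`, `get_origin()` and `get_args()`, documented above, are the supported introspection entry points; a short sketch of the values they return:

```python
from typing import Dict, List, Optional, Union, get_args, get_origin, get_type_hints

def greet(name: str, times: int = 1) -> List[str]:
    return [f"hello {name}"] * times

assert get_type_hints(greet) == {"name": str, "times": int, "return": List[str]}
assert get_origin(Dict[str, int]) is dict
assert get_args(Dict[str, int]) == (str, int)
# Nested unions are flattened before the arguments are reported.
assert get_args(Union[int, Optional[str]]) == (int, str, type(None))
```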
+ + And a function that takes a class argument that's a subclass of + User and returns an instance of the corresponding class:: + + U = TypeVar('U', bound=User) + def new_user(user_class: Type[U]) -> U: + user = user_class() + # (Here we could write the user object to a database) + return user + + joe = new_user(BasicUser) + + At this point the type checker knows that joe has type BasicUser. + An ABC with one abstract method __int__.An ABC with one abstract method __float__.An ABC with one abstract method __complex__.An ABC with one abstract method __bytes__.An ABC with one abstract method __index__.An ABC with one abstract method __abs__ that is covariant in its return type.An ABC with one abstract method __round__ that is covariant in its return type._make_nmtupleNamedTuple('Name', [(f0, t0), (f1, t1), ...]); each t must be a typenm_tpl_field_types_source_prohibitedNamedTupleMetadefaults_dictNon-default namedtuple field {field_name} cannot follow default field(s) {default_names}"Non-default namedtuple field {field_name} cannot ""follow default field(s) {default_names}"default_namesCannot overwrite NamedTuple attribute Typed version of namedtuple. + + Usage in Python versions >= 3.6:: + + class Employee(NamedTuple): + name: str + id: int + + This is equivalent to:: + + Employee = collections.namedtuple('Employee', ['name', 'id']) + + The resulting class has an extra __annotations__ attribute, giving a + dict that maps field names to types. (The field names are also in + the _fields attribute, which is part of the namedtuple API.) + Alternative equivalent keyword syntax is also accepted:: + + Employee = NamedTuple('Employee', name=str, id=int) + + In Python versions <= 3.5 use:: + + Employee = NamedTuple('Employee', [('name', str), ('id', int)]) + NamedTuple.__new__(): not enough argumentsPassing 'typename' as keyword argument is deprecatedNamedTuple.__new__() missing 1 required positional argument: 'typename'"NamedTuple.__new__() missing 1 required positional ""argument: 'typename'"NamedTuple.__new__() takes from 2 to 3 positional arguments but 'NamedTuple.__new__() takes from 2 to 3 ''positional arguments but ' were given'were given'Passing 'fields' as keyword argument is deprecatedEither list of fields or keywords can be provided to NamedTuple, not both"Either list of fields or keywords"" can be provided to NamedTuple, not both"($cls, typename, fields=None, /, **kwargs)_dict_new_typeddict_newTypedDict takes either a dict or keyword arguments, but not both"TypedDict takes either a dict or keyword arguments,"" but not both"__total___TypedDictMeta_check_failsTypedDict does not support instance and class checksCreate new typed dict class object. + + This method is called directly when TypedDict is subclassed, + or via _typeddict_new when TypedDict is instantiated. This way + TypedDict supports all three syntax forms described in its docstring. + Subclasses and instances of TypedDict return actual dictionaries + via _dict_new. + tp_dictannsTypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a typeA simple typed namespace. At runtime it is equivalent to a plain dict. + + TypedDict creates a dictionary type that expects all of its + instances to have a certain set of keys, where each key is + associated with a value of a consistent type. This expectation + is not checked at runtime but is only enforced by type checkers. 
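The typed `NamedTuple` described above accepts both the class syntax and the functional form; a minimal sketch of the two spellings:

```python
from typing import NamedTuple

class Employee(NamedTuple):
    name: str
    id: int = 0          # defaults must follow non-default fields

e = Employee("alice", 3)
assert e == ("alice", 3)
assert Employee.__annotations__ == {"name": str, "id": int}

# Functional form, equivalent to the class above:
Point = NamedTuple("Point", [("x", int), ("y", int)])
assert Point(1, 2) == (1, 2)
```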
+ Usage:: + + class Point2D(TypedDict): + x: int + y: int + label: str + + a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK + b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check + + assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first') + + The type info can be accessed via Point2D.__annotations__. TypedDict + supports two additional equivalent forms:: + + Point2D = TypedDict('Point2D', x=int, y=int, label=str) + Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str}) + + By default, all keys must be present in a TypedDict. It is possible + to override this by specifying totality. + Usage:: + + class point2D(TypedDict, total=False): + x: int + y: int + + This means that a point2D TypedDict can have any of the keys omitted.A type + checker is only expected to support a literal False or True as the value of + the total argument. True is the default, and makes all items defined in the + class body be required. + + The class syntax is only supported in Python 3.6+, while two other + syntax forms work for Python 2.7 and 3.2+ + NewType creates simple unique types with almost zero + runtime overhead. NewType(name, tp) is considered a subtype of tp + by static type checkers. At runtime, NewType(name, tp) returns + a dummy function that simply returns its argument. Usage:: + + UserId = NewType('UserId', int) + + def name_by_id(user_id: UserId) -> str: + ... + + UserId('user') # Fails type check + + name_by_id(42) # Fails type check + name_by_id(UserId(42)) # OK + + num = UserId(5) + 1 # type: int + new_type__supertype__IOGeneric base class for TextIO and BinaryIO. + + This is an abstract, generic version of the return of open(). + + NOTE: This does not distinguish between the different possible + classes (text vs. binary, read vs. write vs. read/write, + append-only, unbuffered). The TextIO and BinaryIO subclasses + below capture the distinctions between text vs. binary, which is + pervasive in the interface; however we currently do not offer a + way to track the other distinctions in the type system. + IO[AnyStr]BinaryIOTyped version of the return of open() in binary mode.TextIOTyped version of the return of open() in text mode.Wrapper namespace for IO generic classes..ioWrapper namespace for re type aliases..re# Avoid confusion with the re we export.# Please keep __all__ alphabetized within each category.# Super-special typing primitives.# ABCs (from collections.abc).# collections.abc.Set.# Structural checks, a.k.a. protocols.# Concrete collection types.# Not really a type.# One-off things.# The pseudo-submodules 're' and 'io' are part of the public# namespace, but excluded from __all__ because they might stomp on# legitimate imports of those modules.# Flatten out Union[Union[...], ...].# Weed out strict duplicates, preserving the first of each occurrence.# All real errors (not unhashable args) are raised below.# Close enough.# There is no '_type_check' call because arguments to Literal[...] 
are# values, not types.# for pickling# Special typing constructs Union, Optional, Generic, Callable and Tuple# use three special attributes for internal bookkeeping of generic types:# * __parameters__ is a tuple of unique free type parameters of a generic# type, for example, Dict[T, T].__parameters__ == (T,);# * __origin__ keeps a reference to a type that was subscripted,# e.g., Union[T, int].__origin__ == Union, or the non-generic version of# the type.# * __args__ is a tuple of all arguments used in subscripting,# e.g., Dict[T, int].__args__ == (T, int).# Mapping from non-generic type names that have a generic alias in typing# but with a different name.# This is not documented.# Can't subscript Generic[...] or Protocol[...].# We don't copy self._special.# generic version of an ABC or built-in class# We are careful for copy and pickle.# Also for simplicity we just don't relay all dunder names# Generic and Protocol can only be subscripted with unique type variables.# Subscripting a regular Generic subclass.# Look for Generic[T1, ..., Tn].# If found, tvars must be a subset of it.# If not found, tvars is it.# Also check for and reject plain Generic,# and reject multiple Generic[...].# These special attributes will be not collected as protocol members.# without object# PEP 544 prohibits using issubclass() with protocols that have non-method members.# For platforms without _getframe().# This metaclass is really unfortunate and exists only because of# the lack of __instancehook__.# We need this method for situations where attributes are# assigned in __init__.# All *methods* can be blocked by setting them to None.# Determine if this is a protocol or a concrete subclass.# Set (or override) the protocol subclass hook.# First, perform various sanity checks.# Same error message as for issubclass(1, int).# Second, perform the actual structural compatibility check.# Check if the members appears in the class dictionary...# ...or in annotations, if it is a sub-protocol.# We have nothing more to do for non-protocols...# ... otherwise check consistency of bases, and prohibit instantiation.# Some built-in functions don't have __code__, __defaults__, etc.# Classes require a special treatment.# Find globalns for the unwrapped object.# Return empty annotations for something that _could_ have them.# built-in classes# Some unconstrained type variables. These are used by the container types.# (These are not for export.)# Any type.# Key type.# Value type.# Any type covariant containers.# Value type covariant containers.# Ditto contravariant.# Internal type variable used for Type[].# A useful type variable with constraints. This represents string types.# (This one *is* for export!)# Various ABCs mimicking those in collections.abc.# Not generic.# NOTE: Mapping is only covariant in the value type.# Not generic# Prior to PEP 526, only _field_types attribute was assigned.# Now __annotations__ are used and _field_types is deprecated (remove in 3.9)# attributes prohibited to set in NamedTuple class syntax# update from user namespace without overriding special namedtuple attributes# allow the "cls" keyword be passed# allow the "typename" keyword be passed# allow the "fields" keyword be passed# Setting correct module is necessary to make typed dict classes pickleable.# Typed dicts are only for static structural subtyping.# Python-version-specific alias (Python 2: unicode; Python 3: str)# Constant that's True when type checking, but False here.b' +The typing module: Support for gradual typing as defined by PEP 484. 
+ +At large scale, the structure of the module is following: +* Imports and exports, all public names should be explicitly added to __all__. +* Internal helper functions: these should never be used in code outside this module. +* _SpecialForm and its instances (special forms): Any, NoReturn, ClassVar, Union, Optional +* Two classes whose instances can be type arguments in addition to types: ForwardRef and TypeVar +* The core of internal generics API: _GenericAlias and _VariadicGenericAlias, the latter is + currently only used by Tuple and Callable. All subscripted types like X[int], Union[int, str], + etc., are instances of either of these classes. +* The public counterpart of the generics API consists of two classes: Generic and Protocol. +* Public helper functions: get_type_hints, overload, cast, no_type_check, + no_type_check_decorator. +* Generic aliases for collections.abc ABCs and few additional protocols. +* Special types: NewType, NamedTuple, TypedDict. +* Wrapper submodules for re and io related types. +'u' +The typing module: Support for gradual typing as defined by PEP 484. + +At large scale, the structure of the module is following: +* Imports and exports, all public names should be explicitly added to __all__. +* Internal helper functions: these should never be used in code outside this module. +* _SpecialForm and its instances (special forms): Any, NoReturn, ClassVar, Union, Optional +* Two classes whose instances can be type arguments in addition to types: ForwardRef and TypeVar +* The core of internal generics API: _GenericAlias and _VariadicGenericAlias, the latter is + currently only used by Tuple and Callable. All subscripted types like X[int], Union[int, str], + etc., are instances of either of these classes. +* The public counterpart of the generics API consists of two classes: Generic and Protocol. +* Public helper functions: get_type_hints, overload, cast, no_type_check, + no_type_check_decorator. +* Generic aliases for collections.abc ABCs and few additional protocols. +* Special types: NewType, NamedTuple, TypedDict. +* Wrapper submodules for re and io related types. +'b'Any'u'Any'b'ClassVar'u'ClassVar'b'Final'u'Final'b'ForwardRef'u'ForwardRef'b'Generic'u'Generic'b'Literal'u'Literal'b'Optional'u'Optional'b'Tuple'u'Tuple'b'Type'u'Type'b'TypeVar'u'TypeVar'b'Union'u'Union'b'AbstractSet'u'AbstractSet'b'ContextManager'u'ContextManager'b'AsyncContextManager'u'AsyncContextManager'b'SupportsAbs'u'SupportsAbs'b'SupportsBytes'u'SupportsBytes'b'SupportsComplex'u'SupportsComplex'b'SupportsFloat'u'SupportsFloat'b'SupportsIndex'u'SupportsIndex'b'SupportsInt'u'SupportsInt'b'SupportsRound'u'SupportsRound'b'Deque'u'Deque'b'Dict'u'Dict'b'DefaultDict'u'DefaultDict'b'List'u'List'b'FrozenSet'u'FrozenSet'b'NamedTuple'u'NamedTuple'b'TypedDict'u'TypedDict'b'AnyStr'u'AnyStr'b'cast'u'cast'b'get_args'u'get_args'b'get_origin'u'get_origin'b'get_type_hints'u'get_type_hints'b'NewType'u'NewType'b'no_type_check'u'no_type_check'b'no_type_check_decorator'u'no_type_check_decorator'b'NoReturn'u'NoReturn'b'overload'u'overload'b'runtime_checkable'u'runtime_checkable'b'Text'u'Text'b'TYPE_CHECKING'u'TYPE_CHECKING'b'Check that the argument is a type, and return it (internal helper). + + As a special case, accept None and return type(None) instead. Also wrap strings + into ForwardRef instances. Consider several corner cases, for example plain + special forms like Union are not valid, while Union[int, str] is OK, etc. 
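(Editor's aside: a minimal, runnable sketch of the __origin__/__args__/__parameters__ bookkeeping and the Union flattening described in the comments above; it assumes only the public typing API on Python 3.8.)::

    from typing import Dict, List, TypeVar, Union, get_args, get_origin

    T = TypeVar('T')

    # __origin__/__args__/__parameters__ are what get_origin()/get_args() expose.
    assert get_origin(Dict[str, int]) is dict
    assert get_args(Dict[str, int]) == (str, int)
    assert Dict[T, int].__parameters__ == (T,)
    assert get_origin(List[int]) is list

    # Unions are flattened and deduplicated when they are constructed.
    assert Union[Union[int, str], int] == Union[int, str]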
+ The msg argument is a human-readable error message, e.g:: + + "Union[arg, ...]: arg should be a type." + + We append the repr() of the actual value (truncated to 100 chars). + 'u'Check that the argument is a type, and return it (internal helper). + + As a special case, accept None and return type(None) instead. Also wrap strings + into ForwardRef instances. Consider several corner cases, for example plain + special forms like Union are not valid, while Union[int, str] is OK, etc. + The msg argument is a human-readable error message, e.g:: + + "Union[arg, ...]: arg should be a type." + + We append the repr() of the actual value (truncated to 100 chars). + 'b' is not valid as type argument'u' is not valid as type argument'b'Plain 'u'Plain 'b' Got 'u' Got 'b'Return the repr() of an object, special-casing types (internal helper). + + If obj is a type, we return a shorter version than the default + type.__repr__, based on the module and qualified name, which is + typically enough to uniquely identify a type. For everything + else, we fall back on repr(obj). + 'u'Return the repr() of an object, special-casing types (internal helper). + + If obj is a type, we return a shorter version than the default + type.__repr__, based on the module and qualified name, which is + typically enough to uniquely identify a type. For everything + else, we fall back on repr(obj). + 'b'Collect all type variable contained in types in order of + first appearance (lexicographic order). For example:: + + _collect_type_vars((T, List[S, T])) == (T, S) + 'u'Collect all type variable contained in types in order of + first appearance (lexicographic order). For example:: + + _collect_type_vars((T, List[S, T])) == (T, S) + 'b'Substitute type variables 'tvars' with substitutions 'subs'. + These two must have the same length. + 'u'Substitute type variables 'tvars' with substitutions 'subs'. + These two must have the same length. + 'b'Check correct count for parameters of a generic cls (internal helper). + This gives a nice error message in case of count mismatch. + 'u'Check correct count for parameters of a generic cls (internal helper). + This gives a nice error message in case of count mismatch. + 'b' is not a generic class'u' is not a generic class'b'Too 'u'Too 'b'many'u'many'b'few'u'few'b' parameters for 'u' parameters for 'b'; actual 'u'; actual 'b', expected 'u', expected 'b'An internal helper for Union creation and substitution: flatten Unions + among parameters, then remove duplicates. + 'u'An internal helper for Union creation and substitution: flatten Unions + among parameters, then remove duplicates. + 'b'Internal wrapper caching __getitem__ of generic types with a fallback to + original function for non-hashable arguments. + 'u'Internal wrapper caching __getitem__ of generic types with a fallback to + original function for non-hashable arguments. + 'b'Evaluate all forward references in the given type t. + For use of globalns and localns see the docstring for get_type_hints(). + 'u'Evaluate all forward references in the given type t. + For use of globalns and localns see the docstring for get_type_hints(). + 'b'Mixin to prohibit subclassing'u'Mixin to prohibit subclassing'b'_root'u'_root'b'Cannot subclass special typing classes'u'Cannot subclass special typing classes'b'Mixin to indicate that object should not be copied.'u'Mixin to indicate that object should not be copied.'b'Internal indicator of special typing constructs. + See _doc instance attribute for specific docs. 
+ 'u'Internal indicator of special typing constructs. + See _doc instance attribute for specific docs. + 'b'_doc'u'_doc'b'Constructor. + + This only exists to give a better error message in case + someone tries to subclass a special typing object (not a good idea). + 'u'Constructor. + + This only exists to give a better error message in case + someone tries to subclass a special typing object (not a good idea). + 'b'Cannot subclass 'u'Cannot subclass 'b'Cannot instantiate 'u'Cannot instantiate 'b' cannot be used with isinstance()'u' cannot be used with isinstance()'b' cannot be used with issubclass()'u' cannot be used with issubclass()'b' accepts only single type.'u' accepts only single type.'b'Cannot take a Union of no types.'u'Cannot take a Union of no types.'b'Union[arg, ...]: each arg must be a type.'u'Union[arg, ...]: each arg must be a type.'b'Optional[t] requires a single type.'u'Optional[t] requires a single type.'b' is not subscriptable'u' is not subscriptable'b'Special type indicating an unconstrained type. + + - Any is compatible with every type. + - Any assumed to have all methods. + - All values assumed to be instances of Any. + + Note that all the above statements are true from the point of view of + static type checkers. At runtime, Any should not be used with instance + or class checks. + 'u'Special type indicating an unconstrained type. + + - Any is compatible with every type. + - Any assumed to have all methods. + - All values assumed to be instances of Any. + + Note that all the above statements are true from the point of view of + static type checkers. At runtime, Any should not be used with instance + or class checks. + 'b'Special type indicating functions that never return. + Example:: + + from typing import NoReturn + + def stop() -> NoReturn: + raise Exception('no way') + + This type is invalid in other positions, e.g., ``List[NoReturn]`` + will fail in static type checkers. + 'u'Special type indicating functions that never return. + Example:: + + from typing import NoReturn + + def stop() -> NoReturn: + raise Exception('no way') + + This type is invalid in other positions, e.g., ``List[NoReturn]`` + will fail in static type checkers. + 'b'Special type construct to mark class variables. + + An annotation wrapped in ClassVar indicates that a given + attribute is intended to be used as a class variable and + should not be set on instances of that class. Usage:: + + class Starship: + stats: ClassVar[Dict[str, int]] = {} # class variable + damage: int = 10 # instance variable + + ClassVar accepts only types and cannot be further subscribed. + + Note that ClassVar is not a class itself, and should not + be used with isinstance() or issubclass(). + 'u'Special type construct to mark class variables. + + An annotation wrapped in ClassVar indicates that a given + attribute is intended to be used as a class variable and + should not be set on instances of that class. Usage:: + + class Starship: + stats: ClassVar[Dict[str, int]] = {} # class variable + damage: int = 10 # instance variable + + ClassVar accepts only types and cannot be further subscribed. + + Note that ClassVar is not a class itself, and should not + be used with isinstance() or issubclass(). + 'b'Special typing construct to indicate final names to type checkers. + + A final name cannot be re-assigned or overridden in a subclass. 
+ For example: + + MAX_SIZE: Final = 9000 + MAX_SIZE += 1 # Error reported by type checker + + class Connection: + TIMEOUT: Final[int] = 10 + + class FastConnector(Connection): + TIMEOUT = 1 # Error reported by type checker + + There is no runtime checking of these properties. + 'u'Special typing construct to indicate final names to type checkers. + + A final name cannot be re-assigned or overridden in a subclass. + For example: + + MAX_SIZE: Final = 9000 + MAX_SIZE += 1 # Error reported by type checker + + class Connection: + TIMEOUT: Final[int] = 10 + + class FastConnector(Connection): + TIMEOUT = 1 # Error reported by type checker + + There is no runtime checking of these properties. + 'b'Union type; Union[X, Y] means either X or Y. + + To define a union, use e.g. Union[int, str]. Details: + - The arguments must be types and there must be at least one. + - None as an argument is a special case and is replaced by + type(None). + - Unions of unions are flattened, e.g.:: + + Union[Union[int, str], float] == Union[int, str, float] + + - Unions of a single argument vanish, e.g.:: + + Union[int] == int # The constructor actually returns int + + - Redundant arguments are skipped, e.g.:: + + Union[int, str, int] == Union[int, str] + + - When comparing unions, the argument order is ignored, e.g.:: + + Union[int, str] == Union[str, int] + + - You cannot subclass or instantiate a union. + - You can use Optional[X] as a shorthand for Union[X, None]. + 'u'Union type; Union[X, Y] means either X or Y. + + To define a union, use e.g. Union[int, str]. Details: + - The arguments must be types and there must be at least one. + - None as an argument is a special case and is replaced by + type(None). + - Unions of unions are flattened, e.g.:: + + Union[Union[int, str], float] == Union[int, str, float] + + - Unions of a single argument vanish, e.g.:: + + Union[int] == int # The constructor actually returns int + + - Redundant arguments are skipped, e.g.:: + + Union[int, str, int] == Union[int, str] + + - When comparing unions, the argument order is ignored, e.g.:: + + Union[int, str] == Union[str, int] + + - You cannot subclass or instantiate a union. + - You can use Optional[X] as a shorthand for Union[X, None]. + 'b'Optional type. + + Optional[X] is equivalent to Union[X, None]. + 'u'Optional type. + + Optional[X] is equivalent to Union[X, None]. + 'b'Special typing form to define literal types (a.k.a. value types). + + This form can be used to indicate to type checkers that the corresponding + variable or function parameter has a value equivalent to the provided + literal (or one of several literals): + + def validate_simple(data: Any) -> Literal[True]: # always returns True + ... + + MODE = Literal['r', 'rb', 'w', 'wb'] + def open_helper(file: str, mode: MODE) -> str: + ... + + open_helper('/some/path', 'r') # Passes type check + open_helper('/other/path', 'typo') # Error in type checker + + Literal[...] cannot be subclassed. At runtime, an arbitrary value + is allowed as type argument to Literal[...], but type checkers may + impose restrictions. + 'u'Special typing form to define literal types (a.k.a. value types). + + This form can be used to indicate to type checkers that the corresponding + variable or function parameter has a value equivalent to the provided + literal (or one of several literals): + + def validate_simple(data: Any) -> Literal[True]: # always returns True + ... + + MODE = Literal['r', 'rb', 'w', 'wb'] + def open_helper(file: str, mode: MODE) -> str: + ... 
+ + open_helper('/some/path', 'r') # Passes type check + open_helper('/other/path', 'typo') # Error in type checker + + Literal[...] cannot be subclassed. At runtime, an arbitrary value + is allowed as type argument to Literal[...], but type checkers may + impose restrictions. + 'b'Internal wrapper to hold a forward reference.'u'Internal wrapper to hold a forward reference.'b'__forward_arg__'u'__forward_arg__'b'__forward_code__'u'__forward_code__'b'__forward_evaluated__'u'__forward_evaluated__'b'__forward_value__'u'__forward_value__'b'__forward_is_argument__'u'__forward_is_argument__'b'Forward reference must be a string -- got 'u'Forward reference must be a string -- got 'b'Forward reference must be an expression -- got 'u'Forward reference must be an expression -- got 'b'Forward references must evaluate to types.'u'Forward references must evaluate to types.'b'ForwardRef('u'ForwardRef('b'Type variable. + + Usage:: + + T = TypeVar('T') # Can be anything + A = TypeVar('A', str, bytes) # Must be str or bytes + + Type variables exist primarily for the benefit of static type + checkers. They serve as the parameters for generic types as well + as for generic function definitions. See class Generic for more + information on generic types. Generic functions work as follows: + + def repeat(x: T, n: int) -> List[T]: + '''Return a list containing n references to x.''' + return [x]*n + + def longest(x: A, y: A) -> A: + '''Return the longest of two strings.''' + return x if len(x) >= len(y) else y + + The latter example's signature is essentially the overloading + of (str, str) -> str and (bytes, bytes) -> bytes. Also note + that if the arguments are instances of some subclass of str, + the return type is still plain str. + + At runtime, isinstance(x, T) and issubclass(C, T) will raise TypeError. + + Type variables defined with covariant=True or contravariant=True + can be used to declare covariant or contravariant generic types. + See PEP 484 for more details. By default generic types are invariant + in all type variables. + + Type variables can be introspected. e.g.: + + T.__name__ == 'T' + T.__constraints__ == () + T.__covariant__ == False + T.__contravariant__ = False + A.__constraints__ == (str, bytes) + + Note that only type variables defined in global scope can be pickled. + 'u'Type variable. + + Usage:: + + T = TypeVar('T') # Can be anything + A = TypeVar('A', str, bytes) # Must be str or bytes + + Type variables exist primarily for the benefit of static type + checkers. They serve as the parameters for generic types as well + as for generic function definitions. See class Generic for more + information on generic types. Generic functions work as follows: + + def repeat(x: T, n: int) -> List[T]: + '''Return a list containing n references to x.''' + return [x]*n + + def longest(x: A, y: A) -> A: + '''Return the longest of two strings.''' + return x if len(x) >= len(y) else y + + The latter example's signature is essentially the overloading + of (str, str) -> str and (bytes, bytes) -> bytes. Also note + that if the arguments are instances of some subclass of str, + the return type is still plain str. + + At runtime, isinstance(x, T) and issubclass(C, T) will raise TypeError. + + Type variables defined with covariant=True or contravariant=True + can be used to declare covariant or contravariant generic types. + See PEP 484 for more details. By default generic types are invariant + in all type variables. + + Type variables can be introspected. 
e.g.: + + T.__name__ == 'T' + T.__constraints__ == () + T.__covariant__ == False + T.__contravariant__ = False + A.__constraints__ == (str, bytes) + + Note that only type variables defined in global scope can be pickled. + 'b'__bound__'u'__bound__'b'__constraints__'u'__constraints__'b'__covariant__'u'__covariant__'b'__contravariant__'u'__contravariant__'b'Bivariant types are not supported.'u'Bivariant types are not supported.'b'Constraints cannot be combined with bound=...'u'Constraints cannot be combined with bound=...'b'A single constraint is not allowed'u'A single constraint is not allowed'b'TypeVar(name, constraint, ...): constraints must be types.'u'TypeVar(name, constraint, ...): constraints must be types.'b'Bound must be a type.'u'Bound must be a type.'b'frozenset'u'frozenset'b'The central part of internal API. + + This represents a generic version of type 'origin' with type arguments 'params'. + There are two kind of these aliases: user defined and special. The special ones + are wrappers around builtin collections and ABCs in collections.abc. These must + have 'name' always set. If 'inst' is False, then the alias can't be instantiated, + this is used by e.g. typing.List and typing.Dict. + 'u'The central part of internal API. + + This represents a generic version of type 'origin' with type arguments 'params'. + There are two kind of these aliases: user defined and special. The special ones + are wrappers around builtin collections and ABCs in collections.abc. These must + have 'name' always set. If 'inst' is False, then the alias can't be instantiated, + this is used by e.g. typing.List and typing.Dict. + 'b'Cannot subscript already-subscripted 'u'Cannot subscript already-subscripted 'b'Parameters to generic types must be types.'u'Parameters to generic types must be types.'b'typing.Callable'u'typing.Callable'b'typing.Callable[['u'typing.Callable[['b'], 'u'], 'b'Type 'u'Type 'b' cannot be instantiated; use 'u' cannot be instantiated; use 'b'() instead'u'() instead'b'__origin__'u'__origin__'b'_inst'u'_inst'b'_special'u'_special'b'Subscripted generics cannot be used with class and instance checks'u'Subscripted generics cannot be used with class and instance checks'b'Same as _GenericAlias above but for variadic aliases. Currently, + this is used only by special internal aliases: Tuple and Callable. + 'u'Same as _GenericAlias above but for variadic aliases. Currently, + this is used only by special internal aliases: Tuple and Callable. + 'b'Callable must be used as Callable[[arg, ...], result].'u'Callable must be used as Callable[[arg, ...], result].'b'Callable[args, result]: args must be a list. Got 'u'Callable[args, result]: args must be a list. Got 'b'Tuple[t, ...]: t must be a type.'u'Tuple[t, ...]: t must be a type.'b'Tuple[t0, t1, ...]: each t must be a type.'u'Tuple[t0, t1, ...]: each t must be a type.'b'Callable[args, result]: result must be a type.'u'Callable[args, result]: result must be a type.'b'Callable[[arg, ...], result]: each arg must be a type.'u'Callable[[arg, ...], result]: each arg must be a type.'b'Abstract base class for generic types. + + A generic type is typically declared by inheriting from + this class parameterized with one or more type variables. + For example, a generic mapping type might be defined as:: + + class Mapping(Generic[KT, VT]): + def __getitem__(self, key: KT) -> VT: + ... + # Etc. 
+ + This class can then be used as follows:: + + def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT: + try: + return mapping[key] + except KeyError: + return default + 'u'Abstract base class for generic types. + + A generic type is typically declared by inheriting from + this class parameterized with one or more type variables. + For example, a generic mapping type might be defined as:: + + class Mapping(Generic[KT, VT]): + def __getitem__(self, key: KT) -> VT: + ... + # Etc. + + This class can then be used as follows:: + + def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT: + try: + return mapping[key] + except KeyError: + return default + 'b' cannot be instantiated; it can be used only as a base class'u' cannot be instantiated; it can be used only as a base class'b'Parameter list to 'u'Parameter list to 'b'[...] cannot be empty'u'[...] cannot be empty'b'Parameters to 'u'Parameters to 'b'[...] must all be type variables'u'[...] must all be type variables'b'[...] must all be unique'u'[...] must all be unique'b'Cannot inherit from plain Generic'u'Cannot inherit from plain Generic'b'Cannot inherit from Generic[...] multiple types.'u'Cannot inherit from Generic[...] multiple types.'b'Some type variables ('u'Some type variables ('b') are not listed in Generic['u') are not listed in Generic['b'Internal placeholder for () or []. Used by TupleMeta and CallableMeta + to allow empty list/tuple in specific places, without allowing them + to sneak in where prohibited. + 'u'Internal placeholder for () or []. Used by TupleMeta and CallableMeta + to allow empty list/tuple in specific places, without allowing them + to sneak in where prohibited. + 'b'Internal placeholder for ... (ellipsis).'u'Internal placeholder for ... (ellipsis).'b'__parameters__'u'__parameters__'b'__orig_class__'u'__orig_class__'b'_is_protocol'u'_is_protocol'b'_is_runtime_protocol'u'_is_runtime_protocol'b'__subclasshook__'u'__subclasshook__'b'_MutableMapping__marker'u'_MutableMapping__marker'b'Collect protocol members from a protocol class objects. + + This includes names actually defined in the class dictionary, as well + as names that appear in annotations. Special names (above) are skipped. + 'u'Collect protocol members from a protocol class objects. + + This includes names actually defined in the class dictionary, as well + as names that appear in annotations. Special names (above) are skipped. + 'b'Protocols cannot be instantiated'u'Protocols cannot be instantiated'b'Allow instnance and class checks for special stdlib modules. + + The abc and functools modules indiscriminately call isinstance() and + issubclass() on the whole MRO of a user class, which may contain protocols. + 'u'Allow instnance and class checks for special stdlib modules. + + The abc and functools modules indiscriminately call isinstance() and + issubclass() on the whole MRO of a user class, which may contain protocols. + 'b'contextlib'b'Base class for protocol classes. + + Protocol classes are defined as:: + + class Proto(Protocol): + def meth(self) -> int: + ... + + Such classes are primarily used with static type checkers that recognize + structural subtyping (static duck-typing), for example:: + + class C: + def meth(self) -> int: + return 0 + + def func(x: Proto) -> int: + return x.meth() + + func(C()) # Passes static type check + + See PEP 544 for details. 
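(Editor's aside: a minimal sketch of a user-defined generic class in the style of the Generic docstring above; Registry is an illustrative name, not part of typing.)::

    from typing import Dict, Generic, Optional, TypeVar

    KT = TypeVar('KT')
    VT = TypeVar('VT')

    class Registry(Generic[KT, VT]):
        # A tiny mapping-like container parameterized by key and value types.
        def __init__(self) -> None:
            self._data: Dict[KT, VT] = {}

        def set(self, key: KT, value: VT) -> None:
            self._data[key] = value

        def get(self, key: KT) -> Optional[VT]:
            return self._data.get(key)

    r: Registry[str, int] = Registry()
    r.set('answer', 42)
    assert r.get('answer') == 42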
Protocol classes decorated with + @typing.runtime_checkable act as simple-minded runtime protocols that check + only the presence of given attributes, ignoring their type signatures. + Protocol classes can be generic, they are defined as:: + + class GenProto(Protocol[T]): + def meth(self) -> T: + ... + 'u'Base class for protocol classes. + + Protocol classes are defined as:: + + class Proto(Protocol): + def meth(self) -> int: + ... + + Such classes are primarily used with static type checkers that recognize + structural subtyping (static duck-typing), for example:: + + class C: + def meth(self) -> int: + return 0 + + def func(x: Proto) -> int: + return x.meth() + + func(C()) # Passes static type check + + See PEP 544 for details. Protocol classes decorated with + @typing.runtime_checkable act as simple-minded runtime protocols that check + only the presence of given attributes, ignoring their type signatures. + Protocol classes can be generic, they are defined as:: + + class GenProto(Protocol[T]): + def meth(self) -> T: + ... + 'b'Instance and class checks can only be used with @runtime_checkable protocols'u'Instance and class checks can only be used with @runtime_checkable protocols'b'Protocols with non-method members don't support issubclass()'u'Protocols with non-method members don't support issubclass()'b'Protocols can only inherit from other protocols, got %r'u'Protocols can only inherit from other protocols, got %r'b'Mark a protocol class as a runtime protocol. + + Such protocol can be used with isinstance() and issubclass(). + Raise TypeError if applied to a non-protocol class. + This allows a simple-minded structural check very similar to + one trick ponies in collections.abc such as Iterable. + For example:: + + @runtime_checkable + class Closable(Protocol): + def close(self): ... + + assert isinstance(open('/some/file'), Closable) + + Warning: this will check only the presence of the required methods, + not their type signatures! + 'u'Mark a protocol class as a runtime protocol. + + Such protocol can be used with isinstance() and issubclass(). + Raise TypeError if applied to a non-protocol class. + This allows a simple-minded structural check very similar to + one trick ponies in collections.abc such as Iterable. + For example:: + + @runtime_checkable + class Closable(Protocol): + def close(self): ... + + assert isinstance(open('/some/file'), Closable) + + Warning: this will check only the presence of the required methods, + not their type signatures! + 'b'@runtime_checkable can be only applied to protocol classes, got %r'u'@runtime_checkable can be only applied to protocol classes, got %r'b'Cast a value to a type. + + This returns the value unchanged. To the type checker this + signals that the return value has the designated type, but at + runtime we intentionally don't check anything (we want this + to be as fast as possible). + 'u'Cast a value to a type. + + This returns the value unchanged. To the type checker this + signals that the return value has the designated type, but at + runtime we intentionally don't check anything (we want this + to be as fast as possible). + 'b'Internal helper to extract the default arguments, by name.'u'Internal helper to extract the default arguments, by name.'b'Return type hints for an object. + + This is often the same as obj.__annotations__, but it handles + forward references encoded as string literals, and if necessary + adds Optional[t] if a default value equal to None is set. + + The argument may be a module, class, method, or function. 
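(Editor's aside: a minimal runnable sketch of the @runtime_checkable behaviour described above; SupportsClose and Resource are illustrative names.)::

    from typing import Protocol, runtime_checkable

    @runtime_checkable
    class SupportsClose(Protocol):
        def close(self) -> None: ...

    class Resource:  # no inheritance from SupportsClose is needed
        def close(self) -> None:
            print('closed')

    # isinstance() only checks that the required method is present.
    assert isinstance(Resource(), SupportsClose)
    assert not isinstance(42, SupportsClose)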
The annotations + are returned as a dictionary. For classes, annotations include also + inherited members. + + TypeError is raised if the argument is not of a type that can contain + annotations, and an empty dictionary is returned if no annotations are + present. + + BEWARE -- the behavior of globalns and localns is counterintuitive + (unless you are familiar with how eval() and exec() work). The + search order is locals first, then globals. + + - If no dict arguments are passed, an attempt is made to use the + globals from obj (or the respective module's globals for classes), + and these are also used as the locals. If the object does not appear + to have globals, an empty dictionary is used. + + - If one dict argument is passed, it is used for both globals and + locals. + + - If two dict arguments are passed, they specify globals and + locals, respectively. + 'u'Return type hints for an object. + + This is often the same as obj.__annotations__, but it handles + forward references encoded as string literals, and if necessary + adds Optional[t] if a default value equal to None is set. + + The argument may be a module, class, method, or function. The annotations + are returned as a dictionary. For classes, annotations include also + inherited members. + + TypeError is raised if the argument is not of a type that can contain + annotations, and an empty dictionary is returned if no annotations are + present. + + BEWARE -- the behavior of globalns and localns is counterintuitive + (unless you are familiar with how eval() and exec() work). The + search order is locals first, then globals. + + - If no dict arguments are passed, an attempt is made to use the + globals from obj (or the respective module's globals for classes), + and these are also used as the locals. If the object does not appear + to have globals, an empty dictionary is used. + + - If one dict argument is passed, it is used for both globals and + locals. + + - If two dict arguments are passed, they specify globals and + locals, respectively. + 'b'__no_type_check__'u'__no_type_check__'b'__globals__'u'__globals__'b'{!r} is not a module, class, method, or function.'u'{!r} is not a module, class, method, or function.'b'Get the unsubscripted version of a type. + + This supports generic types, Callable, Tuple, Union, Literal, Final and ClassVar. + Return None for unsupported types. Examples:: + + get_origin(Literal[42]) is Literal + get_origin(int) is None + get_origin(ClassVar[int]) is ClassVar + get_origin(Generic) is Generic + get_origin(Generic[T]) is Generic + get_origin(Union[T, int]) is Union + get_origin(List[Tuple[T, T]][int]) == list + 'u'Get the unsubscripted version of a type. + + This supports generic types, Callable, Tuple, Union, Literal, Final and ClassVar. + Return None for unsupported types. Examples:: + + get_origin(Literal[42]) is Literal + get_origin(int) is None + get_origin(ClassVar[int]) is ClassVar + get_origin(Generic) is Generic + get_origin(Generic[T]) is Generic + get_origin(Union[T, int]) is Union + get_origin(List[Tuple[T, T]][int]) == list + 'b'Get type arguments with all substitutions performed. + + For unions, basic simplifications used by Union constructor are performed. + Examples:: + get_args(Dict[str, int]) == (str, int) + get_args(int) == () + get_args(Union[int, Union[T, int], str][int]) == (int, str) + get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) + get_args(Callable[[], T][int]) == ([], int) + 'u'Get type arguments with all substitutions performed. 
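(Editor's aside: a minimal sketch of get_type_hints() resolving string forward references and adding Optional[...] for a None default, as described above; Node is an illustrative class.)::

    from typing import Optional, get_type_hints

    class Node:
        def link(self, other: 'Node', weight: int = None) -> 'Node':
            ...

    hints = get_type_hints(Node.link)
    assert hints['other'] is Node                # forward reference resolved
    assert hints['weight'] == Optional[int]      # None default adds Optional
    assert hints['return'] is Node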
+ + For unions, basic simplifications used by Union constructor are performed. + Examples:: + get_args(Dict[str, int]) == (str, int) + get_args(int) == () + get_args(Union[int, Union[T, int], str][int]) == (int, str) + get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) + get_args(Callable[[], T][int]) == ([], int) + 'b'Decorator to indicate that annotations are not type hints. + + The argument must be a class or function; if it is a class, it + applies recursively to all methods and classes defined in that class + (but not to methods defined in its superclasses or subclasses). + + This mutates the function(s) or class(es) in place. + 'u'Decorator to indicate that annotations are not type hints. + + The argument must be a class or function; if it is a class, it + applies recursively to all methods and classes defined in that class + (but not to methods defined in its superclasses or subclasses). + + This mutates the function(s) or class(es) in place. + 'b'Decorator to give another decorator the @no_type_check effect. + + This wraps the decorator with something that wraps the decorated + function in @no_type_check. + 'u'Decorator to give another decorator the @no_type_check effect. + + This wraps the decorator with something that wraps the decorated + function in @no_type_check. + 'b'Helper for @overload to raise when called.'u'Helper for @overload to raise when called.'b'You should not call an overloaded function. A series of @overload-decorated functions outside a stub module should always be followed by an implementation that is not @overload-ed.'u'You should not call an overloaded function. A series of @overload-decorated functions outside a stub module should always be followed by an implementation that is not @overload-ed.'b'Decorator for overloaded functions/methods. + + In a stub file, place two or more stub definitions for the same + function in a row, each decorated with @overload. For example: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + + In a non-stub file (i.e. a regular .py file), do the same but + follow it with an implementation. The implementation should *not* + be decorated with @overload. For example: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + def utf8(value): + # implementation goes here + 'u'Decorator for overloaded functions/methods. + + In a stub file, place two or more stub definitions for the same + function in a row, each decorated with @overload. For example: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + + In a non-stub file (i.e. a regular .py file), do the same but + follow it with an implementation. The implementation should *not* + be decorated with @overload. For example: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + def utf8(value): + # implementation goes here + 'b'A decorator to indicate final methods and final classes. + + Use this decorator to indicate to type checkers that the decorated + method cannot be overridden, and decorated class cannot be subclassed. + For example: + + class Base: + @final + def done(self) -> None: + ... + class Sub(Base): + def done(self) -> None: # Error reported by type checker + ... 
+ + @final + class Leaf: + ... + class Other(Leaf): # Error reported by type checker + ... + + There is no runtime checking of these properties. + 'u'A decorator to indicate final methods and final classes. + + Use this decorator to indicate to type checkers that the decorated + method cannot be overridden, and decorated class cannot be subclassed. + For example: + + class Base: + @final + def done(self) -> None: + ... + class Sub(Base): + def done(self) -> None: # Error reported by type checker + ... + + @final + class Leaf: + ... + class Other(Leaf): # Error reported by type checker + ... + + There is no runtime checking of these properties. + 'b'KT'u'KT'b'VT'u'VT'b'T_co'u'T_co'b'V_co'u'V_co'b'VT_co'u'VT_co'b'T_contra'u'T_contra'b'CT_co'u'CT_co'b'Callable type; Callable[[int], str] is a function of (int) -> str. + + The subscription syntax must always be used with exactly two + values: the argument list and the return type. The argument list + must be a list of types or ellipsis; the return type must be a single type. + + There is no syntax to indicate optional or keyword arguments, + such function types are rarely used as callback types. + 'u'Callable type; Callable[[int], str] is a function of (int) -> str. + + The subscription syntax must always be used with exactly two + values: the argument list and the return type. The argument list + must be a list of types or ellipsis; the return type must be a single type. + + There is no syntax to indicate optional or keyword arguments, + such function types are rarely used as callback types. + 'b'Tuple type; Tuple[X, Y] is the cross-product type of X and Y. + + Example: Tuple[T1, T2] is a tuple of two elements corresponding + to type variables T1 and T2. Tuple[int, float, str] is a tuple + of an int, a float and a string. + + To specify a variable-length tuple of homogeneous type, use Tuple[T, ...]. + 'u'Tuple type; Tuple[X, Y] is the cross-product type of X and Y. + + Example: Tuple[T1, T2] is a tuple of two elements corresponding + to type variables T1 and T2. Tuple[int, float, str] is a tuple + of an int, a float and a string. + + To specify a variable-length tuple of homogeneous type, use Tuple[T, ...]. + 'b'A special construct usable to annotate class objects. + + For example, suppose we have the following classes:: + + class User: ... # Abstract base for User classes + class BasicUser(User): ... + class ProUser(User): ... + class TeamUser(User): ... + + And a function that takes a class argument that's a subclass of + User and returns an instance of the corresponding class:: + + U = TypeVar('U', bound=User) + def new_user(user_class: Type[U]) -> U: + user = user_class() + # (Here we could write the user object to a database) + return user + + joe = new_user(BasicUser) + + At this point the type checker knows that joe has type BasicUser. + 'u'A special construct usable to annotate class objects. + + For example, suppose we have the following classes:: + + class User: ... # Abstract base for User classes + class BasicUser(User): ... + class ProUser(User): ... + class TeamUser(User): ... + + And a function that takes a class argument that's a subclass of + User and returns an instance of the corresponding class:: + + U = TypeVar('U', bound=User) + def new_user(user_class: Type[U]) -> U: + user = user_class() + # (Here we could write the user object to a database) + return user + + joe = new_user(BasicUser) + + At this point the type checker knows that joe has type BasicUser. 
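(Editor's aside: a minimal sketch extending the Type[] docstring above with a registry of class objects; User, BasicUser, ProUser and make_user are illustrative names.)::

    from typing import Dict, Type

    class User: ...
    class BasicUser(User): ...
    class ProUser(User): ...

    # The values of the registry are the classes themselves, not instances.
    user_classes: Dict[str, Type[User]] = {
        'basic': BasicUser,
        'pro': ProUser,
    }

    def make_user(kind: str) -> User:
        return user_classes[kind]()   # look up a class object and instantiate it

    assert isinstance(make_user('pro'), ProUser)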
+ 'b'An ABC with one abstract method __int__.'u'An ABC with one abstract method __int__.'b'An ABC with one abstract method __float__.'u'An ABC with one abstract method __float__.'b'An ABC with one abstract method __complex__.'u'An ABC with one abstract method __complex__.'b'An ABC with one abstract method __bytes__.'u'An ABC with one abstract method __bytes__.'b'An ABC with one abstract method __index__.'u'An ABC with one abstract method __index__.'b'An ABC with one abstract method __abs__ that is covariant in its return type.'u'An ABC with one abstract method __abs__ that is covariant in its return type.'b'An ABC with one abstract method __round__ that is covariant in its return type.'u'An ABC with one abstract method __round__ that is covariant in its return type.'b'NamedTuple('Name', [(f0, t0), (f1, t1), ...]); each t must be a type'u'NamedTuple('Name', [(f0, t0), (f1, t1), ...]); each t must be a type'b'_field_types'u'_field_types'b'_source'u'_source'b'Non-default namedtuple field {field_name} cannot follow default field(s) {default_names}'u'Non-default namedtuple field {field_name} cannot follow default field(s) {default_names}'b'Cannot overwrite NamedTuple attribute 'u'Cannot overwrite NamedTuple attribute 'b'Typed version of namedtuple. + + Usage in Python versions >= 3.6:: + + class Employee(NamedTuple): + name: str + id: int + + This is equivalent to:: + + Employee = collections.namedtuple('Employee', ['name', 'id']) + + The resulting class has an extra __annotations__ attribute, giving a + dict that maps field names to types. (The field names are also in + the _fields attribute, which is part of the namedtuple API.) + Alternative equivalent keyword syntax is also accepted:: + + Employee = NamedTuple('Employee', name=str, id=int) + + In Python versions <= 3.5 use:: + + Employee = NamedTuple('Employee', [('name', str), ('id', int)]) + 'u'Typed version of namedtuple. + + Usage in Python versions >= 3.6:: + + class Employee(NamedTuple): + name: str + id: int + + This is equivalent to:: + + Employee = collections.namedtuple('Employee', ['name', 'id']) + + The resulting class has an extra __annotations__ attribute, giving a + dict that maps field names to types. (The field names are also in + the _fields attribute, which is part of the namedtuple API.) 
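(Editor's aside: a minimal runnable sketch of the runtime attributes mentioned in the NamedTuple docstring above.)::

    from typing import NamedTuple

    class Employee(NamedTuple):
        name: str
        id: int = 3

    e = Employee('Guido')
    assert e._fields == ('name', 'id')                           # namedtuple API
    assert Employee.__annotations__ == {'name': str, 'id': int}
    assert e == ('Guido', 3)                                     # still a plain tuple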
+ Alternative equivalent keyword syntax is also accepted:: + + Employee = NamedTuple('Employee', name=str, id=int) + + In Python versions <= 3.5 use:: + + Employee = NamedTuple('Employee', [('name', str), ('id', int)]) + 'b'NamedTuple.__new__(): not enough arguments'u'NamedTuple.__new__(): not enough arguments'b'typename'u'typename'b'Passing 'typename' as keyword argument is deprecated'u'Passing 'typename' as keyword argument is deprecated'b'NamedTuple.__new__() missing 1 required positional argument: 'typename''u'NamedTuple.__new__() missing 1 required positional argument: 'typename''b'NamedTuple.__new__() takes from 2 to 3 positional arguments but 'u'NamedTuple.__new__() takes from 2 to 3 positional arguments but 'b' were given'u' were given'b'fields'u'fields'b'Passing 'fields' as keyword argument is deprecated'u'Passing 'fields' as keyword argument is deprecated'b'Either list of fields or keywords can be provided to NamedTuple, not both'u'Either list of fields or keywords can be provided to NamedTuple, not both'b'($cls, typename, fields=None, /, **kwargs)'u'($cls, typename, fields=None, /, **kwargs)'b'TypedDict takes either a dict or keyword arguments, but not both'u'TypedDict takes either a dict or keyword arguments, but not both'b'__total__'u'__total__'b'TypedDict does not support instance and class checks'u'TypedDict does not support instance and class checks'b'Create new typed dict class object. + + This method is called directly when TypedDict is subclassed, + or via _typeddict_new when TypedDict is instantiated. This way + TypedDict supports all three syntax forms described in its docstring. + Subclasses and instances of TypedDict return actual dictionaries + via _dict_new. + 'u'Create new typed dict class object. + + This method is called directly when TypedDict is subclassed, + or via _typeddict_new when TypedDict is instantiated. This way + TypedDict supports all three syntax forms described in its docstring. + Subclasses and instances of TypedDict return actual dictionaries + via _dict_new. + 'b'TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type'u'TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type'b'A simple typed namespace. At runtime it is equivalent to a plain dict. + + TypedDict creates a dictionary type that expects all of its + instances to have a certain set of keys, where each key is + associated with a value of a consistent type. This expectation + is not checked at runtime but is only enforced by type checkers. + Usage:: + + class Point2D(TypedDict): + x: int + y: int + label: str + + a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK + b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check + + assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first') + + The type info can be accessed via Point2D.__annotations__. TypedDict + supports two additional equivalent forms:: + + Point2D = TypedDict('Point2D', x=int, y=int, label=str) + Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str}) + + By default, all keys must be present in a TypedDict. It is possible + to override this by specifying totality. + Usage:: + + class point2D(TypedDict, total=False): + x: int + y: int + + This means that a point2D TypedDict can have any of the keys omitted.A type + checker is only expected to support a literal False or True as the value of + the total argument. True is the default, and makes all items defined in the + class body be required. 
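(Editor's aside: a minimal sketch of the total=False form described above; at runtime the class is still a plain dict factory.)::

    from typing import TypedDict

    class Point2D(TypedDict, total=False):
        x: int
        y: int

    p: Point2D = {'x': 1}           # 'y' may be omitted under total=False
    assert isinstance(p, dict)      # the runtime value is an ordinary dict
    assert Point2D.__total__ is False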
+ + The class syntax is only supported in Python 3.6+, while two other + syntax forms work for Python 2.7 and 3.2+ + 'u'A simple typed namespace. At runtime it is equivalent to a plain dict. + + TypedDict creates a dictionary type that expects all of its + instances to have a certain set of keys, where each key is + associated with a value of a consistent type. This expectation + is not checked at runtime but is only enforced by type checkers. + Usage:: + + class Point2D(TypedDict): + x: int + y: int + label: str + + a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK + b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check + + assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first') + + The type info can be accessed via Point2D.__annotations__. TypedDict + supports two additional equivalent forms:: + + Point2D = TypedDict('Point2D', x=int, y=int, label=str) + Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str}) + + By default, all keys must be present in a TypedDict. It is possible + to override this by specifying totality. + Usage:: + + class point2D(TypedDict, total=False): + x: int + y: int + + This means that a point2D TypedDict can have any of the keys omitted.A type + checker is only expected to support a literal False or True as the value of + the total argument. True is the default, and makes all items defined in the + class body be required. + + The class syntax is only supported in Python 3.6+, while two other + syntax forms work for Python 2.7 and 3.2+ + 'b'NewType creates simple unique types with almost zero + runtime overhead. NewType(name, tp) is considered a subtype of tp + by static type checkers. At runtime, NewType(name, tp) returns + a dummy function that simply returns its argument. Usage:: + + UserId = NewType('UserId', int) + + def name_by_id(user_id: UserId) -> str: + ... + + UserId('user') # Fails type check + + name_by_id(42) # Fails type check + name_by_id(UserId(42)) # OK + + num = UserId(5) + 1 # type: int + 'u'NewType creates simple unique types with almost zero + runtime overhead. NewType(name, tp) is considered a subtype of tp + by static type checkers. At runtime, NewType(name, tp) returns + a dummy function that simply returns its argument. Usage:: + + UserId = NewType('UserId', int) + + def name_by_id(user_id: UserId) -> str: + ... + + UserId('user') # Fails type check + + name_by_id(42) # Fails type check + name_by_id(UserId(42)) # OK + + num = UserId(5) + 1 # type: int + 'b'Generic base class for TextIO and BinaryIO. + + This is an abstract, generic version of the return of open(). + + NOTE: This does not distinguish between the different possible + classes (text vs. binary, read vs. write vs. read/write, + append-only, unbuffered). The TextIO and BinaryIO subclasses + below capture the distinctions between text vs. binary, which is + pervasive in the interface; however we currently do not offer a + way to track the other distinctions in the type system. + 'u'Generic base class for TextIO and BinaryIO. + + This is an abstract, generic version of the return of open(). + + NOTE: This does not distinguish between the different possible + classes (text vs. binary, read vs. write vs. read/write, + append-only, unbuffered). The TextIO and BinaryIO subclasses + below capture the distinctions between text vs. binary, which is + pervasive in the interface; however we currently do not offer a + way to track the other distinctions in the type system. 
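(Editor's aside: a minimal runnable sketch of the NewType runtime behaviour noted above.)::

    from typing import NewType

    UserId = NewType('UserId', int)

    uid = UserId(42)
    assert uid == 42                      # identity function at runtime
    assert type(uid) is int               # no new class is created
    assert UserId.__supertype__ is int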
+ 'b'IO[AnyStr]'u'IO[AnyStr]'b'Typed version of the return of open() in binary mode.'u'Typed version of the return of open() in binary mode.'b'BinaryIO'u'BinaryIO'b'Typed version of the return of open() in text mode.'u'Typed version of the return of open() in text mode.'b'TextIO'u'TextIO'b'Wrapper namespace for IO generic classes.'u'Wrapper namespace for IO generic classes.'b'IO'u'IO'b'.io'u'.io'b'Wrapper namespace for re type aliases.'u'Wrapper namespace for re type aliases.'b'.re'u'.re'bidirectionalcombiningdecompositioneast_asian_widthis_normalizedmirroredunidata_versionunicodedata.UCDUCDu'This module provides access to the Unicode Character Database which +defines character properties for all Unicode characters. The data in +this database is based on the UnicodeData.txt file version +12.1.0 which is publicly available from ftp://ftp.unicode.org/. + +The module uses the same names and symbols as defined by the +UnicodeData File Format 12.1.0.'u'/Users/pwntester/.pyenv/versions/3.8.13/lib/python3.8/lib-dynload/unicodedata.cpython-38-darwin.so'u'unicodedata'ucd_3_2_0ucnhash_CAPIu'12.1.0'Selector event loop for Unix with signal handling.base_subprocessSelectorEventLoopAbstractChildWatcherSafeChildWatcherFastChildWatcherMultiLoopChildWatcherThreadedChildWatcherSignals are not really supported on Windows_sighandler_noopDummy signal handler._UnixSelectorEventLoopUnix event loop. + + Adds signal handling and UNIX Domain Socket support to SelectorEventLoop. + _signal_handlersClosing the loop on interpreter shutdown stage, skipping signal handlers removal"on interpreter shutdown ""stage, skipping signal handlers removal"_handle_signalAdd a handler for a signal. UNIX only. + + Raise ValueError if the signal number is invalid or uncatchable. + Raise RuntimeError if there is a problem setting up the handler. + coroutines cannot be used with add_signal_handler()"coroutines cannot be used ""with add_signal_handler()"_check_signalnexcset_wakeup_fd(-1) failed: %ssig cannot be caughtInternal helper that is the actual signal handler.Remove a handler for a signal. UNIX only. + + Return True if a signal handler was removed, False if not. + Internal helper to validate a signal. + + Raise ValueError if the signal number is invalid or uncatchable. + Raise RuntimeError if there is a problem setting up the handler. 
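(Editor's aside: a minimal sketch of add_signal_handler()/remove_signal_handler() on the Unix selector event loop described above; Unix only, assuming Python 3.8.)::

    import asyncio
    import signal

    async def main() -> None:
        loop = asyncio.get_running_loop()
        stop = asyncio.Event()
        # Ask the loop to call stop.set() when SIGTERM is received.
        loop.add_signal_handler(signal.SIGTERM, stop.set)
        try:
            await stop.wait()
        finally:
            loop.remove_signal_handler(signal.SIGTERM)

    # asyncio.run(main())   # blocks until the process receives SIGTERM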
+ sig must be an int, not invalid signal number _UnixReadPipeTransport_UnixWritePipeTransportis_activeasyncio.get_child_watcher() is not activated, subprocess support is not installed."asyncio.get_child_watcher() is not activated, ""subprocess support is not installed."_UnixSubprocessTransportadd_child_handler_child_watcher_callbackyou have to pass server_hostname when using sslpath and sock can not be specified at the same timeno path and sock were specifiedA UNIX Domain Stream Socket was expected, got 'Unable to check or remove stale UNIX socket ''%r: %r'Address is already in usepath was not specified, and no sock specifiedos.sendfile() is not available_sock_sendfile_native_implregistered_fd_sock_sendfile_update_filepos_sock_add_cancellation_callbacksocket is not connectedos.sendfile call failed_pipe_filenoPipe transport is for pipes/sockets only.%r was closed by peeris_charis_fifois_socketPipe transport is only for pipes, sockets and character devices"Pipe transport is only for ""pipes, sockets and character devices"bufsize=pipe closed by peer or os.write(pipe, data) raised exception.'pipe closed by peer or ''os.write(pipe, data) raised exception.'stdin_wAbstract base class for monitoring child processes. + + Objects derived from this class monitor a collection of subprocesses and + report their termination or interruption by a signal. + + New callbacks are registered with .add_child_handler(). Starting a new + process must be done within a 'with' block to allow the watcher to suspend + its activity until the new process if fully registered (this is needed to + prevent a race condition in some implementations). + + Example: + with watcher: + proc = subprocess.Popen("sleep 1") + watcher.add_child_handler(proc.pid, callback) + + Notes: + Implementations of this class must be thread-safe. + + Since child watcher objects may catch the SIGCHLD signal and call + waitpid(-1), there should be only one active object per process. + Register a new child handler. + + Arrange for callback(pid, returncode, *args) to be called when + process 'pid' terminates. Specifying another callback for the same + process replaces the previous handler. + + Note: callback() must be thread-safe. + remove_child_handlerRemoves the handler for process 'pid'. + + The function returns True if the handler was successfully removed, + False if there was nothing to remove.attach_loopAttach the watcher to an event loop. + + If the watcher was previously attached to an event loop, then it is + first detached before attaching to the new loop. + + Note: loop may be None. + Close the watcher. + + This must be called to make sure that any underlying resource is freed. + Return ``True`` if the watcher is active and is used by the event loop. + + Return True if the watcher is installed and ready to handle process exit + notifications. + + Enter the watcher's context and allow starting new processes + + This function must return selfExit the watcher's context_compute_returncodeBaseChildWatcher_do_waitpidexpected_pid_do_waitpid_allA loop is being detached from a child watcher with pending handlers'A loop is being detached ''from a child watcher with pending handlers'_sig_chldUnknown exception in SIGCHLD handler'Safe' child watcher implementation. + + This implementation avoids disrupting other code spawning processes by + polling explicitly each process in the SIGCHLD handler instead of calling + os.waitpid(-1). 
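(Editor's aside: a minimal sketch following the AbstractChildWatcher example above; Unix only, assuming Python 3.8 where the default watcher is ThreadedChildWatcher.)::

    import asyncio
    import subprocess

    def on_exit(pid: int, returncode: int) -> None:
        print(f'child {pid} exited with returncode {returncode}')

    async def main() -> None:
        watcher = asyncio.get_child_watcher()
        watcher.attach_loop(asyncio.get_running_loop())
        # Spawn inside the watcher's context, as the docstring above requires.
        with watcher:
            proc = subprocess.Popen(['sleep', '1'])
            watcher.add_child_handler(proc.pid, on_exit)
        await asyncio.sleep(2)   # give the callback time to fire

    # asyncio.run(main())   # Unix only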
+ + This is a safe solution but it has a significant overhead when handling a + big number of children (O(n) each time SIGCHLD is raised) + process %s exited with returncode %sUnknown child process pid %d, will report returncode 255Child watcher got an unexpected pid: %r'Fast' child watcher implementation. + + This implementation reaps every terminated processes by calling + os.waitpid(-1) directly, possibly breaking other code spawning processes + and waiting for their termination. + + There is no noticeable overhead when handling a big number of children + (O(1) each time a child terminates). + _zombies_forkscollateral_victimsCaught subprocesses termination from unknown pids: %sMust use the context managerunknown process %s exited with returncode %s'unknown process %s exited ''with returncode %s'Caught subprocess termination from unknown pid: %d -> %d"Caught subprocess termination from unknown pid: ""%d -> %d"A watcher that doesn't require running loop in the main thread. + + This implementation registers a SIGCHLD signal handler on + instantiation (which may conflict with other code that + install own handler for this signal). + + The solution is safe but it has a significant overhead when + handling a big number of processes (*O(n)* each time a + SIGCHLD is received). + _saved_sighandlerSIGCHLD handler was changed by outside codePrevious SIGCHLD handler was set by non-Python code, restore to default handler on watcher close."Previous SIGCHLD handler was set by non-Python code, ""restore to default handler on watcher close."Loop %r that handles pid %r is closedThreaded child watcher implementation. + + The watcher uses a thread per process + for waiting for the process finish. + + It doesn't require subscription on POSIX signal + but a thread creation is not free. + + The watcher has O(1) complexity, its performance doesn't depend + on amount of spawn processes. + _pid_counter_join_threadsInternal: Join all non-daemon threads has registered but not finished child processeswaitpid-_UnixDefaultEventLoopPolicyUNIX event loop policy with a watcher for child processes._watcher_init_watcherSet the event loop. + + As a side effect, if a child watcher was set before, then calling + .set_event_loop() from the main thread will call .attach_loop(loop) on + the child watcher. + Get the watcher for child processes. + + If not yet set, a ThreadedChildWatcher object is automatically created. + # ignore null bytes written by _write_to_self()# set_wakeup_fd() raises ValueError if this is not the# main thread. By calling it early we ensure that an# event loop running in another thread cannot add a signal# handler.# Register a dummy signal handler to ask Python to write the signal# number in the wakeup file descriptor. _process_self_data() will# read signal numbers from this file descriptor to handle signals.# Set SA_RESTART to limit EINTR occurrences.# Assume it's some race condition.# Remove it properly.# Check early.# Raising exception before process creation# prevents subprocess execution if the watcher# is not ready to handle it.# Check for abstract socket. `str` and `bytes` paths are supported.# Let's improve the error message by adding# with what exact address it occurs.# Remove the callback early. It should be rare that the# selector says the fd is ready but the call still returns# EAGAIN, and I am willing to take a hit in that case in# order to simplify the common case.# If we have an ENOTCONN and this isn't a first call to# sendfile(), i.e. 
the connection was closed in the middle# of the operation, normalize the error to ConnectionError# to make it consistent across all Posix systems.# max bytes we read in one event loop iteration# should be called by exception handler only# Set when close() or write_eof() called.# On AIX, the reader trick (to be notified when the read end of the# socket is closed) only works for sockets. On other platforms it# works for pipes and sockets. (Exception: OS X 10.4? Issue #19294.)# Pipe was closed by peer.# Remove writer here, _fatal_error() doesn't it# because _buffer is empty.# write_eof is all what we needed to close the write pipe# Use a socket pair for stdin, since not all platforms# support selecting read events on the write end of a# socket (which we use in order to detect closing of the# other end). Notably this is needed on AIX, and works# just fine on other platforms.# The child process died because of a signal.# The child process exited (e.g sys.exit()).# The child exited, but we don't understand its status.# This shouldn't happen, but if it does, let's just# return that status; perhaps that helps debug it.# Prevent a race condition in case a child terminated# during the switch.# self._loop should always be available here# as '_sig_chld' is added as a signal handler# in 'attach_loop'# Prevent a race condition in case the child is already terminated.# The child process is already reaped# (may happen if waitpid() is called elsewhere).# The child process is still alive.# May happen if .remove_child_handler() is called# after os.waitpid() returns.# The child is running.# The child is dead already. We can fire the callback.# Because of signal coalescing, we must keep calling waitpid() as# long as we're able to reap a child.# No more child processes exist.# A child process is still alive.# unknown child# It may not be registered yet.# Implementation note:# The class keeps compatibility with AbstractChildWatcher ABC# To achieve this it has empty attach_loop() method# and doesn't accept explicit loop argument# for add_child_handler()/remove_child_handler()# but retrieves the current loop by get_running_loop()# Don't save the loop but initialize itself if called first time# The reason to do it here is that attach_loop() is called from# unix policy only for the main thread.# Main thread is required for subscription on SIGCHLD signal# asyncio never calls remove_child_handler() !!!# The method is no-op but is implemented because# abstract base classe requires itb'Selector event loop for Unix with signal handling.'u'Selector event loop for Unix with signal handling.'b'SelectorEventLoop'u'SelectorEventLoop'b'AbstractChildWatcher'u'AbstractChildWatcher'b'SafeChildWatcher'u'SafeChildWatcher'b'FastChildWatcher'u'FastChildWatcher'b'MultiLoopChildWatcher'u'MultiLoopChildWatcher'b'ThreadedChildWatcher'u'ThreadedChildWatcher'b'DefaultEventLoopPolicy'u'DefaultEventLoopPolicy'b'Signals are not really supported on Windows'u'Signals are not really supported on Windows'b'Dummy signal handler.'u'Dummy signal handler.'b'Unix event loop. + + Adds signal handling and UNIX Domain Socket support to SelectorEventLoop. + 'u'Unix event loop. + + Adds signal handling and UNIX Domain Socket support to SelectorEventLoop. + 'b'Closing the loop 'u'Closing the loop 'b' on interpreter shutdown stage, skipping signal handlers removal'u' on interpreter shutdown stage, skipping signal handlers removal'b'Add a handler for a signal. UNIX only. + + Raise ValueError if the signal number is invalid or uncatchable. 
+ Raise RuntimeError if there is a problem setting up the handler. + 'u'Add a handler for a signal. UNIX only. + + Raise ValueError if the signal number is invalid or uncatchable. + Raise RuntimeError if there is a problem setting up the handler. + 'b'coroutines cannot be used with add_signal_handler()'u'coroutines cannot be used with add_signal_handler()'b'set_wakeup_fd(-1) failed: %s'u'set_wakeup_fd(-1) failed: %s'b'sig 'u'sig 'b' cannot be caught'u' cannot be caught'b'Internal helper that is the actual signal handler.'u'Internal helper that is the actual signal handler.'b'Remove a handler for a signal. UNIX only. + + Return True if a signal handler was removed, False if not. + 'u'Remove a handler for a signal. UNIX only. + + Return True if a signal handler was removed, False if not. + 'b'Internal helper to validate a signal. + + Raise ValueError if the signal number is invalid or uncatchable. + Raise RuntimeError if there is a problem setting up the handler. + 'u'Internal helper to validate a signal. + + Raise ValueError if the signal number is invalid or uncatchable. + Raise RuntimeError if there is a problem setting up the handler. + 'b'sig must be an int, not 'u'sig must be an int, not 'b'invalid signal number 'u'invalid signal number 'b'asyncio.get_child_watcher() is not activated, subprocess support is not installed.'u'asyncio.get_child_watcher() is not activated, subprocess support is not installed.'b'you have to pass server_hostname when using ssl'u'you have to pass server_hostname when using ssl'b'path and sock can not be specified at the same time'u'path and sock can not be specified at the same time'b'no path and sock were specified'u'no path and sock were specified'b'A UNIX Domain Stream Socket was expected, got 'u'A UNIX Domain Stream Socket was expected, got 'b'Address 'u'Address 'b' is already in use'u' is already in use'b'path was not specified, and no sock specified'u'path was not specified, and no sock specified'b'os.sendfile() is not available'u'os.sendfile() is not available'b'socket is not connected'u'socket is not connected'b'os.sendfile call failed'u'os.sendfile call failed'b'Pipe transport is for pipes/sockets only.'u'Pipe transport is for pipes/sockets only.'b'_selector'u'_selector'b'%r was closed by peer'u'%r was closed by peer'b'Pipe transport is only for pipes, sockets and character devices'u'Pipe transport is only for pipes, sockets and character devices'b'bufsize='u'bufsize='b'pipe closed by peer or os.write(pipe, data) raised exception.'u'pipe closed by peer or os.write(pipe, data) raised exception.'b'Abstract base class for monitoring child processes. + + Objects derived from this class monitor a collection of subprocesses and + report their termination or interruption by a signal. + + New callbacks are registered with .add_child_handler(). Starting a new + process must be done within a 'with' block to allow the watcher to suspend + its activity until the new process if fully registered (this is needed to + prevent a race condition in some implementations). + + Example: + with watcher: + proc = subprocess.Popen("sleep 1") + watcher.add_child_handler(proc.pid, callback) + + Notes: + Implementations of this class must be thread-safe. + + Since child watcher objects may catch the SIGCHLD signal and call + waitpid(-1), there should be only one active object per process. + 'u'Abstract base class for monitoring child processes. + + Objects derived from this class monitor a collection of subprocesses and + report their termination or interruption by a signal. 
+ + New callbacks are registered with .add_child_handler(). Starting a new + process must be done within a 'with' block to allow the watcher to suspend + its activity until the new process if fully registered (this is needed to + prevent a race condition in some implementations). + + Example: + with watcher: + proc = subprocess.Popen("sleep 1") + watcher.add_child_handler(proc.pid, callback) + + Notes: + Implementations of this class must be thread-safe. + + Since child watcher objects may catch the SIGCHLD signal and call + waitpid(-1), there should be only one active object per process. + 'b'Register a new child handler. + + Arrange for callback(pid, returncode, *args) to be called when + process 'pid' terminates. Specifying another callback for the same + process replaces the previous handler. + + Note: callback() must be thread-safe. + 'u'Register a new child handler. + + Arrange for callback(pid, returncode, *args) to be called when + process 'pid' terminates. Specifying another callback for the same + process replaces the previous handler. + + Note: callback() must be thread-safe. + 'b'Removes the handler for process 'pid'. + + The function returns True if the handler was successfully removed, + False if there was nothing to remove.'u'Removes the handler for process 'pid'. + + The function returns True if the handler was successfully removed, + False if there was nothing to remove.'b'Attach the watcher to an event loop. + + If the watcher was previously attached to an event loop, then it is + first detached before attaching to the new loop. + + Note: loop may be None. + 'u'Attach the watcher to an event loop. + + If the watcher was previously attached to an event loop, then it is + first detached before attaching to the new loop. + + Note: loop may be None. + 'b'Close the watcher. + + This must be called to make sure that any underlying resource is freed. + 'u'Close the watcher. + + This must be called to make sure that any underlying resource is freed. + 'b'Return ``True`` if the watcher is active and is used by the event loop. + + Return True if the watcher is installed and ready to handle process exit + notifications. + + 'u'Return ``True`` if the watcher is active and is used by the event loop. + + Return True if the watcher is installed and ready to handle process exit + notifications. + + 'b'Enter the watcher's context and allow starting new processes + + This function must return self'u'Enter the watcher's context and allow starting new processes + + This function must return self'b'Exit the watcher's context'u'Exit the watcher's context'b'A loop is being detached from a child watcher with pending handlers'u'A loop is being detached from a child watcher with pending handlers'b'Unknown exception in SIGCHLD handler'u'Unknown exception in SIGCHLD handler'b''Safe' child watcher implementation. + + This implementation avoids disrupting other code spawning processes by + polling explicitly each process in the SIGCHLD handler instead of calling + os.waitpid(-1). + + This is a safe solution but it has a significant overhead when handling a + big number of children (O(n) each time SIGCHLD is raised) + 'u''Safe' child watcher implementation. + + This implementation avoids disrupting other code spawning processes by + polling explicitly each process in the SIGCHLD handler instead of calling + os.waitpid(-1). 
+ + This is a safe solution but it has a significant overhead when handling a + big number of children (O(n) each time SIGCHLD is raised) + 'b'process %s exited with returncode %s'u'process %s exited with returncode %s'b'Unknown child process pid %d, will report returncode 255'u'Unknown child process pid %d, will report returncode 255'b'Child watcher got an unexpected pid: %r'u'Child watcher got an unexpected pid: %r'b''Fast' child watcher implementation. + + This implementation reaps every terminated processes by calling + os.waitpid(-1) directly, possibly breaking other code spawning processes + and waiting for their termination. + + There is no noticeable overhead when handling a big number of children + (O(1) each time a child terminates). + 'u''Fast' child watcher implementation. + + This implementation reaps every terminated processes by calling + os.waitpid(-1) directly, possibly breaking other code spawning processes + and waiting for their termination. + + There is no noticeable overhead when handling a big number of children + (O(1) each time a child terminates). + 'b'Caught subprocesses termination from unknown pids: %s'u'Caught subprocesses termination from unknown pids: %s'b'Must use the context manager'u'Must use the context manager'b'unknown process %s exited with returncode %s'u'unknown process %s exited with returncode %s'b'Caught subprocess termination from unknown pid: %d -> %d'u'Caught subprocess termination from unknown pid: %d -> %d'b'A watcher that doesn't require running loop in the main thread. + + This implementation registers a SIGCHLD signal handler on + instantiation (which may conflict with other code that + install own handler for this signal). + + The solution is safe but it has a significant overhead when + handling a big number of processes (*O(n)* each time a + SIGCHLD is received). + 'u'A watcher that doesn't require running loop in the main thread. + + This implementation registers a SIGCHLD signal handler on + instantiation (which may conflict with other code that + install own handler for this signal). + + The solution is safe but it has a significant overhead when + handling a big number of processes (*O(n)* each time a + SIGCHLD is received). + 'b'SIGCHLD handler was changed by outside code'u'SIGCHLD handler was changed by outside code'b'Previous SIGCHLD handler was set by non-Python code, restore to default handler on watcher close.'u'Previous SIGCHLD handler was set by non-Python code, restore to default handler on watcher close.'b'Loop %r that handles pid %r is closed'u'Loop %r that handles pid %r is closed'b'Threaded child watcher implementation. + + The watcher uses a thread per process + for waiting for the process finish. + + It doesn't require subscription on POSIX signal + but a thread creation is not free. + + The watcher has O(1) complexity, its performance doesn't depend + on amount of spawn processes. + 'u'Threaded child watcher implementation. + + The watcher uses a thread per process + for waiting for the process finish. + + It doesn't require subscription on POSIX signal + but a thread creation is not free. + + The watcher has O(1) complexity, its performance doesn't depend + on amount of spawn processes. 
+ 'b'Internal: Join all non-daemon threads'u'Internal: Join all non-daemon threads'b' has registered but not finished child processes'u' has registered but not finished child processes'b'waitpid-'u'waitpid-'b'UNIX event loop policy with a watcher for child processes.'u'UNIX event loop policy with a watcher for child processes.'b'Set the event loop. + + As a side effect, if a child watcher was set before, then calling + .set_event_loop() from the main thread will call .attach_loop(loop) on + the child watcher. + 'u'Set the event loop. + + As a side effect, if a child watcher was set before, then calling + .set_event_loop() from the main thread will call .attach_loop(loop) on + the child watcher. + 'b'Get the watcher for child processes. + + If not yet set, a ThreadedChildWatcher object is automatically created. + 'u'Get the watcher for child processes. + + If not yet set, a ThreadedChildWatcher object is automatically created. + 'u'asyncio.unix_events'u'unix_events'_get_build_versionReturn the version of MSVC that was used to build Python. + + For Python 2.3 and up, the version number is included in + sys.version. For earlier versions, assume the compiler is MSVC 6. + MSC v.majorVersion10.0minorVersionfind_msvcrtReturn the name of the VC runtime dllclibnamemsvcr%dctypes.macholib.dyld_dyld_findlib%s.dylib%s.dylib%s.framework/%sctypes._aix_is_elfReturn True if the given file is an ELF fileELFelf_header_findLib_gcc[^\(\)\s]*lib%s\.[^\(\)\s]*c_compilertemp-Wl,-tsunos5_get_soname/usr/ccs/bin/dump-Lpv\[.*\]\sSONAME\s+([^\s]+)objdump-j.dynamic\sSONAME\s+([^\s]+)openbsddragonflyename:-l%s\.\S+ => \S*/(lib%s\.\S+)/sbin/ldconfig_findLib_crleis64/usr/bin/crle-64Default Library Path (ELF):lib%s.solibfile_findSoname_ldconfig-32libc6,x86-64x86_64-64libc6,64bitppc64-64sparc64-64s390x-64libc6,IA-64ia64-64mach_maplibc6abi_type\s+(lib%s\.[^\s]+)\s+\(%s_findLib_ldldlibpath-l%slibm.dyliblibcrypto.dylibUsing CDLL(name, os.RTLD_MEMBER): libc.a(shr.o)Using cdll.LoadLibrary(): rpmlibrpm.solibc.a(shr_64.o)crypt :: cryptcrypto :: cryptolibm.solibcrypt.so# find_library(name) returns the pathname of a library, or None.# This function was copied from Lib/distutils/msvccompiler.py# I don't think paths are affected by minor version in version 6# else we don't know what version of the compiler this is# better be safe than sorry# CRT is no longer directly loadable. See issue23606 for the# discussion about alternative approaches.# If python was built with in debug mode# See MSDN for the REAL search order.# AIX has two styles of storing shared libraries# GNU auto_tools refer to these as svr4 and aix# svr4 (System V Release 4) is a regular file, often with .so as suffix# AIX style uses an archive (suffix .a) with members (e.g., shr.o, libssl.so)# see issue#26439 and _aix.py for more details# Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump# Run GCC's linker with the -t (aka --trace) option and examine the# library name it prints out. The GCC command will fail because we# haven't supplied a proper program with main(), but that does not# matter.# No C compiler available, give up# E.g. bad executable# Raised if the file was already removed, which is the normal# behaviour of GCC if linking fails# Check if the given file is an elf file: gcc can report# some files that are linker scripts and not actual# shared objects. See bpo-41976 for more details# use /usr/ccs/bin/dump on solaris# E.g. 
command not found# assuming GNU binutils / ELF# objdump is not available, give up# "libxyz.so.MAJOR.MINOR" => [ MAJOR, MINOR ]# XXX assuming GLIBC's ldconfig (with option -p)# See issue #9998 for why this is needed# result will be None# See issue #9998# find and load_version# load# issue-26439 - fix broken test call for AIX# librpm.so is only available as 32-bit shared libraryb'Return the version of MSVC that was used to build Python. + + For Python 2.3 and up, the version number is included in + sys.version. For earlier versions, assume the compiler is MSVC 6. + 'u'Return the version of MSVC that was used to build Python. + + For Python 2.3 and up, the version number is included in + sys.version. For earlier versions, assume the compiler is MSVC 6. + 'b'MSC v.'u'MSC v.'b'Return the name of the VC runtime dll'u'Return the name of the VC runtime dll'b'msvcrt'u'msvcrt'b'msvcr%d'u'msvcr%d'b'lib%s.dylib'u'lib%s.dylib'b'%s.dylib'u'%s.dylib'b'%s.framework/%s'u'%s.framework/%s'b'Return True if the given file is an ELF file'u'Return True if the given file is an ELF file'b'ELF'b'[^\(\)\s]*lib%s\.[^\(\)\s]*'u'[^\(\)\s]*lib%s\.[^\(\)\s]*'b'cc'u'cc'b'-Wl,-t'u'-Wl,-t'b'sunos5'u'sunos5'b'/usr/ccs/bin/dump'u'/usr/ccs/bin/dump'b'-Lpv'u'-Lpv'b'\[.*\]\sSONAME\s+([^\s]+)'b'objdump'u'objdump'b'-j'u'-j'b'.dynamic'u'.dynamic'b'\sSONAME\s+([^\s]+)'b'openbsd'u'openbsd'b'dragonfly'u'dragonfly'b':-l%s\.\S+ => \S*/(lib%s\.\S+)'u':-l%s\.\S+ => \S*/(lib%s\.\S+)'b'/sbin/ldconfig'u'/sbin/ldconfig'b'/usr/bin/crle'u'/usr/bin/crle'b'-64'u'-64'b'Default Library Path (ELF):'b'lib%s.so'u'lib%s.so'b'-32'u'-32'b'libc6,x86-64'u'libc6,x86-64'b'x86_64-64'u'x86_64-64'b'libc6,64bit'u'libc6,64bit'b'ppc64-64'u'ppc64-64'b'sparc64-64'u'sparc64-64'b's390x-64'u's390x-64'b'libc6,IA-64'u'libc6,IA-64'b'ia64-64'u'ia64-64'b'libc6'u'libc6'b'\s+(lib%s\.[^\s]+)\s+\(%s'u'\s+(lib%s\.[^\s]+)\s+\(%s'b'ld'u'ld'b'-l%s'u'-l%s'b'libm.dylib'u'libm.dylib'b'libcrypto.dylib'u'libcrypto.dylib'b'Using CDLL(name, os.RTLD_MEMBER): 'u'Using CDLL(name, os.RTLD_MEMBER): 'b'libc.a(shr.o)'u'libc.a(shr.o)'b'Using cdll.LoadLibrary(): 'u'Using cdll.LoadLibrary(): 'b'rpm'u'rpm'b'librpm.so'u'librpm.so'b'libc.a(shr_64.o)'u'libc.a(shr_64.o)'b'crypt :: 'u'crypt :: 'b'crypt'u'crypt'b'crypto :: 'u'crypto :: 'b'crypto'u'crypto'b'libm.so'u'libm.so'b'libcrypt.so'u'libcrypt.so'u'ctypes.util'u'util'Utility code for constructing importers, etc.Return the hash of *source_bytes* as used in hash-based pyc files.resolve_nameno package specified for (required for relative module names)'(required for relative module names)'_find_spec_from_pathReturn the spec for the specified module. + + First, sys.modules is checked to see if the module was already imported. If + so, then sys.modules[name].__spec__ is returned. If that happens to be + set to None, then ValueError is raised. If the module is not in + sys.modules, then sys.meta_path is searched for a suitable spec with the + value of 'path' given to the finders. None is returned if no spec could + be found. + + Dotted names do not have their parent packages implicitly imported. You will + most likely need to explicitly import all parent packages in the proper + order for a submodule to get the correct spec. + + {}.__spec__ is None{}.__spec__ is not setReturn the spec for the specified module. + + First, sys.modules is checked to see if the module was already imported. If + so, then sys.modules[name].__spec__ is returned. If that happens to be + set to None, then ValueError is raised. 
If the module is not in + sys.modules, then sys.meta_path is searched for a suitable spec with the + value of 'path' given to the finders. None is returned if no spec could + be found. + + If the name is for submodule (contains a dot), the parent module is + automatically imported. + + The name and package arguments work the same as importlib.import_module(). + In other words, relative module names (with leading dots) work. + + __path__ attribute not found on while trying to find "while trying to find "_module_to_load__initializing__set_packageSet __package__ on the returned module. + + This function is deprecated. + + set_package_wrapperThe import system now takes care of this automatically.set_loaderSet __loader__ on the returned module. + + This function is deprecated. + + set_loader_wrappermodule_for_loaderDecorator to handle selecting the proper module for loaders. + + The decorated function is passed the module to use instead of the module + name. The module passed in to the function is either from sys.modules if + it already exists or is a new module. If the module is new, then __name__ + is set the first argument to the method, __loader__ is set to self, and + __package__ is set accordingly (if self.is_package() is defined) will be set + before it is passed to the decorated function (if self.is_package() does + not work for the module it will be set post-load). + + If an exception is raised and the decorator created the module it is + subsequently removed from sys.modules. + + The decorator assumes that the decorated function takes the module name as + the second argument. + + module_for_loader_wrapper_LazyModuleA subclass of the module type which triggers loading upon attribute access.Trigger the load of the module and return the attribute.original_nameattrs_thenoriginal_typeattrs_nowattrs_updatedmodule object for substituted in sys.modules during a lazy load"substituted in sys.modules during a lazy ""load"Trigger the load and then perform the deletion.LazyLoaderA loader that creates a module which defers loading until attribute access.__check_eager_loaderloader must define exec_module()Construct a callable which returns the eager loader made lazy.Make the module load lazily.# This must be done before open() is called as the 'io' module# implicitly imports 'locale' and would otherwise trigger an# infinite loop.# (otherwise an optimization shortcut in import.c becomes wrong)# If __package__ was not set above, __import__() will do it later.# All module metadata must be garnered from __spec__ in order to avoid# using mutated values.# Stop triggering this method.# Get the original name to make sure no object substitution occurred# Figure out exactly what attributes were mutated between the creation# of the module and now.# Code that set the attribute may have kept a reference to the# assigned object, making identity more important than equality.# If exec_module() was used directly there is no guarantee the module# object was put into sys.modules.# Update after loading since that's what would happen in an eager# loading situation.# To trigger the load and raise an exception if the attribute# doesn't exist.# Don't need to worry about deep-copying as trying to set an attribute# on an object would have triggered the load,# e.g. 
``module.__spec__.loader = None`` would trigger a load from# trying to access module.__spec__.b'Utility code for constructing importers, etc.'u'Utility code for constructing importers, etc.'b'Return the hash of *source_bytes* as used in hash-based pyc files.'u'Return the hash of *source_bytes* as used in hash-based pyc files.'b'no package specified for 'u'no package specified for 'b' (required for relative module names)'u' (required for relative module names)'b'Return the spec for the specified module. + + First, sys.modules is checked to see if the module was already imported. If + so, then sys.modules[name].__spec__ is returned. If that happens to be + set to None, then ValueError is raised. If the module is not in + sys.modules, then sys.meta_path is searched for a suitable spec with the + value of 'path' given to the finders. None is returned if no spec could + be found. + + Dotted names do not have their parent packages implicitly imported. You will + most likely need to explicitly import all parent packages in the proper + order for a submodule to get the correct spec. + + 'u'Return the spec for the specified module. + + First, sys.modules is checked to see if the module was already imported. If + so, then sys.modules[name].__spec__ is returned. If that happens to be + set to None, then ValueError is raised. If the module is not in + sys.modules, then sys.meta_path is searched for a suitable spec with the + value of 'path' given to the finders. None is returned if no spec could + be found. + + Dotted names do not have their parent packages implicitly imported. You will + most likely need to explicitly import all parent packages in the proper + order for a submodule to get the correct spec. + + 'b'{}.__spec__ is None'u'{}.__spec__ is None'b'{}.__spec__ is not set'u'{}.__spec__ is not set'b'Return the spec for the specified module. + + First, sys.modules is checked to see if the module was already imported. If + so, then sys.modules[name].__spec__ is returned. If that happens to be + set to None, then ValueError is raised. If the module is not in + sys.modules, then sys.meta_path is searched for a suitable spec with the + value of 'path' given to the finders. None is returned if no spec could + be found. + + If the name is for submodule (contains a dot), the parent module is + automatically imported. + + The name and package arguments work the same as importlib.import_module(). + In other words, relative module names (with leading dots) work. + + 'u'Return the spec for the specified module. + + First, sys.modules is checked to see if the module was already imported. If + so, then sys.modules[name].__spec__ is returned. If that happens to be + set to None, then ValueError is raised. If the module is not in + sys.modules, then sys.meta_path is searched for a suitable spec with the + value of 'path' given to the finders. None is returned if no spec could + be found. + + If the name is for submodule (contains a dot), the parent module is + automatically imported. + + The name and package arguments work the same as importlib.import_module(). + In other words, relative module names (with leading dots) work. + + 'b'__path__ attribute not found on 'u'__path__ attribute not found on 'b' while trying to find 'u' while trying to find 'b'Set __package__ on the returned module. + + This function is deprecated. + + 'u'Set __package__ on the returned module. + + This function is deprecated. 
+ + 'b'The import system now takes care of this automatically.'u'The import system now takes care of this automatically.'b'Set __loader__ on the returned module. + + This function is deprecated. + + 'u'Set __loader__ on the returned module. + + This function is deprecated. + + 'b'Decorator to handle selecting the proper module for loaders. + + The decorated function is passed the module to use instead of the module + name. The module passed in to the function is either from sys.modules if + it already exists or is a new module. If the module is new, then __name__ + is set the first argument to the method, __loader__ is set to self, and + __package__ is set accordingly (if self.is_package() is defined) will be set + before it is passed to the decorated function (if self.is_package() does + not work for the module it will be set post-load). + + If an exception is raised and the decorator created the module it is + subsequently removed from sys.modules. + + The decorator assumes that the decorated function takes the module name as + the second argument. + + 'u'Decorator to handle selecting the proper module for loaders. + + The decorated function is passed the module to use instead of the module + name. The module passed in to the function is either from sys.modules if + it already exists or is a new module. If the module is new, then __name__ + is set the first argument to the method, __loader__ is set to self, and + __package__ is set accordingly (if self.is_package() is defined) will be set + before it is passed to the decorated function (if self.is_package() does + not work for the module it will be set post-load). + + If an exception is raised and the decorator created the module it is + subsequently removed from sys.modules. + + The decorator assumes that the decorated function takes the module name as + the second argument. + + 'b'A subclass of the module type which triggers loading upon attribute access.'u'A subclass of the module type which triggers loading upon attribute access.'b'Trigger the load of the module and return the attribute.'u'Trigger the load of the module and return the attribute.'b'module object for 'u'module object for 'b' substituted in sys.modules during a lazy load'u' substituted in sys.modules during a lazy load'b'Trigger the load and then perform the deletion.'u'Trigger the load and then perform the deletion.'b'A loader that creates a module which defers loading until attribute access.'u'A loader that creates a module which defers loading until attribute access.'b'loader must define exec_module()'u'loader must define exec_module()'b'Construct a callable which returns the eager loader made lazy.'u'Construct a callable which returns the eager loader made lazy.'b'Make the module load lazily.'u'Make the module load lazily.'u'importlib.util'Various utility functions._MAX_LENGTH_PLACEHOLDER_LEN_MIN_BEGIN_LEN_MIN_END_LEN_MIN_COMMON_LEN_MIN_DIFF_LEN_shortenprefixlensuffixlen%s[%d chars]%scommon_len [truncated]...sorted_list_differenceFinds elements in only one or the other of two, sorted input lists. + + Returns a two-element tuple of lists. The first list contains those + elements in the "expected" list but not in the "actual" list, and the + second contains those elements in the "actual" list but not in the + "expected" list. Duplicate elements in either input list are ignored. + unexpectedunorderable_list_differenceSame behavior as sorted_list_difference but + for lists of unorderable items (like dicts). 
+ + As it does a linear search per item (remove) it + has O(n*n) performance.Return -1 if x < y, 0 if x == y and 1 if x > yMismatchactual expected value_MismatchReturns list of (cnt_act, cnt_exp, elem) triples where the counts differcnt_scnt_tother_elem# anything left in actual is unexpected# elements need not be hashable# elements must be hashableb'Various utility functions.'u'Various utility functions.'b'%s[%d chars]%s'u'%s[%d chars]%s'b' [truncated]...'u' [truncated]...'b'Finds elements in only one or the other of two, sorted input lists. + + Returns a two-element tuple of lists. The first list contains those + elements in the "expected" list but not in the "actual" list, and the + second contains those elements in the "actual" list but not in the + "expected" list. Duplicate elements in either input list are ignored. + 'u'Finds elements in only one or the other of two, sorted input lists. + + Returns a two-element tuple of lists. The first list contains those + elements in the "expected" list but not in the "actual" list, and the + second contains those elements in the "actual" list but not in the + "expected" list. Duplicate elements in either input list are ignored. + 'b'Same behavior as sorted_list_difference but + for lists of unorderable items (like dicts). + + As it does a linear search per item (remove) it + has O(n*n) performance.'u'Same behavior as sorted_list_difference but + for lists of unorderable items (like dicts). + + As it does a linear search per item (remove) it + has O(n*n) performance.'b'Return -1 if x < y, 0 if x == y and 1 if x > y'u'Return -1 if x < y, 0 if x == y and 1 if x > y'b'Mismatch'u'Mismatch'b'actual expected value'u'actual expected value'b'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ'u'Returns list of (cnt_act, cnt_exp, elem) triples where the counts differ'u'unittest.util'ForkAwareThreadLockForkAwareLocalclose_all_fds_exceptLOGGER_NAME[%(levelname)s/%(processName)s] %(message)sDEFAULT_LOGGING_FORMAT + Returns logger used by multiprocessing + _exithandlers + Turn on logging and add a handler which prints to stderr + _platform_supports_abstract_socketsaddress type of {address!r} unrecognizedabstract_sockets_supported_remove_temp_dirpymp-created temp directory %s_afterfork_registry_afterfork_counterafter forker raised exception %s_finalizer_counter + Class which supports object finalization using weakrefs + Exitpriority ({0!r}) must be None or int, not {1!s}Without object, exitpriority cannot be None + Run the callback unless it has already been called or cancelled + finalizer ignored because different processfinalizer calling %s with args %s and kwargs %sfinalizer no longer registered + Cancel finalization of the object + still_active + Return whether this finalizer is still waiting to invoke callback + <%s object, dead><%s object, callback=%s, args=, kwargs=, exitpriority=_run_finalizersminpriority + Run all finalizers whose exit priority is not None and at least minpriority + + Finalizers with highest priority are called first; finalizers with + the same priority will be called in reverse order of creation. 
+ calling %s + Returns true if the process is shutting down + _exitingprocess shutting downrunning all "atexit" finalizers with priority >= 0calling terminate() for daemon %scalling join() for process %srunning the remaining "atexit" finalizers_resetfd too largepassfdsClose each file descriptor given as an argument_cleanup_testsCleanup multiprocessing resources when multiprocessing tests + completed.# Module providing various facilities to other parts of the package# multiprocessing/util.py# we want threading to install it's# cleanup function before multiprocessing does# Logging# XXX multiprocessing should cleanup before logging# Abstract socket support# Function returning a temp directory which will be removed on exit# current_process() can be None if the finalizer is called# late during Python finalization# get name of a temp directory which will be automatically cleaned up# keep a strong reference to shutil.rmtree(), since the finalizer# can be called late during Python shutdown# Support for reinitialization of objects when bootstrapping a child process# Finalization using weakrefs# Need to bind these locally because the globals can have# been cleared at shutdown# This function may be called after this module's globals are# destroyed. See the _exit_function function in this module for more# notes.# Careful: _finalizer_registry may be mutated while this function# is running (either by a GC run or by another thread).# list(_finalizer_registry) should be atomic, while# list(_finalizer_registry.items()) is not.# key may have been removed from the registry# Clean up on exit# We hold on to references to functions in the arglist due to the# situation described below, where this function is called after this# module's globals are destroyed.# We check if the current process is None here because if# it's None, any call to ``active_children()`` will raise# an AttributeError (active_children winds up trying to# get attributes from util._current_process). One# situation where this can happen is if someone has# manipulated sys.modules, causing this module to be# garbage collected. The destructor for the module type# then replaces all values in the module dict with None.# For instance, after setuptools runs a test it replaces# sys.modules with a copy created earlier. See issues# #9775 and #15881. 
Also related: #4106, #9205, and# #9207.# Some fork aware types# Close fds except those specified# Close sys.stdin and replace stdin with os.devnull# Flush standard streams, if any# Start a program with only specified fds kept open# cleanup multiprocessing# Stop the ForkServer process if it's running# Stop the ResourceTracker process if it's running# bpo-37421: Explicitly call _run_finalizers() to remove immediately# temporary directories created by multiprocessing.util.get_temp_dir().b'sub_debug'u'sub_debug'b'sub_warning'u'sub_warning'b'get_logger'u'get_logger'b'get_temp_dir'u'get_temp_dir'b'register_after_fork'u'register_after_fork'b'is_exiting'u'is_exiting'b'Finalize'u'Finalize'b'ForkAwareThreadLock'u'ForkAwareThreadLock'b'ForkAwareLocal'u'ForkAwareLocal'b'close_all_fds_except'u'close_all_fds_except'b'SUBDEBUG'u'SUBDEBUG'b'SUBWARNING'u'SUBWARNING'b'[%(levelname)s/%(processName)s] %(message)s'u'[%(levelname)s/%(processName)s] %(message)s'b' + Returns logger used by multiprocessing + 'u' + Returns logger used by multiprocessing + 'b' + Turn on logging and add a handler which prints to stderr + 'u' + Turn on logging and add a handler which prints to stderr + 'b'address type of {address!r} unrecognized'u'address type of {address!r} unrecognized'b'pymp-'u'pymp-'b'created temp directory %s'u'created temp directory %s'b'after forker raised exception %s'u'after forker raised exception %s'b' + Class which supports object finalization using weakrefs + 'u' + Class which supports object finalization using weakrefs + 'b'Exitpriority ({0!r}) must be None or int, not {1!s}'u'Exitpriority ({0!r}) must be None or int, not {1!s}'b'Without object, exitpriority cannot be None'u'Without object, exitpriority cannot be None'b' + Run the callback unless it has already been called or cancelled + 'u' + Run the callback unless it has already been called or cancelled + 'b'finalizer ignored because different process'u'finalizer ignored because different process'b'finalizer calling %s with args %s and kwargs %s'u'finalizer calling %s with args %s and kwargs %s'b'finalizer no longer registered'u'finalizer no longer registered'b' + Cancel finalization of the object + 'u' + Cancel finalization of the object + 'b' + Return whether this finalizer is still waiting to invoke callback + 'u' + Return whether this finalizer is still waiting to invoke callback + 'b'<%s object, dead>'u'<%s object, dead>'b'<%s object, callback=%s'u'<%s object, callback=%s'b', args='u', args='b', kwargs='u', kwargs='b', exitpriority='u', exitpriority='b' + Run all finalizers whose exit priority is not None and at least minpriority + + Finalizers with highest priority are called first; finalizers with + the same priority will be called in reverse order of creation. + 'u' + Run all finalizers whose exit priority is not None and at least minpriority + + Finalizers with highest priority are called first; finalizers with + the same priority will be called in reverse order of creation. 
+ 'b'calling %s'u'calling %s'b' + Returns true if the process is shutting down + 'u' + Returns true if the process is shutting down + 'b'process shutting down'u'process shutting down'b'running all "atexit" finalizers with priority >= 0'u'running all "atexit" finalizers with priority >= 0'b'calling terminate() for daemon %s'u'calling terminate() for daemon %s'b'calling join() for process %s'u'calling join() for process %s'b'running the remaining "atexit" finalizers'u'running the remaining "atexit" finalizers'b'fd too large'u'fd too large'b'Close each file descriptor given as an argument'u'Close each file descriptor given as an argument'b'Cleanup multiprocessing resources when multiprocessing tests + completed.'u'Cleanup multiprocessing resources when multiprocessing tests + completed.'u'multiprocessing.util'distutils.util + +Miscellaneous utility functions -- anything that doesn't fit into +one of the other *util.py modules. +Return a string that identifies the current platform. This is used mainly to + distinguish platform-specific build directories and platform-specific built + distributions. Typically includes the OS name and version and the + architecture (as supplied by 'os.uname()'), although the exact information + included depends on the OS; eg. on Linux, the kernel version isn't + particularly important. + + Examples of returned values: + linux-i586 + linux-alpha (?) + solaris-2.6-sun4u + + Windows will return one of: + win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) + win32 (all others - specifically, sys.platform is returned) + + For other non-POSIX platforms, currently just returns 'sys.platform'. + + distutils.sysconfigx86x64armTARGET_TO_PLATVSCMD_ARG_TGT_ARCHReturn 'pathname' as a name that will work on the native filesystem, + i.e. split it on '/' and put it back together again using the current + directory separator. Needed because filenames in the setup script are + always supplied in Unix style, and have to be converted to the local + convention before we can actually use them in the filesystem. Raises + ValueError on non-Unix-ish systems if 'pathname' either starts or + ends with a slash. + path '%s' cannot be absolutepath '%s' cannot end with '/'change_rootReturn 'pathname' with 'new_root' prepended. If 'pathname' is + relative, this is equivalent to "os.path.join(new_root,pathname)". + Otherwise, it requires making 'pathname' relative and then joining the + two, which is tricky on DOS/Windows and Mac OS. + nothing known about platform '%s'_environ_checkedcheck_environEnsure that 'os.environ' has all the environment variables we + guarantee that users can use in config files, command-line options, + etc. Currently this includes: + HOME - user's home directory (Unix only) + PLAT - description of the current platform, including hardware + and OS (see 'get_platform()') + PLATsubst_varsPerform shell/Perl-style variable substitution on 'string'. Every + occurrence of '$' followed by a name is considered a variable, and + variable is substituted by the value found in the 'local_vars' + dictionary, or in 'os.environ' if it's not in 'local_vars'. + 'os.environ' is first checked/augmented to guarantee that it contains + certain values: see 'check_environ()'. Raise ValueError for any + variables not found in either 'local_vars' or 'os.environ'. 
+ _substvar_name\$([a-zA-Z_][a-zA-Z_0-9]*)invalid variable '$%s'grok_environment_error_wordchars_re_squote_re_dquote_re_init_regex[^\\\'\"%s ]*'(?:[^'\\]|\\.)*'"(?:[^"\\]|\\.)*"Split a string up according to Unix shell-like rules for quotes and + backslashes. In short: words are delimited by spaces, as long as those + spaces are not escaped by a backslash, or inside a quoted string. + Single and double quotes are equivalent, and the quote characters can + be backslash-escaped. The backslash is stripped from any two-character + escape sequence, leaving only the escaped character. The quote + characters are stripped from any quoted string. Returns a list of + words. + this can't happen (bad char '%c')bad string (mismatched %s quotes?)Perform some action that affects the outside world (eg. by + writing to the filesystem). Such actions are special because they + are disabled by the 'dry_run' flag. This method takes care of all + that bureaucracy for you; all you have to do is supply the + function to call and an argument tuple for it (to embody the + "external action" being performed), and an optional message to + print. + %s%rstrtoboolConvert a string representation of truth to true (1) or false (0). + + True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values + are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if + 'val' is anything else. + invalid truth value %rbyte_compilepy_filesdirectByte-compile a collection of Python source files to .pyc + files in a __pycache__ subdirectory. 'py_files' is a list + of files to compile; any files that don't end in ".py" are silently + skipped. 'optimize' must be one of the following: + 0 - don't optimize + 1 - normal optimization (like "python -O") + 2 - extra optimization (like "python -OO") + If 'force' is true, all files are recompiled regardless of + timestamps. + + The source filename encoded in each bytecode file defaults to the + filenames listed in 'py_files'; you can modify these with 'prefix' and + 'basedir'. 'prefix' is a string that will be stripped off of each + source filename, and 'base_dir' is a directory name that will be + prepended (after 'prefix' is stripped). You can supply either or both + (or neither) of 'prefix' and 'base_dir', as you wish. + + If 'dry_run' is true, doesn't actually do anything that would + affect the filesystem. + + Byte-compilation is either done directly in this interpreter process + with the standard py_compile module, or indirectly by writing a + temporary script and executing it. Normally, you should let + 'byte_compile()' figure out to use direct compilation or not (see + the source for details). The 'direct' flag is used by the script + generated in indirect mode; unless you know what you're doing, leave + it set to None. + byte-compiling is disabled.script_fdwriting byte-compilation script '%s'from distutils.util import byte_compile +files = [ + +byte_compile(files, optimize=%r, force=%r, + prefix=%r, base_dir=%r, + verbose=%r, dry_run=0, + direct=1) +removing %spy_compileinvalid prefix: filename %r doesn't start with %rcfile_basebyte-compiling %s to %sskipping byte-compilation of %s to %srfc822_escapeReturn a version of the string escaped for inclusion in an + RFC-822 header, by ensuring there are 8 spaces space after each newline. + run_2to3Invoke 2to3 on a list of Python files. + The files should all come from the build area, as the + modification is done in-place. 
To reduce the build time, + only files modified since the last invocation of this + function should be passed in the files argument.lib2to3.refactorDistutilsRefactoringToollib2to3.fixescopydir_run_2to3Recursively copy a directory, only copying new and changed files, + running run_2to3 over all newly copied Python modules afterward. + + If you give a template string, it's parsed like a MANIFEST.in. + distutils.filelistfilelistcopiedoutnameMixin2to3Mixin class for commands that run 2to3. + To configure 2to3, setup scripts may either change + the class variables, or inherit from individual commands + to override how 2to3 is invoked.# XXX what about the architecture? NT is Intel or Alpha,# Mac OS is M68k or PPC, etc.# convert_path ()# password database, do nothing# subst_vars ()# Function kept for backward compatibility.# Used to try clever things with EnvironmentErrors,# but nowadays str(exception) produces good messages.# Needed by 'split_quoted()'# This is a nice algorithm for splitting up a single string, since it# doesn't require character-by-character examination. It was a little# bit of a brain-bender to get it working right, though...# unescaped, unquoted whitespace: now# we definitely have a word delimiter# preserve whatever is being escaped;# will become part of the current word# slurp singly-quoted string# slurp doubly-quoted string# split_quoted ()# correct for singleton tuple# Late import to fix a bootstrap issue: _posixsubprocess is built by# setup.py, but setup.py uses distutils.# nothing is done if sys.dont_write_bytecode is True# First, if the caller didn't force us into direct or indirect mode,# figure out which mode we should be in. We take a conservative# approach: choose direct mode *only* if the current interpreter is# in debug mode and optimize is 0. If we're not in debug mode (-O# or -OO), we don't know which level of optimization this# interpreter is running with, so we can't do direct# byte-compilation and be certain that it's the right thing. Thus,# always compile indirectly if the current interpreter is in either# optimize mode, or if either optimization level was requested by# the caller.# "Indirect" byte-compilation: write a temporary script and then# run it with the appropriate flags.# XXX would be nice to write absolute filenames, just for# safety's sake (script should be more robust in the face of# chdir'ing before running it). But this requires abspath'ing# 'prefix' as well, and that breaks the hack in build_lib's# 'byte_compile()' method that carefully tacks on a trailing# slash (os.sep really) to make sure the prefix here is "just# right". This whole prefix business is rather delicate -- the# problem is that it's really a directory, but I'm treating it# as a dumb string, so trailing slashes and so forth matter.#py_files = map(os.path.abspath, py_files)#if prefix:# prefix = os.path.abspath(prefix)# "Direct" byte-compilation: use the py_compile module to compile# right here, right now. Note that the script generated in indirect# mode simply calls 'byte_compile()' in direct mode, a weird sort of# cross-process recursion. 
Hey, it works!# This lets us be lazy and not filter filenames in# the "install_lib" command.# Terminology from the py_compile module:# cfile - byte-compiled file# dfile - purported source filename (same as 'file' by default)# byte_compile ()# 2to3 support# Make this class local, to delay import of 2to3# provide list of fixers to run;# defaults to all from lib2to3.fixers# options dictionary# list of fixers to invoke even though they are marked as explicitb'distutils.util + +Miscellaneous utility functions -- anything that doesn't fit into +one of the other *util.py modules. +'u'distutils.util + +Miscellaneous utility functions -- anything that doesn't fit into +one of the other *util.py modules. +'b'Return a string that identifies the current platform. This is used mainly to + distinguish platform-specific build directories and platform-specific built + distributions. Typically includes the OS name and version and the + architecture (as supplied by 'os.uname()'), although the exact information + included depends on the OS; eg. on Linux, the kernel version isn't + particularly important. + + Examples of returned values: + linux-i586 + linux-alpha (?) + solaris-2.6-sun4u + + Windows will return one of: + win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) + win32 (all others - specifically, sys.platform is returned) + + For other non-POSIX platforms, currently just returns 'sys.platform'. + + 'u'Return a string that identifies the current platform. This is used mainly to + distinguish platform-specific build directories and platform-specific built + distributions. Typically includes the OS name and version and the + architecture (as supplied by 'os.uname()'), although the exact information + included depends on the OS; eg. on Linux, the kernel version isn't + particularly important. + + Examples of returned values: + linux-i586 + linux-alpha (?) + solaris-2.6-sun4u + + Windows will return one of: + win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) + win32 (all others - specifically, sys.platform is returned) + + For other non-POSIX platforms, currently just returns 'sys.platform'. + + 'b'x86'u'x86'b'x64'u'x64'b'arm'u'arm'b'VSCMD_ARG_TGT_ARCH'u'VSCMD_ARG_TGT_ARCH'b'Return 'pathname' as a name that will work on the native filesystem, + i.e. split it on '/' and put it back together again using the current + directory separator. Needed because filenames in the setup script are + always supplied in Unix style, and have to be converted to the local + convention before we can actually use them in the filesystem. Raises + ValueError on non-Unix-ish systems if 'pathname' either starts or + ends with a slash. + 'u'Return 'pathname' as a name that will work on the native filesystem, + i.e. split it on '/' and put it back together again using the current + directory separator. Needed because filenames in the setup script are + always supplied in Unix style, and have to be converted to the local + convention before we can actually use them in the filesystem. Raises + ValueError on non-Unix-ish systems if 'pathname' either starts or + ends with a slash. + 'b'path '%s' cannot be absolute'u'path '%s' cannot be absolute'b'path '%s' cannot end with '/''u'path '%s' cannot end with '/''b'Return 'pathname' with 'new_root' prepended. If 'pathname' is + relative, this is equivalent to "os.path.join(new_root,pathname)". + Otherwise, it requires making 'pathname' relative and then joining the + two, which is tricky on DOS/Windows and Mac OS. 
+[truncated: CodeQL Python extractor string pool — interned source text (docstrings, string literals, regex patterns, and extractor comments) from cached standard-library modules, including the tail of distutils.util followed by email.utils, uu, warnings, weakref, webbrowser, and the beginning of asyncio.windows_events; content is machine-generated database data, not human-readable source]
It will be closed in# BaseProactorEventLoop._stop_serving() which will make any# pending operations fail quickly.# already closed# Cancel remaining registered operations.# Nothing to do with cancelled futures# _WaitCancelFuture must not be cancelled# Wait until all cancelled overlapped complete: don't exit with running# overlapped to prevent a crash. Display progress every second if the# loop is still running.# handle a few events, or timeoutb'Selector and proactor event loops for Windows.'u'Selector and proactor event loops for Windows.'b'ProactorEventLoop'u'ProactorEventLoop'b'IocpProactor'u'IocpProactor'b'WindowsSelectorEventLoopPolicy'u'WindowsSelectorEventLoopPolicy'b'WindowsProactorEventLoopPolicy'u'WindowsProactorEventLoopPolicy'b'Subclass of Future which represents an overlapped operation. + + Cancelling it will immediately cancel the overlapped operation. + 'u'Subclass of Future which represents an overlapped operation. + + Cancelling it will immediately cancel the overlapped operation. + 'b'overlapped=<'u'overlapped=<'b'Cancelling an overlapped future failed'u'Cancelling an overlapped future failed'b'Subclass of Future which represents a wait handle.'u'Subclass of Future which represents a wait handle.'b'handle='u'handle='b'signaled'u'signaled'b'waiting'u'waiting'b'wait_handle='u'wait_handle='b'Failed to unregister the wait handle'u'Failed to unregister the wait handle'b'Subclass of Future which represents a wait for the cancellation of a + _WaitHandleFuture using an event. + 'u'Subclass of Future which represents a wait for the cancellation of a + _WaitHandleFuture using an event. + 'b'_WaitCancelFuture must not be cancelled'u'_WaitCancelFuture must not be cancelled'b'Class representing a pipe server. + + This is much like a bound, listening socket. + 'u'Class representing a pipe server. + + This is much like a bound, listening socket. + 'b'Windows version of selector event loop.'u'Windows version of selector event loop.'b'Windows version of proactor event loop using IOCP.'u'Windows version of proactor event loop using IOCP.'b'addr'u'addr'b'Pipe accept failed'u'Pipe accept failed'b'Accept pipe failed on pipe %r'u'Accept pipe failed on pipe %r'b'Proactor implementation using IOCP.'u'Proactor implementation using IOCP.'b'IocpProactor is closed'u'IocpProactor is closed'b'overlapped#=%s'u'overlapped#=%s'b'result#=%s'u'result#=%s'b'<%s %s>'u'<%s %s>'b'@P'u'@P'b'Wait for a handle. + + Return a Future object. The result of the future is True if the wait + completed, or False if the wait did not complete (on timeout). + 'u'Wait for a handle. + + Return a Future object. The result of the future is True if the wait + completed, or False if the wait did not complete (on timeout). + 'b'Unregister an overlapped object. + + Call this method when its future has been cancelled. The event can + already be signalled (pending in the proactor event queue). It is also + safe if the event is never signalled (because it was cancelled). + 'u'Unregister an overlapped object. + + Call this method when its future has been cancelled. The event can + already be signalled (pending in the proactor event queue). It is also + safe if the event is never signalled (because it was cancelled). 
+ 'b'negative timeout'u'negative timeout'b'timeout too big'u'timeout too big'b'GetQueuedCompletionStatus() returned an unexpected event'u'GetQueuedCompletionStatus() returned an unexpected event'b'err=%s transferred=%s key=%#x address=%#x'u'err=%s transferred=%s key=%#x address=%#x'b'Cancelling a future failed'u'Cancelling a future failed'b'%r is running after closing for %.1f seconds'u'%r is running after closing for %.1f seconds'u'asyncio.windows_events'u'windows_events'Various Windows specific bits and pieces.win32 onlyLike os.pipe() but with overlapped support and using handles not fds.\\.\pipe\python-pipe-{:d}-{:d}-flags_and_attribsWrapper for an overlapped pipe handle which is vaguely file-object like. + + The IOCP event loop can use these instead of socket objects. + I/O operation on closed pipeunclosed Replacement for subprocess.Popen using overlapped pipe handles. + + The stdin, stdout, stderr are None or instances of PipeHandle. + stdin_rfdstdout_wfdstderr_wfdstdin_whstdout_rhstderr_rhstdin_rhstdout_whstderr_wh# Constants/globals# Replacement for os.pipe() using handles instead of fds# Wrapper for a pipe handle# Replacement for subprocess.Popen using overlapped pipe handlesb'Various Windows specific bits and pieces.'u'Various Windows specific bits and pieces.'b'win32 only'u'win32 only'b'PipeHandle'u'PipeHandle'b'Like os.pipe() but with overlapped support and using handles not fds.'u'Like os.pipe() but with overlapped support and using handles not fds.'b'\\.\pipe\python-pipe-{:d}-{:d}-'u'\\.\pipe\python-pipe-{:d}-{:d}-'b'Wrapper for an overlapped pipe handle which is vaguely file-object like. + + The IOCP event loop can use these instead of socket objects. + 'u'Wrapper for an overlapped pipe handle which is vaguely file-object like. + + The IOCP event loop can use these instead of socket objects. + 'b'I/O operation on closed pipe'u'I/O operation on closed pipe'b'unclosed 'u'unclosed 'b'Replacement for subprocess.Popen using overlapped pipe handles. + + The stdin, stdout, stderr are None or instances of PipeHandle. + 'u'Replacement for subprocess.Popen using overlapped pipe handles. + + The stdin, stdout, stderr are None or instances of PipeHandle. 
+ 'b'bufsize'u'bufsize'u'asyncio.windows_utils'u'windows_utils'BYTEWCHARUINTDOUBLEVARIANT_BOOLULONGUSHORTSHORT_LARGE_INTEGERLARGE_INTEGER_ULARGE_INTEGERULARGE_INTEGERLPCOLESTRLPOLESTROLESTRLPCWSTRLPWSTRLPCSTRLPSTRLPCVOIDLPVOIDWPARAMLPARAMATOMLANGIDCOLORREFLGRPIDLCTYPELCIDHANDLEHACCELHBITMAPHBRUSHHCOLORSPACEHDCHDESKHDWPHENHMETAFILEHFONTHGDIOBJHGLOBALHHOOKHICONHINSTANCEHKEYHKLHLOCALHMENUHMETAFILEHMODULEHMONITORHPALETTEHPENHRGNHRSRCHSTRHTASKHWINSTAHWNDSC_HANDLESERVICE_STATUS_HANDLERECTtagRECT_RECTLRECTL_SMALL_RECTLeftTopRightBottomSMALL_RECT_COORDPOINTtagPOINT_POINTLPOINTLSIZEcxtagSIZESIZELRGBredgreenblueFILETIMEdwLowDateTimedwHighDateTime_FILETIMEMSGhWndwParamlParamtagMSG260MAX_PATHWIN32_FIND_DATAAdwFileAttributesftCreationTimeftLastAccessTimeftLastWriteTimenFileSizeHighnFileSizeLowdwReserved0dwReserved1cFileNamecAlternateFileNameWIN32_FIND_DATAWLPBOOLPBOOLPBOOLEANLPBYTEPBYTEPCHARLPCOLORREFLPDWORDPDWORDLPFILETIMEPFILETIMEPFLOATLPHANDLEPHANDLEPHKEYLPHKLLPINTPINTPLARGE_INTEGERPLCIDLPLONGPLONGLPMSGPMSGLPPOINTPPOINTPPOINTLLPRECTPRECTLPRECTLPRECTLLPSC_HANDLEPSHORTLPSIZEPSIZELPSIZELPSIZELPSMALL_RECTLPUINTPUINTPULARGE_INTEGERPULONGPUSHORTPWCHARLPWIN32_FIND_DATAAPWIN32_FIND_DATAALPWIN32_FIND_DATAWPWIN32_FIND_DATAWLPWORDPWORD# The most useful windows datatypes#UCHAR = ctypes.c_uchar# in the windows header files, these are structures.# WPARAM is defined as UINT_PTR (unsigned type)# LPARAM is defined as LONG_PTR (signed type)# HANDLE types# in the header files: void *# Some important structure definitions# Pointer typesb'Left'u'Left'b'Top'u'Top'b'Right'u'Right'b'Bottom'u'Bottom'b'Y'u'Y'b'cx'u'cx'b'dwLowDateTime'u'dwLowDateTime'b'dwHighDateTime'u'dwHighDateTime'b'hWnd'u'hWnd'b'wParam'u'wParam'b'lParam'u'lParam'b'dwFileAttributes'u'dwFileAttributes'b'ftCreationTime'u'ftCreationTime'b'ftLastAccessTime'u'ftLastAccessTime'b'ftLastWriteTime'u'ftLastWriteTime'b'nFileSizeHigh'u'nFileSizeHigh'b'nFileSizeLow'u'nFileSizeLow'b'dwReserved0'u'dwReserved0'b'dwReserved1'u'dwReserved1'b'cFileName'u'cFileName'b'cAlternateFileName'u'cAlternateFileName'u'ctypes.wintypes'u'wintypes' +Read and write ZIP files. + +XXX references to utf-8 need further investigation. +BadZipFileBadZipfileZIP_STOREDZIP_BZIP2ZIP_LZMAZipInfoPyZipFileLargeZipFilePath + Raised when writing a zipfile, the zipfile requires ZIP64 extensions + and those extensions are disabled. 
+ ZIP64_LIMITZIP_FILECOUNT_LIMITZIP_MAX_COMMENTDEFAULT_VERSIONZIP64_VERSIONBZIP2_VERSIONLZMA_VERSIONMAX_EXTRACT_VERSION<4s4H2LHstructEndArchivePKstringEndArchivesizeEndCentDir_ECD_SIGNATURE_ECD_DISK_NUMBER_ECD_DISK_START_ECD_ENTRIES_THIS_DISK_ECD_ENTRIES_TOTAL_ECD_SIZE_ECD_OFFSET_ECD_COMMENT_SIZE_ECD_COMMENT_ECD_LOCATION<4s4B4HL2L5H2LstructCentralDirPKstringCentralDirsizeCentralDir_CD_SIGNATURE_CD_CREATE_VERSION_CD_CREATE_SYSTEM_CD_EXTRACT_VERSION_CD_EXTRACT_SYSTEM_CD_FLAG_BITS_CD_COMPRESS_TYPE_CD_TIME_CD_DATE_CD_CRC_CD_COMPRESSED_SIZE_CD_UNCOMPRESSED_SIZE_CD_FILENAME_LENGTH_CD_EXTRA_FIELD_LENGTH_CD_COMMENT_LENGTH_CD_DISK_NUMBER_START_CD_INTERNAL_FILE_ATTRIBUTES_CD_EXTERNAL_FILE_ATTRIBUTES_CD_LOCAL_HEADER_OFFSET<4s2B4HL2L2HstructFileHeaderPKstringFileHeadersizeFileHeader_FH_SIGNATURE_FH_EXTRACT_VERSION_FH_EXTRACT_SYSTEM_FH_GENERAL_PURPOSE_FLAG_BITS_FH_COMPRESSION_METHOD_FH_LAST_MOD_TIME_FH_LAST_MOD_DATE_FH_CRC_FH_COMPRESSED_SIZE_FH_UNCOMPRESSED_SIZE_FH_FILENAME_LENGTH_FH_EXTRA_FIELD_LENGTH<4sLQLstructEndArchive64LocatorPKstringEndArchive64LocatorsizeEndCentDir64Locator<4sQ2H2L4QstructEndArchive64PKstringEndArchive64sizeEndCentDir64_CD64_SIGNATURE_CD64_DIRECTORY_RECSIZE_CD64_CREATE_VERSION_CD64_EXTRACT_VERSION_CD64_DISK_NUMBER_CD64_DISK_NUMBER_START_CD64_NUMBER_ENTRIES_THIS_DISK_CD64_NUMBER_ENTRIES_TOTAL_CD64_DIRECTORY_SIZE_CD64_OFFSET_START_CENTDIR1346957600x08074b50_DD_SIGNATURE|"?*illegalExtract the ZipInfo object 'member' to a physical + file on the path targetpath. + invalid_path_partsCheck for errors before writing a file to the archive.Duplicate name: %rwrite() requires mode 'w', 'x', or 'a'Attempt to write ZIP archive that was already closedrequires_zip64Files countFilesizeZipfile size would require ZIP64 extensionsPut the bytes from filename into the archive under the name + arcname.Attempt to write to ZIP archive that was already closedCan't write to ZIP archive while an open writing handle existswritestrzinfo_or_arcnameWrite a file into the archive. The contents is 'data', which + may be either a 'str' or a 'bytes' instance; if it is a 'str', + it is encoded as UTF-8 first. + 'zinfo_or_arcname' is either a ZipInfo instance or + the name of the file in the archive.168930o40775Can't write to ZIP archive while an open writing handle exists.Call the "close()" method in case the user forgot.Close the file, and for mode 'w', 'x' and 'a' write the ending + records.Can't close the ZIP file while there is an open writing handle on it. Close the writing handle before closing the zip."Can't close the ZIP file while there is ""an open writing handle on it. ""Close the writing handle before closing the zip."_write_end_recordextra_datapos2centDirCountcentDirSizecentDirOffsetCentral directory offsetCentral directory sizezip64endreczip64locrecClass to create ZIP archives with Python library files and packages.writepyfilterfuncAdd all files from "pathname" to the ZIP archive. + + If pathname is a package directory, search the directory and + all package subdirectories recursively for all *.py and enter + the modules into the archive. If pathname is a plain + directory, listdir *.py and enter all modules. Else, pathname + must be a Python *.py file and the module will be put into the + archive. Added modules are always module.pyc. + This method will compile the module.py into module.pyc if + necessary. + If filterfunc(pathname) is given, it is called with every argument. + When it is False, the file or directory is skipped. 
+ %s %r skipped by filterfuncinitnameAdding package in_get_codenameAddingfile %r skipped by filterfuncAdding files from directoryFiles added with writepy() must end with ".py"Adding fileReturn (filename, archivename) for the path. + + Given a module name path, return the correct file path and + archive name, compiling if necessary. For example, given + /python/lib/string, return (/python/lib/string.pyc, string). + Compilingfile_pyfile_pycpycache_opt0pycache_opt1pycache_opt2invalid value for 'optimize': {!r}archivename_parents + Given a path with elements separated by + posixpath.sep, generate all parents of that path. + + >>> list(_parents('b/d')) + ['b'] + >>> list(_parents('/b/d/')) + ['/b'] + >>> list(_parents('b/d/f/')) + ['b/d', 'b'] + >>> list(_parents('b')) + [] + >>> list(_parents('')) + [] + _ancestry + Given a path with elements separated by + posixpath.sep, generate all elements of that path + + >>> list(_ancestry('b/d')) + ['b/d', 'b'] + >>> list(_ancestry('/b/d/')) + ['/b/d', '/b'] + >>> list(_ancestry('b/d/f/')) + ['b/d/f', 'b/d', 'b'] + >>> list(_ancestry('b')) + ['b'] + >>> list(_ancestry('')) + [] + _dedupeDeduplicate an iterable in original order_differenceminuendsubtrahend + Return items in minuend not in subtrahend, retaining order + with O(1) lookup. + CompleteDirs + A ZipFile subclass that ensures that implied directories + are always included in the namelist. + _implied_dirsas_dirs_name_setresolve_dir + If the name represents a directory, return that name + as a directory (with the trailing slash). + dir_matchmake + Given a source (filename or zipfile), return an + appropriate CompleteDirs subclass. + FastLookup + ZipFile subclass to ensure implicit + dirs exist and are resolved rapidly. + __names__lookup + A pathlib-compatible interface for zip files. + + Consider a zip file with this structure:: + + . + ├── a.txt + └── b + ├── c.txt + └── d + └── e.txt + + >>> data = io.BytesIO() + >>> zf = ZipFile(data, 'w') + >>> zf.writestr('a.txt', 'content of a') + >>> zf.writestr('b/c.txt', 'content of c') + >>> zf.writestr('b/d/e.txt', 'content of e') + >>> zf.filename = 'abcde.zip' + + Path accepts the zipfile object itself or a filename + + >>> root = Path(zf) + + From there, several path operations are available. + + Directory iteration (including the zip file itself): + + >>> a, b = root.iterdir() + >>> a + Path('abcde.zip', 'a.txt') + >>> b + Path('abcde.zip', 'b/') + + name property: + + >>> b.name + 'b' + + join with divide operator: + + >>> c = b / 'c.txt' + >>> c + Path('abcde.zip', 'b/c.txt') + >>> c.name + 'c.txt' + + Read text: + + >>> c.read_text() + 'content of c' + + existence: + + >>> c.exists() + True + >>> (b / 'missing.txt').exists() + False + + Coercion to string: + + >>> str(c) + 'abcde.zip/b/c.txt' + {self.__class__.__name__}({self.root.filename!r}, {self.at!r})__repratread_textstrmread_bytes_is_child_nextiterdirCan't listdir a filejoinpathparent_atA simple command-line interface for zipfile module.Show listing of a zipfileExtract zipfile into target dirCreate zipfile from sourcesTest if a zipfile is validbadfileThe following enclosed file is corrupted: {!r}Done testingzip_nameaddToZipzippathnm# We may need its compression method# Pre-3.2 compatibility names# constants for Zip file compression methods# Other ZIP compression methods not supported# we recognize (but not necessarily support) all features up to that version# Below are some formats and associated data for reading/writing headers using# the struct module. 
The names and structures of headers/records are those used# in the PKWARE description of the ZIP file format:# http://www.pkware.com/documents/casestudies/APPNOTE.TXT# (URL valid as of January 2008)# The "end of central directory" structure, magic number, size, and indices# (section V.I in the format document)# These last two indices are not part of the structure as defined in the# spec, but they are used internally by this module as a convenience# The "central directory" structure, magic number, size, and indices# of entries in the structure (section V.F in the format document)# indexes of entries in the central directory structure# The "local file header" structure, magic number, size, and indices# (section V.A in the format document)# The "Zip64 end of central directory locator" structure, magic number, and size# The "Zip64 end of central directory" record, magic number, size, and indices# (section V.G in the format document)# Remove Extra Fields with specified IDs.# file has correct magic number# If the seek fails, the file is not large enough to contain a ZIP64# end-of-archive record, so just return the end record we were given.# Assume no 'zip64 extensible data'# Update the original endrec using data from the ZIP64 record# Determine file size# Check to see if this is ZIP file with no archive comment (the# "end of central directory" structure should be the last item in the# file if this is the case).# the signature is correct and there's no comment, unpack structure# Append a blank comment and record start offset# Try to read the "Zip64 end of central directory" structure# Either this is not a ZIP file, or it is a ZIP file with an archive# comment. Search the end of the file for the "end of central directory"# record signature. The comment is the last item in the ZIP file and may be# up to 64K long. It is assumed that the "end of central directory" magic# number does not appear in the comment.# found the magic number; attempt to unpack and interpret# Zip file is corrupted.#as claimed by the zip file# Unable to find a valid end of central directory structure# Original file name in archive# Terminate the file name at the first null byte. Null bytes in file# names are used as tricks by viruses in archives.# This is used to ensure paths in generated ZIP files always use# forward slashes as the directory separator, as required by the# ZIP format specification.# Normalized file name# year, month, day, hour, min, sec# Standard values:# Type of compression for the file# Level for the compressor# Comment for each file# ZIP extra data# System which created ZIP archive# Assume everything else is unix-y# Version which created ZIP archive# Version needed to extract archive# Must be zero# ZIP flag bits# Volume number of file header# Internal attributes# External file attributes# Other attributes are set by class ZipFile:# header_offset Byte offset to the file header# CRC CRC-32 of the uncompressed file# compress_size Size of the compressed file# file_size Size of the uncompressed file# Set these to zero because we write them after the file data# File is larger than what fits into a 4 byte integer,# fall back to the ZIP64 extension# Try to decode the extra field.# ZIP64 extension (large files and/or large archives)# Create ZipInfo instance to store file information# Unix attributes# MS-DOS directory flag# ZIP encryption uses the CRC32 one-byte primitive for scrambling some# internal keys. 
We noticed that a direct implementation is faster than# relying on binascii.crc32().# ZIP supports a password-based form of encryption. Even though known# plaintext attacks have been found against it, it is still useful# to be able to get data out of such a file.# Usage:# zd = _ZipDecrypter(mypwd)# plain_bytes = zd(cypher_bytes)# compresslevel is ignored for ZIP_LZMA# Provide the tell method for unseekable stream# Max size supported by decompressor.# Read from compressed files in 4k blocks.# Chunk size to read during seek# compare against the file type from extended local headers# compare against the CRC otherwise# The first 12 bytes in the cypher stream is an encryption header# used to strengthen the algorithm. The first 11 bytes are# completely random, while the 12th contains the MSB of the CRC,# or the MSB of the file time depending on the header type# and is used to check the correctness of the password.# Shortcut common case - newline found in buffer.# Return up to 512 bytes to reduce allocation overhead for tight loops.# Update the CRC using the given data.# No need to compute the CRC if we don't have a reference value# Check the CRC if we're at the end of the file# Read up to n compressed bytes with at most one read() system call,# decrypt and decompress them.# Read from file.## Handle unconsumed data.# Seek from start of file# Seek from current position# Seek from EOF# Just move the _offset index if the new position is in the _readbuffer# Position is before the current position. Reset the ZipExtFile# Flush any data from the compressor, and update header info# Write updated header info# Write CRC and file sizes after the file data# Seek backwards and write file header (which will now include# correct CRC and file sizes)# Preserve current position in file# Successfully written: Add file to our caches# Level of printing: 0 through 3# Find file info given name# List of ZipInfo instances for archive# Method of compression# Check if we were passed a file-like object# No, it's a filename# set the modified flag so central directory gets written# even if no files are added to the archive# Some file-like objects can provide tell() but not seek()# See if file is a zip file# seek to start of directory and overwrite# file is not a zip file, just append# bytes in central directory# offset of central directory# archive comment# "concat" is zero, unless zip was concatenated to another file# If Zip64 extension structures are present, account for them# self.start_dir: Position of start of central directory# UTF-8 file names extension# Historical ZIP filename encoding# Convert date/time code to (year, month, day, hour, min, sec)# update total bytes read from central directory# Read by chunks, to avoid an OverflowError or a# MemoryError with very large embedded files.# Check CRC-32# check for valid comment length# Make sure we have an info object# 'name' is already an info object# Get info object for name# Open for reading:# Skip the file header:# Zip 2.7: compressed patched data# strong encryption# UTF-8 filename# check for encrypted flag & handle password# Sizes and CRC are overwritten with correct data after processing the file# Compressed data includes an end-of-stream (EOS) marker# permissions: ?rw-------# Compressed size can be larger than uncompressed size# remove trailing dots# rejoin, removing empty parts.# build the destination pathname, replacing# interpret absolute pathname as relative, remove drive letter or# UNC path, redundant separators, "." and ".." 
components.# filter illegal characters on Windows# Create all upper directories if necessary.# Start of header bytes# drwxrwxr-x# ?rw-------# Uncompressed size# write ending records# write central directory# Append a ZIP64 field to the extra's# Write end-of-zip-archive record# Need to write the ZIP64 end-of-archive records# This is a package directory, add it# Add all *.py files and package subdirectories# Recursive call# This is NOT a package directory, add its files at top level# legacy mode: use whatever file is present# Use .pyc file.# Use the __pycache__/*.pyc file, but write it to the legacy pyc# file name in the archive.# Compile py into PEP 3147 pyc file.# new mode: use given optimization level# Only allow for FastPath when supplied zipfile is read-only# else: ignoreb' +Read and write ZIP files. + +XXX references to utf-8 need further investigation. +'u' +Read and write ZIP files. + +XXX references to utf-8 need further investigation. +'b'BadZipFile'u'BadZipFile'b'BadZipfile'u'BadZipfile'b'ZIP_STORED'u'ZIP_STORED'b'ZIP_DEFLATED'u'ZIP_DEFLATED'b'ZIP_BZIP2'u'ZIP_BZIP2'b'ZIP_LZMA'u'ZIP_LZMA'b'is_zipfile'u'is_zipfile'b'ZipInfo'u'ZipInfo'b'ZipFile'u'ZipFile'b'PyZipFile'u'PyZipFile'b'LargeZipFile'u'LargeZipFile'b'Path'u'Path'b' + Raised when writing a zipfile, the zipfile requires ZIP64 extensions + and those extensions are disabled. + 'u' + Raised when writing a zipfile, the zipfile requires ZIP64 extensions + and those extensions are disabled. + 'b'<4s4H2LH'b'PK'b'<4s4B4HL2L5H2L'u'<4s4B4HL2L5H2L'b'PK'b'<4s2B4HL2L2H'u'<4s2B4HL2L2H'b'PK'b'<4sLQL'u'<4sLQL'b'PK'b'<4sQ2H2L4Q'u'<4sQ2H2L4Q'b'PK'b'|"?*'u':<>|"?*'b'Extract the ZipInfo object 'member' to a physical + file on the path targetpath. + 'u'Extract the ZipInfo object 'member' to a physical + file on the path targetpath. + 'b'Check for errors before writing a file to the archive.'u'Check for errors before writing a file to the archive.'b'Duplicate name: %r'u'Duplicate name: %r'b'write() requires mode 'w', 'x', or 'a''u'write() requires mode 'w', 'x', or 'a''b'Attempt to write ZIP archive that was already closed'u'Attempt to write ZIP archive that was already closed'b'Files count'u'Files count'b'Filesize'u'Filesize'b'Zipfile size'u'Zipfile size'b' would require ZIP64 extensions'u' would require ZIP64 extensions'b'Put the bytes from filename into the archive under the name + arcname.'u'Put the bytes from filename into the archive under the name + arcname.'b'Attempt to write to ZIP archive that was already closed'u'Attempt to write to ZIP archive that was already closed'b'Can't write to ZIP archive while an open writing handle exists'u'Can't write to ZIP archive while an open writing handle exists'b'Write a file into the archive. The contents is 'data', which + may be either a 'str' or a 'bytes' instance; if it is a 'str', + it is encoded as UTF-8 first. + 'zinfo_or_arcname' is either a ZipInfo instance or + the name of the file in the archive.'u'Write a file into the archive. The contents is 'data', which + may be either a 'str' or a 'bytes' instance; if it is a 'str', + it is encoded as UTF-8 first. 
+ 'zinfo_or_arcname' is either a ZipInfo instance or + the name of the file in the archive.'b'Can't write to ZIP archive while an open writing handle exists.'u'Can't write to ZIP archive while an open writing handle exists.'b'Call the "close()" method in case the user forgot.'u'Call the "close()" method in case the user forgot.'b'Close the file, and for mode 'w', 'x' and 'a' write the ending + records.'u'Close the file, and for mode 'w', 'x' and 'a' write the ending + records.'b'Can't close the ZIP file while there is an open writing handle on it. Close the writing handle before closing the zip.'u'Can't close the ZIP file while there is an open writing handle on it. Close the writing handle before closing the zip.'b'Central directory offset'u'Central directory offset'b'Central directory size'u'Central directory size'b'Class to create ZIP archives with Python library files and packages.'u'Class to create ZIP archives with Python library files and packages.'b'Add all files from "pathname" to the ZIP archive. + + If pathname is a package directory, search the directory and + all package subdirectories recursively for all *.py and enter + the modules into the archive. If pathname is a plain + directory, listdir *.py and enter all modules. Else, pathname + must be a Python *.py file and the module will be put into the + archive. Added modules are always module.pyc. + This method will compile the module.py into module.pyc if + necessary. + If filterfunc(pathname) is given, it is called with every argument. + When it is False, the file or directory is skipped. + 'u'Add all files from "pathname" to the ZIP archive. + + If pathname is a package directory, search the directory and + all package subdirectories recursively for all *.py and enter + the modules into the archive. If pathname is a plain + directory, listdir *.py and enter all modules. Else, pathname + must be a Python *.py file and the module will be put into the + archive. Added modules are always module.pyc. + This method will compile the module.py into module.pyc if + necessary. + If filterfunc(pathname) is given, it is called with every argument. + When it is False, the file or directory is skipped. + 'b'%s %r skipped by filterfunc'u'%s %r skipped by filterfunc'b'Adding package in'u'Adding package in'b'Adding'u'Adding'b'file %r skipped by filterfunc'u'file %r skipped by filterfunc'b'Adding files from directory'u'Adding files from directory'b'Files added with writepy() must end with ".py"'u'Files added with writepy() must end with ".py"'b'Adding file'u'Adding file'b'Return (filename, archivename) for the path. + + Given a module name path, return the correct file path and + archive name, compiling if necessary. For example, given + /python/lib/string, return (/python/lib/string.pyc, string). + 'u'Return (filename, archivename) for the path. + + Given a module name path, return the correct file path and + archive name, compiling if necessary. For example, given + /python/lib/string, return (/python/lib/string.pyc, string). + 'b'Compiling'u'Compiling'b'invalid value for 'optimize': {!r}'u'invalid value for 'optimize': {!r}'b' + Given a path with elements separated by + posixpath.sep, generate all parents of that path. + + >>> list(_parents('b/d')) + ['b'] + >>> list(_parents('/b/d/')) + ['/b'] + >>> list(_parents('b/d/f/')) + ['b/d', 'b'] + >>> list(_parents('b')) + [] + >>> list(_parents('')) + [] + 'u' + Given a path with elements separated by + posixpath.sep, generate all parents of that path. 
+ + >>> list(_parents('b/d')) + ['b'] + >>> list(_parents('/b/d/')) + ['/b'] + >>> list(_parents('b/d/f/')) + ['b/d', 'b'] + >>> list(_parents('b')) + [] + >>> list(_parents('')) + [] + 'b' + Given a path with elements separated by + posixpath.sep, generate all elements of that path + + >>> list(_ancestry('b/d')) + ['b/d', 'b'] + >>> list(_ancestry('/b/d/')) + ['/b/d', '/b'] + >>> list(_ancestry('b/d/f/')) + ['b/d/f', 'b/d', 'b'] + >>> list(_ancestry('b')) + ['b'] + >>> list(_ancestry('')) + [] + 'u' + Given a path with elements separated by + posixpath.sep, generate all elements of that path + + >>> list(_ancestry('b/d')) + ['b/d', 'b'] + >>> list(_ancestry('/b/d/')) + ['/b/d', '/b'] + >>> list(_ancestry('b/d/f/')) + ['b/d/f', 'b/d', 'b'] + >>> list(_ancestry('b')) + ['b'] + >>> list(_ancestry('')) + [] + 'b'Deduplicate an iterable in original order'u'Deduplicate an iterable in original order'b' + Return items in minuend not in subtrahend, retaining order + with O(1) lookup. + 'u' + Return items in minuend not in subtrahend, retaining order + with O(1) lookup. + 'b' + A ZipFile subclass that ensures that implied directories + are always included in the namelist. + 'u' + A ZipFile subclass that ensures that implied directories + are always included in the namelist. + 'b' + If the name represents a directory, return that name + as a directory (with the trailing slash). + 'u' + If the name represents a directory, return that name + as a directory (with the trailing slash). + 'b' + Given a source (filename or zipfile), return an + appropriate CompleteDirs subclass. + 'u' + Given a source (filename or zipfile), return an + appropriate CompleteDirs subclass. + 'b' + ZipFile subclass to ensure implicit + dirs exist and are resolved rapidly. + 'u' + ZipFile subclass to ensure implicit + dirs exist and are resolved rapidly. + 'u' + A pathlib-compatible interface for zip files. + + Consider a zip file with this structure:: + + . + ├── a.txt + └── b + ├── c.txt + └── d + └── e.txt + + >>> data = io.BytesIO() + >>> zf = ZipFile(data, 'w') + >>> zf.writestr('a.txt', 'content of a') + >>> zf.writestr('b/c.txt', 'content of c') + >>> zf.writestr('b/d/e.txt', 'content of e') + >>> zf.filename = 'abcde.zip' + + Path accepts the zipfile object itself or a filename + + >>> root = Path(zf) + + From there, several path operations are available. 
+ + Directory iteration (including the zip file itself): + + >>> a, b = root.iterdir() + >>> a + Path('abcde.zip', 'a.txt') + >>> b + Path('abcde.zip', 'b/') + + name property: + + >>> b.name + 'b' + + join with divide operator: + + >>> c = b / 'c.txt' + >>> c + Path('abcde.zip', 'b/c.txt') + >>> c.name + 'c.txt' + + Read text: + + >>> c.read_text() + 'content of c' + + existence: + + >>> c.exists() + True + >>> (b / 'missing.txt').exists() + False + + Coercion to string: + + >>> str(c) + 'abcde.zip/b/c.txt' + 'b'{self.__class__.__name__}({self.root.filename!r}, {self.at!r})'u'{self.__class__.__name__}({self.root.filename!r}, {self.at!r})'b'Can't listdir a file'u'Can't listdir a file'b'A simple command-line interface for zipfile module.'u'A simple command-line interface for zipfile module.'b''u''b'Show listing of a zipfile'u'Show listing of a zipfile'b'Extract zipfile into target dir'u'Extract zipfile into target dir'b'Create zipfile from sources'u'Create zipfile from sources'b'Test if a zipfile is valid'u'Test if a zipfile is valid'b'The following enclosed file is corrupted: {!r}'u'The following enclosed file is corrupted: {!r}'b'Done testing'u'Done testing'u'zipfile'zipimport provides support for importing Python modules from Zip archives. + +This module exports three objects: +- zipimporter: a class; its constructor takes a path to a Zip archive. +- ZipImportError: exception raised by zipimporter objects. It's a + subclass of ImportError, so it can be caught as ImportError, too. +- _zip_directory_cache: a dict, mapping archive paths to zip directory + info dicts, as used in zipimporter._files. + +It is usually not needed to use the zipimport module explicitly; it is +used by the builtin import mechanism for sys.path items that are paths +to Zip archives. +ZipImportErroralt_path_sep_module_typeEND_CENTRAL_DIR_SIZESTRING_END_ARCHIVEMAX_COMMENT_LENzipimporter(archivepath) -> zipimporter object + + Create a new zipimporter instance. 'archivepath' must be a path to + a zipfile, or to a specific path inside a zipfile. For example, it can be + '/tmp/myimport.zip', or '/tmp/myimport.zip/mydirectory', if mydirectory is a + valid directory inside the archive. + + 'ZipImportError is raised if 'archivepath' doesn't point to a valid Zip + archive. + + The 'archive' attribute of zipimporter objects contains the name of the + zipfile targeted. + archive path is emptynot a Zip file_read_directory_filesfind_loader(fullname, path=None) -> self, str or None. + + Search for a module specified by 'fullname'. 'fullname' must be the + fully qualified (dotted) module name. It returns the zipimporter + instance itself if the module was found, a string containing the + full path name if it's possibly a portion of a namespace package, + or None otherwise. The optional 'path' argument is ignored -- it's + there for compatibility with the importer protocol. + _get_module_info_get_module_pathmodpath_is_dirfind_module(fullname, path=None) -> self or None. + + Search for a module specified by 'fullname'. 'fullname' must be the + fully qualified (dotted) module name. It returns the zipimporter + instance itself if the module was found, or None if it wasn't. + The optional 'path' argument is ignored -- it's there for compatibility + with the importer protocol. + get_code(fullname) -> code object. + + Return the code object for the specified module. Raise ZipImportError + if the module couldn't be found. + _get_module_codeget_data(pathname) -> string with file data. + + Return the data associated with 'pathname'. 
Raise OSError if + the file wasn't found. + toc_entry_get_dataget_filename(fullname) -> filename string. + + Return the filename for the specified module. + get_source(fullname) -> source string. + + Return the source code for the specified module. Raise ZipImportError + if the module couldn't be found, return None if the archive does + contain the module, but has no source for it. + can't find module is_package(fullname) -> bool. + + Return True if the module specified by fullname is a package. + Raise ZipImportError if the module couldn't be found. + load_module(fullname) -> module. + + Load the module specified by 'fullname'. 'fullname' must be the + fully qualified (dotted) module name. It returns the imported + module, or raises ZipImportError if it wasn't found. + Loaded module not found in sys.modulesimport {} # loaded from Zip {}Return the ResourceReader for a package in a zip file. + + If 'fullname' is a package within the zip file, return the + 'ResourceReader' object for the package. Otherwise return None. + _ZipImportResourceReaderimportlib.abc__init__.pyc_zip_searchorderisbytecodecan't open Zip file: header_positioncan't read Zip file: max_comment_startnot a Zip file: corrupt Zip file: header_sizebad central directory size: bad central directory offset: arc_offsetbad central directory size or offset: EOF read where not expecteddata_sizename_sizeextra_sizecomment_sizefile_offsetbad local header offset: cp437_tablezipimport: found {} names in {!r} +  !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~ÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜ¢£¥₧ƒáíóúñѪº¿⌐¬½¼¡«»░▒▓│┤╡╢╖╕╣║╗╝╜╛┐└┴┬├─┼╞╟╚╔╩╦╠═╬╧╨╤╥╙╘╒╓╫╪┘┌█▄▌▐▀αßΓπΣσµτΦΘΩδ∞φε∩≡±≥≤⌠⌡÷≈°∙·√ⁿ²■ '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f''\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f'' !"#$%&\'()*+,-./''0123456789:;<=>?''@ABCDEFGHIJKLMNO''PQRSTUVWXYZ[\\]^_''`abcdefghijklmno''pqrstuvwxyz{|}~\x7f''\xc7\xfc\xe9\xe2\xe4\xe0\xe5\xe7''\xea\xeb\xe8\xef\xee\xec\xc4\xc5''\xc9\xe6\xc6\xf4\xf6\xf2\xfb\xf9''\xff\xd6\xdc\xa2\xa3\xa5\u20a7\u0192''\xe1\xed\xf3\xfa\xf1\xd1\xaa\xba''\xbf\u2310\xac\xbd\xbc\xa1\xab\xbb''\u2591\u2592\u2593\u2502\u2524\u2561\u2562\u2556''\u2555\u2563\u2551\u2557\u255d\u255c\u255b\u2510''\u2514\u2534\u252c\u251c\u2500\u253c\u255e\u255f''\u255a\u2554\u2569\u2566\u2560\u2550\u256c\u2567''\u2568\u2564\u2565\u2559\u2558\u2552\u2553\u256b''\u256a\u2518\u250c\u2588\u2584\u258c\u2590\u2580''\u03b1\xdf\u0393\u03c0\u03a3\u03c3\xb5\u03c4''\u03a6\u0398\u03a9\u03b4\u221e\u03c6\u03b5\u2229''\u2261\xb1\u2265\u2264\u2320\u2321\xf7\u2248''\xb0\u2219\xb7\u221a\u207f\xb2\u25a0\xa0'_importing_zlib_get_decompress_funczipimport: zlib UNAVAILABLEcan't decompress data; zlib not availablezipimport: zlib availabledatapathnegative data sizebad local file header: raw_datazipimport: can't read data_eq_mtime_unmarshal_code_get_pyc_source_get_mtime_and_size_of_sourcecompiled module is not a code object_normalize_line_endings_compile_source_parse_dostimeuncompressed_sizetrying {}{}{}Private class used to support ZipImport.get_resource_reader(). + + This class is allowed to reference all the innards and private parts of + the zipimporter. 
+ fullname_as_pathpathlibfullname_pathrelative_torelative_pathpackage_pathsubdirs_seenrelative#from importlib import _bootstrap_external#from importlib import _bootstrap # for _verbose_message# for _verbose_message# for check_hash_based_pycs# for open# for loads# for modules# for mktime# _read_directory() cache# Split the "subdirectory" from the Zip archive path, lookup a matching# entry in sys.path_importer_cache, fetch the file directory from there# if found, or else read it from the archive.# On Windows a ValueError is raised for too long paths.# Back up one path element.# it exists# stat.S_ISREG# it's a not file# a prefix directory following the ZIP file path.# Check whether we can satisfy the import of the module named by# 'fullname', or whether it could be a portion of a namespace# package. Return self if we can load it, a string containing the# full path if it's a possible namespace portion, None if we# can't load it.# This is a module or package.# Not a module or regular package. See if this is a directory, and# therefore possibly a portion of a namespace package.# We're only interested in the last path component of fullname# earlier components are recorded in self.prefix.# This is possibly a portion of a namespace# package. Return the string representing its path,# without a trailing separator.# 'fullname'. Return self if we can, None if we can't.# Return a string matching __file__ for the named module# Deciding the filename requires working out where the code# would come from if the module was actually loaded# we have the module, but no source# Return a bool signifying whether the module is a package or not.# Load and return the module named by 'fullname'.# add __path__ to the module *before* the code gets# executed# _zip_searchorder defines how we search for a module in the Zip# archive: we first search for a package __init__, then for# non-package .pyc, and .py entries. The .pyc entries# are swapped by initzipimport() if we run in optimized mode. Also,# '/' is replaced by path_sep there.# Given a module name, return the potential file path in the# archive (without extension).# Does this path represent a directory?# See if this is a "directory". If so, it's eligible to be part# of a namespace package. We test by seeing if the name, with an# appended path separator, exists.# If dirpath is present in self._files, we have a directory.# Return some information about a module.# implementation# _read_directory(archive) -> files dict (new reference)# Given a path to a Zip archive, build a dict, mapping file names# (local to the archive, using SEP as a separator) to toc entries.# A toc_entry is a tuple:# (__file__, # value to use for __file__, available for all files,# # encoded to the filesystem encoding# compress, # compression kind; 0 for uncompressed# data_size, # size of compressed data on disk# file_size, # size of decompressed data# file_offset, # offset of file header from start of archive# time, # mod time of file (in dos format)# date, # mod data of file (in dos format)# crc, # crc checksum of the data# )# Directories can be recognized by the trailing path_sep in the name,# data_size and file_offset are 0.# Bad: End of Central Dir signature# Check if there's a comment.# Start of Central Directory# Start of file header# Bad: Central Dir File Header# On Windows, calling fseek to skip over the fields we don't use is# slower than reading the data because fseek flushes stdio's# internal buffers. 
See issue #8745.# During bootstrap, we may need to load the encodings# package from a ZIP file. But the cp437 encoding is implemented# in Python in the encodings package.# Break out of this dependency by using the translation table for# the cp437 encoding.# ASCII part, 8 rows x 16 chars# non-ASCII part, 16 rows x 8 chars# Return the zlib.decompress function object, or NULL if zlib couldn't# be imported. The function is cached when found, so subsequent calls# don't import zlib again.# Someone has a zlib.py[co] in their Zip file# let's avoid a stack overflow.# Given a path to a Zip file and a toc_entry, return the (uncompressed) data.# Check to make sure the local file header is correct# Bad: Local File Header# Start of file data# data is not compressed# Decompress with zlib# Lenient date/time comparison function. The precision of the mtime# in the archive is lower than the mtime stored in a .pyc: we# must allow a difference of at most one second.# dostime only stores even seconds, so be lenient# Given the contents of a .py[co] file, unmarshal the data# and return the code object. Return None if it the magic word doesn't# match, or if the recorded .py[co] metadata does not match the source,# (we do this instead of raising an exception as we fall back# to .py if available and we don't want to mask other errors).# We don't use _bootstrap_external._validate_timestamp_pyc# to allow for a more lenient timestamp check.# Replace any occurrences of '\r\n?' in the input string with '\n'.# This converts DOS and Mac line endings to Unix line endings.# Given a string buffer containing Python source code, compile it# and return a code object.# Convert the date/time values found in the Zip archive to a value# that's compatible with the time stamp stored in .pyc files.# bits 9..15: year# bits 5..8: month# bits 0..4: day# bits 11..15: hours# bits 8..10: minutes# bits 0..7: seconds / 2# Given a path to a .pyc file in the archive, return the# modification time of the matching .py file and its size,# or (0, 0) if no source is available.# strip 'c' or 'o' from *.py[co]# fetch the time stamp of the .py file for comparison# with an embedded pyc time stamp# contents of the matching .py file, or None if no source# is available.# Get the code object associated with the module specified by# 'fullname'.# bad magic number or non-matching mtime# in byte code, try next# All resources are in the zip file, so there is no path to the file.# Raising FileNotFoundError tells the higher level API to extract the# binary data and create a temporary file.# Maybe we could do better, but if we can get the data, it's a# resource. Otherwise it isn't.# This is a bit convoluted, because fullname will be a module path,# but _files is a list of file names relative to the top of the# archive's namespace. We want to compare file paths to find all the# names of things inside the module represented by fullname. So we# turn the module path of fullname into a file path relative to the# top of the archive, and then we iterate through _files looking for# names inside that "directory".# Don't forget that fullname names a package, so its path will include# __init__.py, which we want to ignore.# If the path of the file (which is relative to the top of the zip# namespace), relative to the package given when the resource# reader was created, has a parent, then it's a name in a# subdirectory and thus we skip it.b'zipimport provides support for importing Python modules from Zip archives. 
+[binary string-pool page data: interned Python docstrings and identifiers from the standard-library zipimport and zlib modules, rendered as undecodable text]
\ No newline at end of file diff --git a/example/codeql-db/db-python/default/pools/1/buckets/info b/example/codeql-db/db-python/default/pools/1/buckets/info new file mode 100644 index 0000000000000000000000000000000000000000..0111728636533e2c31d7b0489e64f46bcd4d6cf2 Binary files /dev/null and b/example/codeql-db/db-python/default/pools/1/buckets/info differ diff --git a/example/codeql-db/db-python/default/pools/1/buckets/page-000000 b/example/codeql-db/db-python/default/pools/1/buckets/page-000000 new file mode 100644 index 0000000000000000000000000000000000000000..6d17cf9d15fb9f4a2358a2d079f3b8c755d005fa Binary files /dev/null and b/example/codeql-db/db-python/default/pools/1/buckets/page-000000 differ diff --git a/example/codeql-db/db-python/default/pools/1/ids1/info b/example/codeql-db/db-python/default/pools/1/ids1/info new file mode 100644 index 0000000000000000000000000000000000000000..799471fd4d54d409c98d3b7826deaac67913dc99 Binary files /dev/null and b/example/codeql-db/db-python/default/pools/1/ids1/info differ diff --git a/example/codeql-db/db-python/default/pools/1/ids1/page-000000 b/example/codeql-db/db-python/default/pools/1/ids1/page-000000 new file mode 100644 index 0000000000000000000000000000000000000000..6d17cf9d15fb9f4a2358a2d079f3b8c755d005fa Binary files /dev/null and b/example/codeql-db/db-python/default/pools/1/ids1/page-000000 differ diff --git a/example/codeql-db/db-python/default/pools/1/indices1/info b/example/codeql-db/db-python/default/pools/1/indices1/info new file mode 100644 index 0000000000000000000000000000000000000000..799471fd4d54d409c98d3b7826deaac67913dc99 Binary files /dev/null and b/example/codeql-db/db-python/default/pools/1/indices1/info differ diff --git a/example/codeql-db/db-python/default/pools/1/indices1/page-000000 b/example/codeql-db/db-python/default/pools/1/indices1/page-000000 new file mode 100644 index 0000000000000000000000000000000000000000..6d17cf9d15fb9f4a2358a2d079f3b8c755d005fa Binary files /dev/null and b/example/codeql-db/db-python/default/pools/1/indices1/page-000000 differ diff --git a/example/codeql-db/db-python/default/pools/1/info b/example/codeql-db/db-python/default/pools/1/info new file mode 100644 index 0000000000000000000000000000000000000000..580055b18558be4790eb512770689b4aeaca7a20 Binary files /dev/null and b/example/codeql-db/db-python/default/pools/1/info differ diff --git a/example/codeql-db/db-python/default/pools/1/metadata/info b/example/codeql-db/db-python/default/pools/1/metadata/info new file mode 100644 index 0000000000000000000000000000000000000000..9cdb710dfd9490f67f5103cbab69eb12829f96b4 Binary files /dev/null and b/example/codeql-db/db-python/default/pools/1/metadata/info differ diff --git a/example/codeql-db/db-python/default/pools/1/metadata/page-000000 b/example/codeql-db/db-python/default/pools/1/metadata/page-000000 new file mode 100644 index 0000000000000000000000000000000000000000..6d17cf9d15fb9f4a2358a2d079f3b8c755d005fa Binary files /dev/null and b/example/codeql-db/db-python/default/pools/1/metadata/page-000000 differ diff --git a/example/codeql-db/db-python/default/pools/1/pageDump/page-000000000 b/example/codeql-db/db-python/default/pools/1/pageDump/page-000000000 new file mode 100644 index
0000000000000000000000000000000000000000..c6556d51e2eab48a3c18fefaacf8a606cd159f6f --- /dev/null +++ b/example/codeql-db/db-python/default/pools/1/pageDump/page-000000000 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4b4feafeeb1da14d81e10706fa949f5675af06bc9a25bd3f901ce6758ccaddc +size 1048592 diff --git a/example/codeql-db/db-python/default/pools/poolInfo b/example/codeql-db/db-python/default/pools/poolInfo new file mode 100644 index 0000000000000000000000000000000000000000..7313c1b55903de6ea2ab56276c3eff10c54e1189 Binary files /dev/null and b/example/codeql-db/db-python/default/pools/poolInfo differ diff --git a/example/codeql-db/db-python/default/py_Classes.rel b/example/codeql-db/db-python/default/py_Classes.rel new file mode 100644 index 0000000000000000000000000000000000000000..d0db2812ba8f41f968dd7bc96dbd45d5d8f81109 Binary files /dev/null and b/example/codeql-db/db-python/default/py_Classes.rel differ diff --git a/example/codeql-db/db-python/default/py_Classes.rel.checksum b/example/codeql-db/db-python/default/py_Classes.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..aa52f51816f85f5c93da5786d02918996dad5991 Binary files /dev/null and b/example/codeql-db/db-python/default/py_Classes.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_Functions.rel b/example/codeql-db/db-python/default/py_Functions.rel new file mode 100644 index 0000000000000000000000000000000000000000..17efb76b1919ac262ec3053094fc6cbedf6983a3 Binary files /dev/null and b/example/codeql-db/db-python/default/py_Functions.rel differ diff --git a/example/codeql-db/db-python/default/py_Functions.rel.checksum b/example/codeql-db/db-python/default/py_Functions.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..fd3ec1d1cff41bf20c431d3170f0493eb91a5412 Binary files /dev/null and b/example/codeql-db/db-python/default/py_Functions.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_Modules.rel b/example/codeql-db/db-python/default/py_Modules.rel new file mode 100644 index 0000000000000000000000000000000000000000..b50427aec87778c424ec79dbf524cd56557713ff Binary files /dev/null and b/example/codeql-db/db-python/default/py_Modules.rel differ diff --git a/example/codeql-db/db-python/default/py_Modules.rel.checksum b/example/codeql-db/db-python/default/py_Modules.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..146671c455190f368163d3842d8b7e851b2b6d3b Binary files /dev/null and b/example/codeql-db/db-python/default/py_Modules.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_StringPart_lists.rel b/example/codeql-db/db-python/default/py_StringPart_lists.rel new file mode 100644 index 0000000000000000000000000000000000000000..e13c8ab18bef5b7250632dcabbfc34c7f03a315a Binary files /dev/null and b/example/codeql-db/db-python/default/py_StringPart_lists.rel differ diff --git a/example/codeql-db/db-python/default/py_StringPart_lists.rel.checksum b/example/codeql-db/db-python/default/py_StringPart_lists.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..fea0ea02cab707670569d5fc2a9c6802097beb83 Binary files /dev/null and b/example/codeql-db/db-python/default/py_StringPart_lists.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_StringParts.rel b/example/codeql-db/db-python/default/py_StringParts.rel new file mode 100644 index 
0000000000000000000000000000000000000000..8658af1d7e9f60cb12279e080dcb3a49e2a7317d Binary files /dev/null and b/example/codeql-db/db-python/default/py_StringParts.rel differ diff --git a/example/codeql-db/db-python/default/py_StringParts.rel.checksum b/example/codeql-db/db-python/default/py_StringParts.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..55c2c9a24a38a9a2048820dce7587fc1c718f255 Binary files /dev/null and b/example/codeql-db/db-python/default/py_StringParts.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_alias_lists.rel b/example/codeql-db/db-python/default/py_alias_lists.rel new file mode 100644 index 0000000000000000000000000000000000000000..0bd73ed7f0071f85676968d2989ae392e1ca2fb0 Binary files /dev/null and b/example/codeql-db/db-python/default/py_alias_lists.rel differ diff --git a/example/codeql-db/db-python/default/py_alias_lists.rel.checksum b/example/codeql-db/db-python/default/py_alias_lists.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..4d8470cc47853f09101a27dfac67d1da09999fa2 Binary files /dev/null and b/example/codeql-db/db-python/default/py_alias_lists.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_aliases.rel b/example/codeql-db/db-python/default/py_aliases.rel new file mode 100644 index 0000000000000000000000000000000000000000..f4913c4e2f0511b4bcf28eaca2e20e5c76b5cf86 Binary files /dev/null and b/example/codeql-db/db-python/default/py_aliases.rel differ diff --git a/example/codeql-db/db-python/default/py_aliases.rel.checksum b/example/codeql-db/db-python/default/py_aliases.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..7be75e811221677b2fce554c1145313474878d7e Binary files /dev/null and b/example/codeql-db/db-python/default/py_aliases.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_alllines.rel b/example/codeql-db/db-python/default/py_alllines.rel new file mode 100644 index 0000000000000000000000000000000000000000..0712e1c32eb902cef4a23d91a7785b618a407f65 Binary files /dev/null and b/example/codeql-db/db-python/default/py_alllines.rel differ diff --git a/example/codeql-db/db-python/default/py_alllines.rel.checksum b/example/codeql-db/db-python/default/py_alllines.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..6f75f62e01bc95939ee5961f54eeb5736afd11be Binary files /dev/null and b/example/codeql-db/db-python/default/py_alllines.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_arguments.rel b/example/codeql-db/db-python/default/py_arguments.rel new file mode 100644 index 0000000000000000000000000000000000000000..725636856e2a154d32600c0a344a19b21a7dbe36 Binary files /dev/null and b/example/codeql-db/db-python/default/py_arguments.rel differ diff --git a/example/codeql-db/db-python/default/py_arguments.rel.checksum b/example/codeql-db/db-python/default/py_arguments.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..6184e72fe1e6af9ff62b029ecbf382c0f7e22361 Binary files /dev/null and b/example/codeql-db/db-python/default/py_arguments.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_boolops.rel b/example/codeql-db/db-python/default/py_boolops.rel new file mode 100644 index 0000000000000000000000000000000000000000..565679a1e6012a48da96c9c8a12c63d59ec556a6 Binary files /dev/null and b/example/codeql-db/db-python/default/py_boolops.rel differ diff --git 
a/example/codeql-db/db-python/default/py_boolops.rel.checksum b/example/codeql-db/db-python/default/py_boolops.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..f008b9c8ef484b9f23691fedf0c9d73a3555b756 Binary files /dev/null and b/example/codeql-db/db-python/default/py_boolops.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_bools.rel b/example/codeql-db/db-python/default/py_bools.rel new file mode 100644 index 0000000000000000000000000000000000000000..fc45074812b691256b273cf06b0a0d065d782503 Binary files /dev/null and b/example/codeql-db/db-python/default/py_bools.rel differ diff --git a/example/codeql-db/db-python/default/py_bools.rel.checksum b/example/codeql-db/db-python/default/py_bools.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..5af935783fa0587b39534a413573accc6f4ac7cc Binary files /dev/null and b/example/codeql-db/db-python/default/py_bools.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_citems.rel b/example/codeql-db/db-python/default/py_citems.rel new file mode 100644 index 0000000000000000000000000000000000000000..0dc0c2ceef6415e61481f5beedfc0f120693e386 Binary files /dev/null and b/example/codeql-db/db-python/default/py_citems.rel differ diff --git a/example/codeql-db/db-python/default/py_citems.rel.checksum b/example/codeql-db/db-python/default/py_citems.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..93326061de52b57bd9d42bf0fa71232ef3d1a3e1 Binary files /dev/null and b/example/codeql-db/db-python/default/py_citems.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_cmembers_versioned.rel b/example/codeql-db/db-python/default/py_cmembers_versioned.rel new file mode 100644 index 0000000000000000000000000000000000000000..1b6385735900cf949f8552da8df338741a1e64d6 Binary files /dev/null and b/example/codeql-db/db-python/default/py_cmembers_versioned.rel differ diff --git a/example/codeql-db/db-python/default/py_cmembers_versioned.rel.checksum b/example/codeql-db/db-python/default/py_cmembers_versioned.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..37f9f23332121f28239fba112337217650ee5549 Binary files /dev/null and b/example/codeql-db/db-python/default/py_cmembers_versioned.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_cmpop_lists.rel b/example/codeql-db/db-python/default/py_cmpop_lists.rel new file mode 100644 index 0000000000000000000000000000000000000000..11967e6a0c61ca7264af3a60d31153e5c4f5260d Binary files /dev/null and b/example/codeql-db/db-python/default/py_cmpop_lists.rel differ diff --git a/example/codeql-db/db-python/default/py_cmpop_lists.rel.checksum b/example/codeql-db/db-python/default/py_cmpop_lists.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..ed2f8bde764495f8160d30f54a6e148098e0dc2e Binary files /dev/null and b/example/codeql-db/db-python/default/py_cmpop_lists.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_cmpops.rel b/example/codeql-db/db-python/default/py_cmpops.rel new file mode 100644 index 0000000000000000000000000000000000000000..0e8f4bb73ac27bbbe898ad38348bad048e611628 Binary files /dev/null and b/example/codeql-db/db-python/default/py_cmpops.rel differ diff --git a/example/codeql-db/db-python/default/py_cmpops.rel.checksum b/example/codeql-db/db-python/default/py_cmpops.rel.checksum new file mode 100644 index 
0000000000000000000000000000000000000000..c6fdc9b3365ab47e6948da4c93c6aafec9ec847c Binary files /dev/null and b/example/codeql-db/db-python/default/py_cmpops.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_cobject_sources.rel b/example/codeql-db/db-python/default/py_cobject_sources.rel new file mode 100644 index 0000000000000000000000000000000000000000..f0f460f4258f572e225a63a9b778c1ed585332b4 Binary files /dev/null and b/example/codeql-db/db-python/default/py_cobject_sources.rel differ diff --git a/example/codeql-db/db-python/default/py_cobject_sources.rel.checksum b/example/codeql-db/db-python/default/py_cobject_sources.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..2deb4f22a4b9756c50381697806095350740569b Binary files /dev/null and b/example/codeql-db/db-python/default/py_cobject_sources.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_cobjectnames.rel b/example/codeql-db/db-python/default/py_cobjectnames.rel new file mode 100644 index 0000000000000000000000000000000000000000..65760236d95debb9cc04f4d9c93a6d474941c32d Binary files /dev/null and b/example/codeql-db/db-python/default/py_cobjectnames.rel differ diff --git a/example/codeql-db/db-python/default/py_cobjectnames.rel.checksum b/example/codeql-db/db-python/default/py_cobjectnames.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..8f8c447d4b707999da1cfa2c7c2be66284a34c6d Binary files /dev/null and b/example/codeql-db/db-python/default/py_cobjectnames.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_cobjects.rel b/example/codeql-db/db-python/default/py_cobjects.rel new file mode 100644 index 0000000000000000000000000000000000000000..af413334599e835dadc38d5c7809d3c443262de6 Binary files /dev/null and b/example/codeql-db/db-python/default/py_cobjects.rel differ diff --git a/example/codeql-db/db-python/default/py_cobjects.rel.checksum b/example/codeql-db/db-python/default/py_cobjects.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..617ba5d0c29bd15a4535827497218a43f7290b11 Binary files /dev/null and b/example/codeql-db/db-python/default/py_cobjects.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_cobjecttypes.rel b/example/codeql-db/db-python/default/py_cobjecttypes.rel new file mode 100644 index 0000000000000000000000000000000000000000..a980aee774d26d6382848bfa249b19e677f0797e Binary files /dev/null and b/example/codeql-db/db-python/default/py_cobjecttypes.rel differ diff --git a/example/codeql-db/db-python/default/py_cobjecttypes.rel.checksum b/example/codeql-db/db-python/default/py_cobjecttypes.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..bdc7415dba757c5b6494b106b99819166f456f70 Binary files /dev/null and b/example/codeql-db/db-python/default/py_cobjecttypes.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_codelines.rel b/example/codeql-db/db-python/default/py_codelines.rel new file mode 100644 index 0000000000000000000000000000000000000000..f324c74564cad670d13248dbbc555d45c31b5ada Binary files /dev/null and b/example/codeql-db/db-python/default/py_codelines.rel differ diff --git a/example/codeql-db/db-python/default/py_codelines.rel.checksum b/example/codeql-db/db-python/default/py_codelines.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..a398f1b369dbde35ab2277a9607c55f3a92ed2a1 Binary files /dev/null and 
b/example/codeql-db/db-python/default/py_codelines.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_commentlines.rel b/example/codeql-db/db-python/default/py_commentlines.rel new file mode 100644 index 0000000000000000000000000000000000000000..cd730cdee14ab3f56400b823eaec7d43e2e63dd5 Binary files /dev/null and b/example/codeql-db/db-python/default/py_commentlines.rel differ diff --git a/example/codeql-db/db-python/default/py_commentlines.rel.checksum b/example/codeql-db/db-python/default/py_commentlines.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..85c16d03747dbf0c6a5a6b61f8ab0e3f0bbb529c Binary files /dev/null and b/example/codeql-db/db-python/default/py_commentlines.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_comments.rel b/example/codeql-db/db-python/default/py_comments.rel new file mode 100644 index 0000000000000000000000000000000000000000..cfbaa7d4d87170d34833a2d988e01db3e8f63fe8 Binary files /dev/null and b/example/codeql-db/db-python/default/py_comments.rel differ diff --git a/example/codeql-db/db-python/default/py_comments.rel.checksum b/example/codeql-db/db-python/default/py_comments.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..299090d36ba1a388712e4e7ca124004500785a9e Binary files /dev/null and b/example/codeql-db/db-python/default/py_comments.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_dict_item_lists.rel b/example/codeql-db/db-python/default/py_dict_item_lists.rel new file mode 100644 index 0000000000000000000000000000000000000000..68170bd743839dc952a263ec74f805948a8eb29d Binary files /dev/null and b/example/codeql-db/db-python/default/py_dict_item_lists.rel differ diff --git a/example/codeql-db/db-python/default/py_dict_item_lists.rel.checksum b/example/codeql-db/db-python/default/py_dict_item_lists.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..64c077db1abb90ddabcdfd89284415be9dabf42f Binary files /dev/null and b/example/codeql-db/db-python/default/py_dict_item_lists.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_dict_items.rel b/example/codeql-db/db-python/default/py_dict_items.rel new file mode 100644 index 0000000000000000000000000000000000000000..0f8f9154f514e65fe189ee2643bb044edbfbe523 Binary files /dev/null and b/example/codeql-db/db-python/default/py_dict_items.rel differ diff --git a/example/codeql-db/db-python/default/py_dict_items.rel.checksum b/example/codeql-db/db-python/default/py_dict_items.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..2d2bc8034d7cce904360ed55972f8092e1954f26 Binary files /dev/null and b/example/codeql-db/db-python/default/py_dict_items.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_docstringlines.rel b/example/codeql-db/db-python/default/py_docstringlines.rel new file mode 100644 index 0000000000000000000000000000000000000000..b60797a86d5294b1504596fb63f4eb2c2ce7a2f4 Binary files /dev/null and b/example/codeql-db/db-python/default/py_docstringlines.rel differ diff --git a/example/codeql-db/db-python/default/py_docstringlines.rel.checksum b/example/codeql-db/db-python/default/py_docstringlines.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..27edd939e423dfd8db89b70e9c21895f41418b07 Binary files /dev/null and b/example/codeql-db/db-python/default/py_docstringlines.rel.checksum differ diff --git 
a/example/codeql-db/db-python/default/py_exception_successors.rel b/example/codeql-db/db-python/default/py_exception_successors.rel new file mode 100644 index 0000000000000000000000000000000000000000..47c0b4ea69cf6c5151841f6892b84f0f1b036d78 Binary files /dev/null and b/example/codeql-db/db-python/default/py_exception_successors.rel differ diff --git a/example/codeql-db/db-python/default/py_exception_successors.rel.checksum b/example/codeql-db/db-python/default/py_exception_successors.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..4da0bcc9030bf199fcae55af04e99c6befd8e066 Binary files /dev/null and b/example/codeql-db/db-python/default/py_exception_successors.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_exports.rel b/example/codeql-db/db-python/default/py_exports.rel new file mode 100644 index 0000000000000000000000000000000000000000..0502c6d1aca776e8d3db0ba8ce860b26da52008d Binary files /dev/null and b/example/codeql-db/db-python/default/py_exports.rel differ diff --git a/example/codeql-db/db-python/default/py_exports.rel.checksum b/example/codeql-db/db-python/default/py_exports.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..9e93abcd50b1c219909aa91b3757dcaaa3094667 Binary files /dev/null and b/example/codeql-db/db-python/default/py_exports.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_expr_contexts.rel b/example/codeql-db/db-python/default/py_expr_contexts.rel new file mode 100644 index 0000000000000000000000000000000000000000..454f781f8b56c963f0c662bdbee0d7e76919796b --- /dev/null +++ b/example/codeql-db/db-python/default/py_expr_contexts.rel @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55a000b3bf5989ecefa33c560823756f2491ebcf3b58f04badc896053dc8d3ba +size 2719080 diff --git a/example/codeql-db/db-python/default/py_expr_contexts.rel.checksum b/example/codeql-db/db-python/default/py_expr_contexts.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..ee90d892c9492330be1babe207664789433e026e Binary files /dev/null and b/example/codeql-db/db-python/default/py_expr_contexts.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_expr_lists.rel b/example/codeql-db/db-python/default/py_expr_lists.rel new file mode 100644 index 0000000000000000000000000000000000000000..69c290f42ac0280169b52a8522fa1a8297a77e89 --- /dev/null +++ b/example/codeql-db/db-python/default/py_expr_lists.rel @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c88696ca921bb8a4fe5d69c7a948cccfe716faf2294a2bcae3466ead9c71d4a +size 1160664 diff --git a/example/codeql-db/db-python/default/py_expr_lists.rel.checksum b/example/codeql-db/db-python/default/py_expr_lists.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..fc1b902c04aa2be7ecbc81d5defc944d7ef50a74 Binary files /dev/null and b/example/codeql-db/db-python/default/py_expr_lists.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_exprs.rel b/example/codeql-db/db-python/default/py_exprs.rel new file mode 100644 index 0000000000000000000000000000000000000000..0c92da8defd248ec10d996e4ddc1ace7e1a362dc --- /dev/null +++ b/example/codeql-db/db-python/default/py_exprs.rel @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45f5732875ba31d7c2f37972a773b9f8d216ec3396024b537e7a20d8d6abbbc2 +size 5462784 diff --git a/example/codeql-db/db-python/default/py_exprs.rel.checksum 
b/example/codeql-db/db-python/default/py_exprs.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..8cd2a6aafe3d404d013f42a3193d320e05597911 Binary files /dev/null and b/example/codeql-db/db-python/default/py_exprs.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_extracted_version.rel b/example/codeql-db/db-python/default/py_extracted_version.rel new file mode 100644 index 0000000000000000000000000000000000000000..6fb8bea6175bc3f16327b9fa2899a638799f1d40 Binary files /dev/null and b/example/codeql-db/db-python/default/py_extracted_version.rel differ diff --git a/example/codeql-db/db-python/default/py_extracted_version.rel.checksum b/example/codeql-db/db-python/default/py_extracted_version.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..8b1f02fcc6cb9f34424c2a8e6883c52304b64675 Binary files /dev/null and b/example/codeql-db/db-python/default/py_extracted_version.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_false_successors.rel b/example/codeql-db/db-python/default/py_false_successors.rel new file mode 100644 index 0000000000000000000000000000000000000000..1e88b445b49077a624ef765851f4d1276b641c8c Binary files /dev/null and b/example/codeql-db/db-python/default/py_false_successors.rel differ diff --git a/example/codeql-db/db-python/default/py_false_successors.rel.checksum b/example/codeql-db/db-python/default/py_false_successors.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..f86187469e2932da5ee87b08f04884dc8ba2c8b3 Binary files /dev/null and b/example/codeql-db/db-python/default/py_false_successors.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_flags_versioned.rel b/example/codeql-db/db-python/default/py_flags_versioned.rel new file mode 100644 index 0000000000000000000000000000000000000000..6c560b70aa3566df030581fb092fa03336a85e70 Binary files /dev/null and b/example/codeql-db/db-python/default/py_flags_versioned.rel differ diff --git a/example/codeql-db/db-python/default/py_flags_versioned.rel.checksum b/example/codeql-db/db-python/default/py_flags_versioned.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..bb23291717b7eab92fd027404a8da5de3bc9e7d3 Binary files /dev/null and b/example/codeql-db/db-python/default/py_flags_versioned.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_flow_bb_node.rel b/example/codeql-db/db-python/default/py_flow_bb_node.rel new file mode 100644 index 0000000000000000000000000000000000000000..1c64bc2513a77775e93c588790db9714acc80f5f --- /dev/null +++ b/example/codeql-db/db-python/default/py_flow_bb_node.rel @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67b8bd252b567ae938a25617b306b51bedab09944155b13f87ea2b17ec767fdd +size 6700224 diff --git a/example/codeql-db/db-python/default/py_flow_bb_node.rel.checksum b/example/codeql-db/db-python/default/py_flow_bb_node.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..b9e99b187d2eab959e3b1fe1d750fb20f0f93954 Binary files /dev/null and b/example/codeql-db/db-python/default/py_flow_bb_node.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_idoms.rel b/example/codeql-db/db-python/default/py_idoms.rel new file mode 100644 index 0000000000000000000000000000000000000000..f29351bb850339db9c8d538f2b896d55ebdb2a3d --- /dev/null +++ b/example/codeql-db/db-python/default/py_idoms.rel @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:587845e17b8ad1b8d268037bd79672bf9c2899f40a939873aca5ec72de9569c8 +size 3266200 diff --git a/example/codeql-db/db-python/default/py_idoms.rel.checksum b/example/codeql-db/db-python/default/py_idoms.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..5be1430e4b60b91733c623be9fff16452164fce0 Binary files /dev/null and b/example/codeql-db/db-python/default/py_idoms.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_ints.rel b/example/codeql-db/db-python/default/py_ints.rel new file mode 100644 index 0000000000000000000000000000000000000000..5ad1adf459bb2b2bc060832be5961d1a4a640379 Binary files /dev/null and b/example/codeql-db/db-python/default/py_ints.rel differ diff --git a/example/codeql-db/db-python/default/py_ints.rel.checksum b/example/codeql-db/db-python/default/py_ints.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..45e635e59e1d626dd7d640a2d61e6b1439cf4ef5 Binary files /dev/null and b/example/codeql-db/db-python/default/py_ints.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_locations.rel b/example/codeql-db/db-python/default/py_locations.rel new file mode 100644 index 0000000000000000000000000000000000000000..9c02d5dd4f7ef5f1ff8186995232741b71d3fae8 --- /dev/null +++ b/example/codeql-db/db-python/default/py_locations.rel @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a82d97ca4cb20df503196637d73ce5bc742a685ef99ddeaf623e2265994c61e +size 3456840 diff --git a/example/codeql-db/db-python/default/py_locations.rel.checksum b/example/codeql-db/db-python/default/py_locations.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..f8ca215939db1be78f47cf177a961d57aeefb014 Binary files /dev/null and b/example/codeql-db/db-python/default/py_locations.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_module_path.rel b/example/codeql-db/db-python/default/py_module_path.rel new file mode 100644 index 0000000000000000000000000000000000000000..70ec4fc3a54948e190133639396f8d587f19adb6 Binary files /dev/null and b/example/codeql-db/db-python/default/py_module_path.rel differ diff --git a/example/codeql-db/db-python/default/py_module_path.rel.checksum b/example/codeql-db/db-python/default/py_module_path.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..6c2d044bfe84e606e28d2fe605c1cec43a525782 Binary files /dev/null and b/example/codeql-db/db-python/default/py_module_path.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_numbers.rel b/example/codeql-db/db-python/default/py_numbers.rel new file mode 100644 index 0000000000000000000000000000000000000000..b2e1f027d9cbfce07521ed82eec89a212910d7c3 Binary files /dev/null and b/example/codeql-db/db-python/default/py_numbers.rel differ diff --git a/example/codeql-db/db-python/default/py_numbers.rel.checksum b/example/codeql-db/db-python/default/py_numbers.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..195873be1156e9f0a2378120068261fe14a94c81 Binary files /dev/null and b/example/codeql-db/db-python/default/py_numbers.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_operators.rel b/example/codeql-db/db-python/default/py_operators.rel new file mode 100644 index 0000000000000000000000000000000000000000..ab3218bf8764647f7f1c30f5fc3a0f2ae19aad72 Binary files /dev/null and b/example/codeql-db/db-python/default/py_operators.rel differ 
diff --git a/example/codeql-db/db-python/default/py_operators.rel.checksum b/example/codeql-db/db-python/default/py_operators.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..0dbb37c1477f8684d77207600f2d29f2ff029d75 Binary files /dev/null and b/example/codeql-db/db-python/default/py_operators.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_parameter_lists.rel b/example/codeql-db/db-python/default/py_parameter_lists.rel new file mode 100644 index 0000000000000000000000000000000000000000..56b958651d93445864db2ed7f4109e30bcd6ecc6 Binary files /dev/null and b/example/codeql-db/db-python/default/py_parameter_lists.rel differ diff --git a/example/codeql-db/db-python/default/py_parameter_lists.rel.checksum b/example/codeql-db/db-python/default/py_parameter_lists.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..d7e4da3e7d85de6dd31d3fee5d1b80c8660ef7fe Binary files /dev/null and b/example/codeql-db/db-python/default/py_parameter_lists.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_scope_flow.rel b/example/codeql-db/db-python/default/py_scope_flow.rel new file mode 100644 index 0000000000000000000000000000000000000000..a5d8ab37846912c7c23a102a83562f5c174295cb Binary files /dev/null and b/example/codeql-db/db-python/default/py_scope_flow.rel differ diff --git a/example/codeql-db/db-python/default/py_scope_flow.rel.checksum b/example/codeql-db/db-python/default/py_scope_flow.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..05f5cb32ebe9bf10a8a1da9b247602a358ed5a7a Binary files /dev/null and b/example/codeql-db/db-python/default/py_scope_flow.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_scope_location.rel b/example/codeql-db/db-python/default/py_scope_location.rel new file mode 100644 index 0000000000000000000000000000000000000000..0cdb5405cea8a932d229d278f96186a49daadb71 Binary files /dev/null and b/example/codeql-db/db-python/default/py_scope_location.rel differ diff --git a/example/codeql-db/db-python/default/py_scope_location.rel.checksum b/example/codeql-db/db-python/default/py_scope_location.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..8380fe85dac3133e39579ee376d555e47a5dbfad Binary files /dev/null and b/example/codeql-db/db-python/default/py_scope_location.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_scopes.rel b/example/codeql-db/db-python/default/py_scopes.rel new file mode 100644 index 0000000000000000000000000000000000000000..90168212773e303b05c789080bc44136cb417dc3 --- /dev/null +++ b/example/codeql-db/db-python/default/py_scopes.rel @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac1c887e6efb8676532f97b2070d1d82139850eedc56f14974dc38419ad04ea3 +size 3387288 diff --git a/example/codeql-db/db-python/default/py_scopes.rel.checksum b/example/codeql-db/db-python/default/py_scopes.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..3dd2494fdf83a8cda0a55e4ad85f531101a97830 Binary files /dev/null and b/example/codeql-db/db-python/default/py_scopes.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_special_objects.rel b/example/codeql-db/db-python/default/py_special_objects.rel new file mode 100644 index 0000000000000000000000000000000000000000..68a0dd1fd2c6bd1e9116cb2864e10c4e41355714 Binary files /dev/null and b/example/codeql-db/db-python/default/py_special_objects.rel differ diff --git 
a/example/codeql-db/db-python/default/py_special_objects.rel.checksum b/example/codeql-db/db-python/default/py_special_objects.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..f2f67be508d4cd05b39517a87fce112e018d0555 Binary files /dev/null and b/example/codeql-db/db-python/default/py_special_objects.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_ssa_defn.rel b/example/codeql-db/db-python/default/py_ssa_defn.rel new file mode 100644 index 0000000000000000000000000000000000000000..aabb534e8da78b60f07af5f9d2b8568a88dd2b4a Binary files /dev/null and b/example/codeql-db/db-python/default/py_ssa_defn.rel differ diff --git a/example/codeql-db/db-python/default/py_ssa_defn.rel.checksum b/example/codeql-db/db-python/default/py_ssa_defn.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..9b5d5d5b27cf5d91bef76cbdbad60f94b87c200d Binary files /dev/null and b/example/codeql-db/db-python/default/py_ssa_defn.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_ssa_phi.rel b/example/codeql-db/db-python/default/py_ssa_phi.rel new file mode 100644 index 0000000000000000000000000000000000000000..259508cd0d35e32dee5f2c749672baa03c73afaf Binary files /dev/null and b/example/codeql-db/db-python/default/py_ssa_phi.rel differ diff --git a/example/codeql-db/db-python/default/py_ssa_phi.rel.checksum b/example/codeql-db/db-python/default/py_ssa_phi.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..7c6933017f1128f5ce95f7151aeb65d4cd3eb3b0 Binary files /dev/null and b/example/codeql-db/db-python/default/py_ssa_phi.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_ssa_use.rel b/example/codeql-db/db-python/default/py_ssa_use.rel new file mode 100644 index 0000000000000000000000000000000000000000..7808e5c218265b377a7c73a6ef6ed754e47e15b8 Binary files /dev/null and b/example/codeql-db/db-python/default/py_ssa_use.rel differ diff --git a/example/codeql-db/db-python/default/py_ssa_use.rel.checksum b/example/codeql-db/db-python/default/py_ssa_use.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..5d9668faffc4da5bcf828cc0922519a742ad8378 Binary files /dev/null and b/example/codeql-db/db-python/default/py_ssa_use.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_ssa_var.rel b/example/codeql-db/db-python/default/py_ssa_var.rel new file mode 100644 index 0000000000000000000000000000000000000000..cc1549f9149d8a6ac36e616852a6c1c2f9c97b6c Binary files /dev/null and b/example/codeql-db/db-python/default/py_ssa_var.rel differ diff --git a/example/codeql-db/db-python/default/py_ssa_var.rel.checksum b/example/codeql-db/db-python/default/py_ssa_var.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..3e66a027f210f101ab3e432b9709a342b6e932bd Binary files /dev/null and b/example/codeql-db/db-python/default/py_ssa_var.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_stmt_lists.rel b/example/codeql-db/db-python/default/py_stmt_lists.rel new file mode 100644 index 0000000000000000000000000000000000000000..815f3d438655812eb07b273f34700d5fa81900ec Binary files /dev/null and b/example/codeql-db/db-python/default/py_stmt_lists.rel differ diff --git a/example/codeql-db/db-python/default/py_stmt_lists.rel.checksum b/example/codeql-db/db-python/default/py_stmt_lists.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..9adb8626a045231528f56bbf8ae16c63fb7d5093 
Binary files /dev/null and b/example/codeql-db/db-python/default/py_stmt_lists.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_stmts.rel b/example/codeql-db/db-python/default/py_stmts.rel new file mode 100644 index 0000000000000000000000000000000000000000..e935c03205655121e1532485ddd2aa55bbf4a1e0 --- /dev/null +++ b/example/codeql-db/db-python/default/py_stmts.rel @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f00523d75a745890ad8e1e876dff76cd3a78ca28a791ca14582a2e2257a42f52 +size 1311792 diff --git a/example/codeql-db/db-python/default/py_stmts.rel.checksum b/example/codeql-db/db-python/default/py_stmts.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..d6c745921ba27820c9513611bdf0d3ef27e9ff56 Binary files /dev/null and b/example/codeql-db/db-python/default/py_stmts.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_str_lists.rel b/example/codeql-db/db-python/default/py_str_lists.rel new file mode 100644 index 0000000000000000000000000000000000000000..e0322187d0a7dd599f3c1bca08a53bcb2fd7a61f Binary files /dev/null and b/example/codeql-db/db-python/default/py_str_lists.rel differ diff --git a/example/codeql-db/db-python/default/py_str_lists.rel.checksum b/example/codeql-db/db-python/default/py_str_lists.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..6e0aecc43993c75bb16389cdcfc6fac29781a765 Binary files /dev/null and b/example/codeql-db/db-python/default/py_str_lists.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_strs.rel b/example/codeql-db/db-python/default/py_strs.rel new file mode 100644 index 0000000000000000000000000000000000000000..46ae715bcdddab343ef5c0105b3cbb3841d728a0 --- /dev/null +++ b/example/codeql-db/db-python/default/py_strs.rel @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad03144ace17f3c09f1e483054b03e0875e8aafcaf11a538aa1f11afe92a3ab4 +size 1599648 diff --git a/example/codeql-db/db-python/default/py_strs.rel.checksum b/example/codeql-db/db-python/default/py_strs.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..0b8e769fa88c91c83efb5b50c5f065430319a3d3 Binary files /dev/null and b/example/codeql-db/db-python/default/py_strs.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_successors.rel b/example/codeql-db/db-python/default/py_successors.rel new file mode 100644 index 0000000000000000000000000000000000000000..00fad33a87c137bf60152b49a27793c19bb38182 --- /dev/null +++ b/example/codeql-db/db-python/default/py_successors.rel @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dee030415cd2eaf7b000fe75069fd151b0c7699e3f2c0bc6f6890845853746a +size 3499632 diff --git a/example/codeql-db/db-python/default/py_successors.rel.checksum b/example/codeql-db/db-python/default/py_successors.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..60e0f74b667e9e52b0735a74c99f0ef0adec3a76 Binary files /dev/null and b/example/codeql-db/db-python/default/py_successors.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_true_successors.rel b/example/codeql-db/db-python/default/py_true_successors.rel new file mode 100644 index 0000000000000000000000000000000000000000..80b92f9ca8c318f18afc8dacb4113f0ccf6af96f Binary files /dev/null and b/example/codeql-db/db-python/default/py_true_successors.rel differ diff --git a/example/codeql-db/db-python/default/py_true_successors.rel.checksum 
b/example/codeql-db/db-python/default/py_true_successors.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..12894576ded871f8f023a5bbc986c2722678c1d1 Binary files /dev/null and b/example/codeql-db/db-python/default/py_true_successors.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_unaryops.rel b/example/codeql-db/db-python/default/py_unaryops.rel new file mode 100644 index 0000000000000000000000000000000000000000..8ad2a73742c4573e24daafbaae9899ef0e11dce1 Binary files /dev/null and b/example/codeql-db/db-python/default/py_unaryops.rel differ diff --git a/example/codeql-db/db-python/default/py_unaryops.rel.checksum b/example/codeql-db/db-python/default/py_unaryops.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..b68a7e2edf2d4822dc20b6bfdeb22e1d8f0d7948 Binary files /dev/null and b/example/codeql-db/db-python/default/py_unaryops.rel.checksum differ diff --git a/example/codeql-db/db-python/default/py_variables.rel b/example/codeql-db/db-python/default/py_variables.rel new file mode 100644 index 0000000000000000000000000000000000000000..d41d51c5847247298fcf55480fb306f77d470f8a --- /dev/null +++ b/example/codeql-db/db-python/default/py_variables.rel @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01970b1c804f3ea912c3da271bad576f1ef94292be9b0e1f5e9e7e4d968392cf +size 1378760 diff --git a/example/codeql-db/db-python/default/py_variables.rel.checksum b/example/codeql-db/db-python/default/py_variables.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..e823439d3f5c58f4ee0c7bcd1a1fb79854c071ad Binary files /dev/null and b/example/codeql-db/db-python/default/py_variables.rel.checksum differ diff --git a/example/codeql-db/db-python/default/sourceLocationPrefix.rel b/example/codeql-db/db-python/default/sourceLocationPrefix.rel new file mode 100644 index 0000000000000000000000000000000000000000..cf00db2e95525bd6b8f6764e5d474e0c779361fd Binary files /dev/null and b/example/codeql-db/db-python/default/sourceLocationPrefix.rel differ diff --git a/example/codeql-db/db-python/default/sourceLocationPrefix.rel.checksum b/example/codeql-db/db-python/default/sourceLocationPrefix.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..1560eb8f8e7408626f981be2cb7b80fcd88fbff8 Binary files /dev/null and b/example/codeql-db/db-python/default/sourceLocationPrefix.rel.checksum differ diff --git a/example/codeql-db/db-python/default/strings/0/buckets/page-000000 b/example/codeql-db/db-python/default/strings/0/buckets/page-000000 new file mode 100644 index 0000000000000000000000000000000000000000..6d17cf9d15fb9f4a2358a2d079f3b8c755d005fa Binary files /dev/null and b/example/codeql-db/db-python/default/strings/0/buckets/page-000000 differ diff --git a/example/codeql-db/db-python/default/strings/0/metadata/page-000000 b/example/codeql-db/db-python/default/strings/0/metadata/page-000000 new file mode 100644 index 0000000000000000000000000000000000000000..6d17cf9d15fb9f4a2358a2d079f3b8c755d005fa Binary files /dev/null and b/example/codeql-db/db-python/default/strings/0/metadata/page-000000 differ diff --git a/example/codeql-db/db-python/default/strings/0/pageDump/page-000000000 b/example/codeql-db/db-python/default/strings/0/pageDump/page-000000000 new file mode 100644 index 0000000000000000000000000000000000000000..f137446919cc212812a47b616eea1963e2367b01 --- /dev/null +++ b/example/codeql-db/db-python/default/strings/0/pageDump/page-000000000 @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:a86ee907205f2af34454484ed36c48579b7ca92e1f80b3d4df208a2f9e620fc5 +size 1048592 diff --git a/example/codeql-db/db-python/default/variable.rel b/example/codeql-db/db-python/default/variable.rel new file mode 100644 index 0000000000000000000000000000000000000000..f05d244dad5ea0c4734fd1e21ad318404e22d094 Binary files /dev/null and b/example/codeql-db/db-python/default/variable.rel differ diff --git a/example/codeql-db/db-python/default/variable.rel.checksum b/example/codeql-db/db-python/default/variable.rel.checksum new file mode 100644 index 0000000000000000000000000000000000000000..ecc24c545df73c1296f98ceb31fc218d8b87aeb7 Binary files /dev/null and b/example/codeql-db/db-python/default/variable.rel.checksum differ diff --git a/example/codeql-db/db-python/semmlecode.python.dbscheme b/example/codeql-db/db-python/semmlecode.python.dbscheme new file mode 100644 index 0000000000000000000000000000000000000000..0565f7466437d52e1dc64a3b930926ab2f60cd64 --- /dev/null +++ b/example/codeql-db/db-python/semmlecode.python.dbscheme @@ -0,0 +1,1196 @@ +/* + * This dbscheme is auto-generated by 'semmle/dbscheme_gen.py'. + * WARNING: Any modifications to this file will be lost. + * Relations can be changed by modifying master.py or + * by adding rules to dbscheme.template + */ + +/* This is a dummy line to alter the dbscheme, so we can make a database upgrade + * without actually changing any of the dbscheme predicates. It contains a date + * to allow for such updates in the future as well. + * + * 2020-07-02 + * + * DO NOT remove this comment carelessly, since it can revert the dbscheme back to a + * previously seen state (matching a previously seen SHA), which would make the upgrade + * mechanism not work properly. + */ + +/*- DEPRECATED: External defects and metrics -*/ + +externalDefects( + unique int id : @externalDefect, + varchar(900) queryPath : string ref, + int location : @location ref, + varchar(900) message : string ref, + float severity : float ref +); + +externalMetrics( + unique int id : @externalMetric, + varchar(900) queryPath : string ref, + int location : @location ref, + float value : float ref +); + +/*- External data -*/ + +/** + * External data, loaded from CSV files during snapshot creation. See + * [Tutorial: Incorporating external data](https://help.semmle.com/wiki/display/SD/Tutorial%3A+Incorporating+external+data) + * for more information. + */ +externalData( + int id : @externalDataElement, + string path : string ref, + int column: int ref, + string value : string ref +); + +/*- DEPRECATED: Snapshot date -*/ + +snapshotDate(unique date snapshotDate : date ref); + +/*- Source location prefix -*/ + +/** + * The source location of the snapshot. 
+ */ +sourceLocationPrefix(string prefix : string ref); + +/*- DEPRECATED: Duplicate code -*/ + +duplicateCode( + unique int id : @duplication, + string relativePath : string ref, + int equivClass : int ref +); + +similarCode( + unique int id : @similarity, + string relativePath : string ref, + int equivClass : int ref +); + +@duplication_or_similarity = @duplication | @similarity + +tokens( + int id : @duplication_or_similarity ref, + int offset : int ref, + int beginLine : int ref, + int beginColumn : int ref, + int endLine : int ref, + int endColumn : int ref +); + +/*- DEPRECATED: Version control data -*/ + +svnentries( + unique int id : @svnentry, + string revision : string ref, + string author : string ref, + date revisionDate : date ref, + int changeSize : int ref +) + +svnaffectedfiles( + int id : @svnentry ref, + int file : @file ref, + string action : string ref +) + +svnentrymsg( + unique int id : @svnentry ref, + string message : string ref +) + +svnchurn( + int commit : @svnentry ref, + int file : @file ref, + int addedLines : int ref, + int deletedLines : int ref +) + +/*- Lines of code -*/ + +numlines( + int element_id: @sourceline ref, + int num_lines: int ref, + int num_code: int ref, + int num_comment: int ref +); + +/*- Files and folders -*/ + +/** + * The location of an element. + * The location spans column `startcolumn` of line `startline` to + * column `endcolumn` of line `endline` in file `file`. + * For more information, see + * [Locations](https://codeql.github.com/docs/writing-codeql-queries/providing-locations-in-codeql-queries/). + */ +locations_default( + unique int id: @location_default, + int file: @file ref, + int beginLine: int ref, + int beginColumn: int ref, + int endLine: int ref, + int endColumn: int ref +); + +files( + unique int id: @file, + string name: string ref +); + +folders( + unique int id: @folder, + string name: string ref +); + +@container = @file | @folder + +containerparent( + int parent: @container ref, + unique int child: @container ref +); + +/*- XML Files -*/ + +xmlEncoding( + unique int id: @file ref, + string encoding: string ref +); + +xmlDTDs( + unique int id: @xmldtd, + string root: string ref, + string publicId: string ref, + string systemId: string ref, + int fileid: @file ref +); + +xmlElements( + unique int id: @xmlelement, + string name: string ref, + int parentid: @xmlparent ref, + int idx: int ref, + int fileid: @file ref +); + +xmlAttrs( + unique int id: @xmlattribute, + int elementid: @xmlelement ref, + string name: string ref, + string value: string ref, + int idx: int ref, + int fileid: @file ref +); + +xmlNs( + int id: @xmlnamespace, + string prefixName: string ref, + string URI: string ref, + int fileid: @file ref +); + +xmlHasNs( + int elementId: @xmlnamespaceable ref, + int nsId: @xmlnamespace ref, + int fileid: @file ref +); + +xmlComments( + unique int id: @xmlcomment, + string text: string ref, + int parentid: @xmlparent ref, + int fileid: @file ref +); + +xmlChars( + unique int id: @xmlcharacters, + string text: string ref, + int parentid: @xmlparent ref, + int idx: int ref, + int isCDATA: int ref, + int fileid: @file ref +); + +@xmlparent = @file | @xmlelement; +@xmlnamespaceable = @xmlelement | @xmlattribute; + +xmllocations( + int xmlElement: @xmllocatable ref, + int location: @location_default ref +); + +@xmllocatable = @xmlcharacters | @xmlelement | @xmlcomment | @xmlattribute | @xmldtd | @file | @xmlnamespace; + +/*- YAML -*/ + +#keyset[parent, idx] +yaml (unique int id: @yaml_node, + int kind: int ref, + 
int parent: @yaml_node_parent ref, + int idx: int ref, + string tag: string ref, + string tostring: string ref); + +case @yaml_node.kind of + 0 = @yaml_scalar_node +| 1 = @yaml_mapping_node +| 2 = @yaml_sequence_node +| 3 = @yaml_alias_node +; + +@yaml_collection_node = @yaml_mapping_node | @yaml_sequence_node; + +@yaml_node_parent = @yaml_collection_node | @file; + +yaml_anchors (unique int node: @yaml_node ref, + string anchor: string ref); + +yaml_aliases (unique int alias: @yaml_alias_node ref, + string target: string ref); + +yaml_scalars (unique int scalar: @yaml_scalar_node ref, + int style: int ref, + string value: string ref); + +yaml_errors (unique int id: @yaml_error, + string message: string ref); + +yaml_locations(unique int locatable: @yaml_locatable ref, + int location: @location_default ref); + +@yaml_locatable = @yaml_node | @yaml_error; + +/*- Python dbscheme -*/ + +/* + * Line metrics + */ +py_codelines(int id : @py_scope ref, + int count : int ref); + +py_commentlines(int id : @py_scope ref, + int count : int ref); + +py_docstringlines(int id : @py_scope ref, + int count : int ref); + +py_alllines(int id : @py_scope ref, + int count : int ref); + +/**************************** + Python dbscheme +****************************/ + +@sourceline = @file | @py_Module | @xmllocatable; + +@location = @location_ast | @location_default ; + +locations_ast(unique int id: @location_ast, + int module: @py_Module ref, + int beginLine: int ref, + int beginColumn: int ref, + int endLine: int ref, + int endColumn: int ref); + +file_contents(unique int file: @file ref, string contents: string ref); + +py_module_path(int module: @py_Module ref, int file: @container ref); + +variable(unique int id : @py_variable, + int scope : @py_scope ref, + varchar(1) name : string ref); + +py_line_lengths(unique int id : @py_line, + int file: @py_Module ref, + int line : int ref, + int length : int ref); + +py_extracted_version(int module : @py_Module ref, + varchar(1) version : string ref); + +/* AUTO GENERATED PART STARTS HERE */ + + +/* AnnAssign.location = 0, location */ +/* AnnAssign.value = 1, expr */ +/* AnnAssign.annotation = 2, expr */ +/* AnnAssign.target = 3, expr */ + +/* Assert.location = 0, location */ +/* Assert.test = 1, expr */ +/* Assert.msg = 2, expr */ + +/* Assign.location = 0, location */ +/* Assign.value = 1, expr */ +/* Assign.targets = 2, expr_list */ + +/* AssignExpr.location = 0, location */ +/* AssignExpr.parenthesised = 1, bool */ +/* AssignExpr.value = 2, expr */ +/* AssignExpr.target = 3, expr */ + +/* Attribute.location = 0, location */ +/* Attribute.parenthesised = 1, bool */ +/* Attribute.value = 2, expr */ +/* Attribute.attr = 3, str */ +/* Attribute.ctx = 4, expr_context */ + +/* AugAssign.location = 0, location */ +/* AugAssign.operation = 1, BinOp */ + +/* Await.location = 0, location */ +/* Await.parenthesised = 1, bool */ +/* Await.value = 2, expr */ + +/* BinaryExpr.location = 0, location */ +/* BinaryExpr.parenthesised = 1, bool */ +/* BinaryExpr.left = 2, expr */ +/* BinaryExpr.op = 3, operator */ +/* BinaryExpr.right = 4, expr */ +/* BinaryExpr = AugAssign */ + +/* BoolExpr.location = 0, location */ +/* BoolExpr.parenthesised = 1, bool */ +/* BoolExpr.op = 2, boolop */ +/* BoolExpr.values = 3, expr_list */ + +/* Break.location = 0, location */ + +/* Bytes.location = 0, location */ +/* Bytes.parenthesised = 1, bool */ +/* Bytes.s = 2, bytes */ +/* Bytes.prefix = 3, bytes */ +/* Bytes.implicitly_concatenated_parts = 4, StringPart_list */ + +/* Call.location = 0, 
location */ +/* Call.parenthesised = 1, bool */ +/* Call.func = 2, expr */ +/* Call.positional_args = 3, expr_list */ +/* Call.named_args = 4, dict_item_list */ + +/* Case.location = 0, location */ +/* Case.pattern = 1, pattern */ +/* Case.guard = 2, expr */ +/* Case.body = 3, stmt_list */ + +/* Class.name = 0, str */ +/* Class.body = 1, stmt_list */ +/* Class = ClassExpr */ + +/* ClassExpr.location = 0, location */ +/* ClassExpr.parenthesised = 1, bool */ +/* ClassExpr.name = 2, str */ +/* ClassExpr.bases = 3, expr_list */ +/* ClassExpr.keywords = 4, dict_item_list */ +/* ClassExpr.inner_scope = 5, Class */ + +/* Compare.location = 0, location */ +/* Compare.parenthesised = 1, bool */ +/* Compare.left = 2, expr */ +/* Compare.ops = 3, cmpop_list */ +/* Compare.comparators = 4, expr_list */ + +/* Continue.location = 0, location */ + +/* Delete.location = 0, location */ +/* Delete.targets = 1, expr_list */ + +/* Dict.location = 0, location */ +/* Dict.parenthesised = 1, bool */ +/* Dict.items = 2, dict_item_list */ + +/* DictComp.location = 0, location */ +/* DictComp.parenthesised = 1, bool */ +/* DictComp.function = 2, Function */ +/* DictComp.iterable = 3, expr */ + +/* DictUnpacking.location = 0, location */ +/* DictUnpacking.value = 1, expr */ + +/* Ellipsis.location = 0, location */ +/* Ellipsis.parenthesised = 1, bool */ + +/* ExceptGroupStmt.location = 0, location */ +/* ExceptGroupStmt.type = 1, expr */ +/* ExceptGroupStmt.name = 2, expr */ +/* ExceptGroupStmt.body = 3, stmt_list */ + +/* ExceptStmt.location = 0, location */ +/* ExceptStmt.type = 1, expr */ +/* ExceptStmt.name = 2, expr */ +/* ExceptStmt.body = 3, stmt_list */ + +/* Exec.location = 0, location */ +/* Exec.body = 1, expr */ +/* Exec.globals = 2, expr */ +/* Exec.locals = 3, expr */ + +/* ExprStmt.location = 0, location */ +/* ExprStmt.value = 1, expr */ + +/* Filter.location = 0, location */ +/* Filter.parenthesised = 1, bool */ +/* Filter.value = 2, expr */ +/* Filter.filter = 3, expr */ + +/* For.location = 0, location */ +/* For.target = 1, expr */ +/* For.iter = 2, expr */ +/* For.body = 3, stmt_list */ +/* For.orelse = 4, stmt_list */ +/* For.is_async = 5, bool */ + +/* FormattedValue.location = 0, location */ +/* FormattedValue.parenthesised = 1, bool */ +/* FormattedValue.value = 2, expr */ +/* FormattedValue.conversion = 3, str */ +/* FormattedValue.format_spec = 4, JoinedStr */ + +/* Function.name = 0, str */ +/* Function.args = 1, parameter_list */ +/* Function.vararg = 2, expr */ +/* Function.kwonlyargs = 3, expr_list */ +/* Function.kwarg = 4, expr */ +/* Function.body = 5, stmt_list */ +/* Function.is_async = 6, bool */ +/* Function = FunctionParent */ + +/* FunctionExpr.location = 0, location */ +/* FunctionExpr.parenthesised = 1, bool */ +/* FunctionExpr.name = 2, str */ +/* FunctionExpr.args = 3, arguments */ +/* FunctionExpr.returns = 4, expr */ +/* FunctionExpr.inner_scope = 5, Function */ + +/* GeneratorExp.location = 0, location */ +/* GeneratorExp.parenthesised = 1, bool */ +/* GeneratorExp.function = 2, Function */ +/* GeneratorExp.iterable = 3, expr */ + +/* Global.location = 0, location */ +/* Global.names = 1, str_list */ + +/* Guard.location = 0, location */ +/* Guard.parenthesised = 1, bool */ +/* Guard.test = 2, expr */ + +/* If.location = 0, location */ +/* If.test = 1, expr */ +/* If.body = 2, stmt_list */ +/* If.orelse = 3, stmt_list */ + +/* IfExp.location = 0, location */ +/* IfExp.parenthesised = 1, bool */ +/* IfExp.test = 2, expr */ +/* IfExp.body = 3, expr */ +/* IfExp.orelse = 
4, expr */ + +/* Import.location = 0, location */ +/* Import.names = 1, alias_list */ + +/* ImportExpr.location = 0, location */ +/* ImportExpr.parenthesised = 1, bool */ +/* ImportExpr.level = 2, int */ +/* ImportExpr.name = 3, str */ +/* ImportExpr.top = 4, bool */ + +/* ImportStar.location = 0, location */ +/* ImportStar.module = 1, expr */ + +/* ImportMember.location = 0, location */ +/* ImportMember.parenthesised = 1, bool */ +/* ImportMember.module = 2, expr */ +/* ImportMember.name = 3, str */ + +/* Fstring.location = 0, location */ +/* Fstring.parenthesised = 1, bool */ +/* Fstring.values = 2, expr_list */ +/* Fstring = FormattedValue */ + +/* KeyValuePair.location = 0, location */ +/* KeyValuePair.value = 1, expr */ +/* KeyValuePair.key = 2, expr */ + +/* Lambda.location = 0, location */ +/* Lambda.parenthesised = 1, bool */ +/* Lambda.args = 2, arguments */ +/* Lambda.inner_scope = 3, Function */ + +/* List.location = 0, location */ +/* List.parenthesised = 1, bool */ +/* List.elts = 2, expr_list */ +/* List.ctx = 3, expr_context */ + +/* ListComp.location = 0, location */ +/* ListComp.parenthesised = 1, bool */ +/* ListComp.function = 2, Function */ +/* ListComp.iterable = 3, expr */ +/* ListComp.generators = 4, comprehension_list */ +/* ListComp.elt = 5, expr */ + +/* MatchStmt.location = 0, location */ +/* MatchStmt.subject = 1, expr */ +/* MatchStmt.cases = 2, stmt_list */ + +/* MatchAsPattern.location = 0, location */ +/* MatchAsPattern.parenthesised = 1, bool */ +/* MatchAsPattern.pattern = 2, pattern */ +/* MatchAsPattern.alias = 3, expr */ + +/* MatchCapturePattern.location = 0, location */ +/* MatchCapturePattern.parenthesised = 1, bool */ +/* MatchCapturePattern.variable = 2, expr */ + +/* MatchClassPattern.location = 0, location */ +/* MatchClassPattern.parenthesised = 1, bool */ +/* MatchClassPattern.class = 2, expr */ +/* MatchClassPattern.class_name = 3, expr */ +/* MatchClassPattern.positional = 4, pattern_list */ +/* MatchClassPattern.keyword = 5, pattern_list */ + +/* MatchDoubleStarPattern.location = 0, location */ +/* MatchDoubleStarPattern.parenthesised = 1, bool */ +/* MatchDoubleStarPattern.target = 2, pattern */ + +/* MatchKeyValuePattern.location = 0, location */ +/* MatchKeyValuePattern.parenthesised = 1, bool */ +/* MatchKeyValuePattern.key = 2, pattern */ +/* MatchKeyValuePattern.value = 3, pattern */ + +/* MatchKeywordPattern.location = 0, location */ +/* MatchKeywordPattern.parenthesised = 1, bool */ +/* MatchKeywordPattern.attribute = 2, expr */ +/* MatchKeywordPattern.value = 3, pattern */ + +/* MatchLiteralPattern.location = 0, location */ +/* MatchLiteralPattern.parenthesised = 1, bool */ +/* MatchLiteralPattern.literal = 2, expr */ + +/* MatchMappingPattern.location = 0, location */ +/* MatchMappingPattern.parenthesised = 1, bool */ +/* MatchMappingPattern.mappings = 2, pattern_list */ + +/* MatchOrPattern.location = 0, location */ +/* MatchOrPattern.parenthesised = 1, bool */ +/* MatchOrPattern.patterns = 2, pattern_list */ + +/* MatchSequencePattern.location = 0, location */ +/* MatchSequencePattern.parenthesised = 1, bool */ +/* MatchSequencePattern.patterns = 2, pattern_list */ + +/* MatchStarPattern.location = 0, location */ +/* MatchStarPattern.parenthesised = 1, bool */ +/* MatchStarPattern.target = 2, pattern */ + +/* MatchValuePattern.location = 0, location */ +/* MatchValuePattern.parenthesised = 1, bool */ +/* MatchValuePattern.value = 2, expr */ + +/* MatchWildcardPattern.location = 0, location */ +/* 
MatchWildcardPattern.parenthesised = 1, bool */ + +/* Module.name = 0, str */ +/* Module.hash = 1, str */ +/* Module.body = 2, stmt_list */ +/* Module.kind = 3, str */ + +/* Name.location = 0, location */ +/* Name.parenthesised = 1, bool */ +/* Name.variable = 2, variable */ +/* Name.ctx = 3, expr_context */ +/* Name = ParameterList */ + +/* Nonlocal.location = 0, location */ +/* Nonlocal.names = 1, str_list */ + +/* Num.location = 0, location */ +/* Num.parenthesised = 1, bool */ +/* Num.n = 2, number */ +/* Num.text = 3, number */ + +/* Pass.location = 0, location */ + +/* PlaceHolder.location = 0, location */ +/* PlaceHolder.parenthesised = 1, bool */ +/* PlaceHolder.variable = 2, variable */ +/* PlaceHolder.ctx = 3, expr_context */ + +/* Print.location = 0, location */ +/* Print.dest = 1, expr */ +/* Print.values = 2, expr_list */ +/* Print.nl = 3, bool */ + +/* Raise.location = 0, location */ +/* Raise.exc = 1, expr */ +/* Raise.cause = 2, expr */ +/* Raise.type = 3, expr */ +/* Raise.inst = 4, expr */ +/* Raise.tback = 5, expr */ + +/* Repr.location = 0, location */ +/* Repr.parenthesised = 1, bool */ +/* Repr.value = 2, expr */ + +/* Return.location = 0, location */ +/* Return.value = 1, expr */ + +/* Set.location = 0, location */ +/* Set.parenthesised = 1, bool */ +/* Set.elts = 2, expr_list */ + +/* SetComp.location = 0, location */ +/* SetComp.parenthesised = 1, bool */ +/* SetComp.function = 2, Function */ +/* SetComp.iterable = 3, expr */ + +/* Slice.location = 0, location */ +/* Slice.parenthesised = 1, bool */ +/* Slice.start = 2, expr */ +/* Slice.stop = 3, expr */ +/* Slice.step = 4, expr */ + +/* SpecialOperation.location = 0, location */ +/* SpecialOperation.parenthesised = 1, bool */ +/* SpecialOperation.name = 2, str */ +/* SpecialOperation.arguments = 3, expr_list */ + +/* Starred.location = 0, location */ +/* Starred.parenthesised = 1, bool */ +/* Starred.value = 2, expr */ +/* Starred.ctx = 3, expr_context */ + +/* Str.location = 0, location */ +/* Str.parenthesised = 1, bool */ +/* Str.s = 2, str */ +/* Str.prefix = 3, str */ +/* Str.implicitly_concatenated_parts = 4, StringPart_list */ + +/* StringPart.text = 0, str */ +/* StringPart.location = 1, location */ +/* StringPart = StringPartList */ +/* StringPartList = BytesOrStr */ + +/* Subscript.location = 0, location */ +/* Subscript.parenthesised = 1, bool */ +/* Subscript.value = 2, expr */ +/* Subscript.index = 3, expr */ +/* Subscript.ctx = 4, expr_context */ + +/* TemplateDottedNotation.location = 0, location */ +/* TemplateDottedNotation.parenthesised = 1, bool */ +/* TemplateDottedNotation.value = 2, expr */ +/* TemplateDottedNotation.attr = 3, str */ +/* TemplateDottedNotation.ctx = 4, expr_context */ + +/* TemplateWrite.location = 0, location */ +/* TemplateWrite.value = 1, expr */ + +/* Try.location = 0, location */ +/* Try.body = 1, stmt_list */ +/* Try.orelse = 2, stmt_list */ +/* Try.handlers = 3, stmt_list */ +/* Try.finalbody = 4, stmt_list */ + +/* Tuple.location = 0, location */ +/* Tuple.parenthesised = 1, bool */ +/* Tuple.elts = 2, expr_list */ +/* Tuple.ctx = 3, expr_context */ +/* Tuple = ParameterList */ + +/* UnaryExpr.location = 0, location */ +/* UnaryExpr.parenthesised = 1, bool */ +/* UnaryExpr.op = 2, unaryop */ +/* UnaryExpr.operand = 3, expr */ + +/* While.location = 0, location */ +/* While.test = 1, expr */ +/* While.body = 2, stmt_list */ +/* While.orelse = 3, stmt_list */ + +/* With.location = 0, location */ +/* With.context_expr = 1, expr */ +/* With.optional_vars = 2, expr */ +/* 
With.body = 3, stmt_list */ +/* With.is_async = 4, bool */ + +/* Yield.location = 0, location */ +/* Yield.parenthesised = 1, bool */ +/* Yield.value = 2, expr */ + +/* YieldFrom.location = 0, location */ +/* YieldFrom.parenthesised = 1, bool */ +/* YieldFrom.value = 2, expr */ + +/* Alias.value = 0, expr */ +/* Alias.asname = 1, expr */ +/* Alias = AliasList */ +/* AliasList = Import */ + +/* Arguments.kw_defaults = 0, expr_list */ +/* Arguments.defaults = 1, expr_list */ +/* Arguments.annotations = 2, expr_list */ +/* Arguments.varargannotation = 3, expr */ +/* Arguments.kwargannotation = 4, expr */ +/* Arguments.kw_annotations = 5, expr_list */ +/* Arguments = ArgumentsParent */ +/* boolean = BoolParent */ +/* Boolop = BoolExpr */ +/* string = Bytes */ +/* Cmpop = CmpopList */ +/* CmpopList = Compare */ + +/* Comprehension.location = 0, location */ +/* Comprehension.iter = 1, expr */ +/* Comprehension.target = 2, expr */ +/* Comprehension.ifs = 3, expr_list */ +/* Comprehension = ComprehensionList */ +/* ComprehensionList = ListComp */ +/* DictItem = DictItemList */ +/* DictItemList = DictItemListParent */ + +/* Expr.location = 0, location */ +/* Expr.parenthesised = 1, bool */ +/* Expr = ExprParent */ +/* ExprContext = ExprContextParent */ +/* ExprList = ExprListParent */ +/* int = ImportExpr */ + +/* Keyword.location = 0, location */ +/* Keyword.value = 1, expr */ +/* Keyword.arg = 2, str */ +/* Location = LocationParent */ +/* string = Num */ +/* Operator = BinaryExpr */ +/* ParameterList = Function */ + +/* Pattern.location = 0, location */ +/* Pattern.parenthesised = 1, bool */ +/* Pattern = PatternParent */ +/* PatternList = PatternListParent */ + +/* Stmt.location = 0, location */ +/* Stmt = StmtList */ +/* StmtList = StmtListParent */ +/* string = StrParent */ +/* StringList = StrListParent */ +/* Unaryop = UnaryExpr */ +/* Variable = VariableParent */ +py_Classes(unique int id : @py_Class, + unique int parent : @py_ClassExpr ref); + +py_Functions(unique int id : @py_Function, + unique int parent : @py_Function_parent ref); + +py_Modules(unique int id : @py_Module); + +py_StringParts(unique int id : @py_StringPart, + int parent : @py_StringPart_list ref, + int idx : int ref); + +py_StringPart_lists(unique int id : @py_StringPart_list, + unique int parent : @py_Bytes_or_Str ref); + +py_aliases(unique int id : @py_alias, + int parent : @py_alias_list ref, + int idx : int ref); + +py_alias_lists(unique int id : @py_alias_list, + unique int parent : @py_Import ref); + +py_arguments(unique int id : @py_arguments, + unique int parent : @py_arguments_parent ref); + +py_bools(int parent : @py_bool_parent ref, + int idx : int ref); + +py_boolops(unique int id : @py_boolop, + int kind: int ref, + unique int parent : @py_BoolExpr ref); + +py_bytes(varchar(1) id : string ref, + int parent : @py_Bytes ref, + int idx : int ref); + +py_cmpops(unique int id : @py_cmpop, + int kind: int ref, + int parent : @py_cmpop_list ref, + int idx : int ref); + +py_cmpop_lists(unique int id : @py_cmpop_list, + unique int parent : @py_Compare ref); + +py_comprehensions(unique int id : @py_comprehension, + int parent : @py_comprehension_list ref, + int idx : int ref); + +py_comprehension_lists(unique int id : @py_comprehension_list, + unique int parent : @py_ListComp ref); + +py_dict_items(unique int id : @py_dict_item, + int kind: int ref, + int parent : @py_dict_item_list ref, + int idx : int ref); + +py_dict_item_lists(unique int id : @py_dict_item_list, + unique int parent : @py_dict_item_list_parent 
ref); + +py_exprs(unique int id : @py_expr, + int kind: int ref, + int parent : @py_expr_parent ref, + int idx : int ref); + +py_expr_contexts(unique int id : @py_expr_context, + int kind: int ref, + unique int parent : @py_expr_context_parent ref); + +py_expr_lists(unique int id : @py_expr_list, + int parent : @py_expr_list_parent ref, + int idx : int ref); + +py_ints(int id : int ref, + unique int parent : @py_ImportExpr ref); + +py_locations(unique int id : @location ref, + unique int parent : @py_location_parent ref); + +py_numbers(varchar(1) id : string ref, + int parent : @py_Num ref, + int idx : int ref); + +py_operators(unique int id : @py_operator, + int kind: int ref, + unique int parent : @py_BinaryExpr ref); + +py_parameter_lists(unique int id : @py_parameter_list, + unique int parent : @py_Function ref); + +py_patterns(unique int id : @py_pattern, + int kind: int ref, + int parent : @py_pattern_parent ref, + int idx : int ref); + +py_pattern_lists(unique int id : @py_pattern_list, + int parent : @py_pattern_list_parent ref, + int idx : int ref); + +py_stmts(unique int id : @py_stmt, + int kind: int ref, + int parent : @py_stmt_list ref, + int idx : int ref); + +py_stmt_lists(unique int id : @py_stmt_list, + int parent : @py_stmt_list_parent ref, + int idx : int ref); + +py_strs(varchar(1) id : string ref, + int parent : @py_str_parent ref, + int idx : int ref); + +py_str_lists(unique int id : @py_str_list, + unique int parent : @py_str_list_parent ref); + +py_unaryops(unique int id : @py_unaryop, + int kind: int ref, + unique int parent : @py_UnaryExpr ref); + +py_variables(int id : @py_variable ref, + unique int parent : @py_variable_parent ref); + +case @py_boolop.kind of + 0 = @py_And +| 1 = @py_Or; + +case @py_cmpop.kind of + 0 = @py_Eq +| 1 = @py_Gt +| 2 = @py_GtE +| 3 = @py_In +| 4 = @py_Is +| 5 = @py_IsNot +| 6 = @py_Lt +| 7 = @py_LtE +| 8 = @py_NotEq +| 9 = @py_NotIn; + +case @py_dict_item.kind of + 0 = @py_DictUnpacking +| 1 = @py_KeyValuePair +| 2 = @py_keyword; + +case @py_expr.kind of + 0 = @py_Attribute +| 1 = @py_BinaryExpr +| 2 = @py_BoolExpr +| 3 = @py_Bytes +| 4 = @py_Call +| 5 = @py_ClassExpr +| 6 = @py_Compare +| 7 = @py_Dict +| 8 = @py_DictComp +| 9 = @py_Ellipsis +| 10 = @py_FunctionExpr +| 11 = @py_GeneratorExp +| 12 = @py_IfExp +| 13 = @py_ImportExpr +| 14 = @py_ImportMember +| 15 = @py_Lambda +| 16 = @py_List +| 17 = @py_ListComp +| 18 = @py_Guard +| 19 = @py_Name +| 20 = @py_Num +| 21 = @py_Repr +| 22 = @py_Set +| 23 = @py_SetComp +| 24 = @py_Slice +| 25 = @py_Starred +| 26 = @py_Str +| 27 = @py_Subscript +| 28 = @py_Tuple +| 29 = @py_UnaryExpr +| 30 = @py_Yield +| 31 = @py_YieldFrom +| 32 = @py_TemplateDottedNotation +| 33 = @py_Filter +| 34 = @py_PlaceHolder +| 35 = @py_Await +| 36 = @py_Fstring +| 37 = @py_FormattedValue +| 38 = @py_AssignExpr +| 39 = @py_SpecialOperation; + +case @py_expr_context.kind of + 0 = @py_AugLoad +| 1 = @py_AugStore +| 2 = @py_Del +| 3 = @py_Load +| 4 = @py_Param +| 5 = @py_Store; + +case @py_operator.kind of + 0 = @py_Add +| 1 = @py_BitAnd +| 2 = @py_BitOr +| 3 = @py_BitXor +| 4 = @py_Div +| 5 = @py_FloorDiv +| 6 = @py_LShift +| 7 = @py_Mod +| 8 = @py_Mult +| 9 = @py_Pow +| 10 = @py_RShift +| 11 = @py_Sub +| 12 = @py_MatMult; + +case @py_pattern.kind of + 0 = @py_MatchAsPattern +| 1 = @py_MatchOrPattern +| 2 = @py_MatchLiteralPattern +| 3 = @py_MatchCapturePattern +| 4 = @py_MatchWildcardPattern +| 5 = @py_MatchValuePattern +| 6 = @py_MatchSequencePattern +| 7 = @py_MatchStarPattern +| 8 = @py_MatchMappingPattern +| 9 = 
@py_MatchDoubleStarPattern +| 10 = @py_MatchKeyValuePattern +| 11 = @py_MatchClassPattern +| 12 = @py_MatchKeywordPattern; + +case @py_stmt.kind of + 0 = @py_Assert +| 1 = @py_Assign +| 2 = @py_AugAssign +| 3 = @py_Break +| 4 = @py_Continue +| 5 = @py_Delete +| 6 = @py_ExceptStmt +| 7 = @py_ExceptGroupStmt +| 8 = @py_Exec +| 9 = @py_Expr_stmt +| 10 = @py_For +| 11 = @py_Global +| 12 = @py_If +| 13 = @py_Import +| 14 = @py_ImportStar +| 15 = @py_MatchStmt +| 16 = @py_Case +| 17 = @py_Nonlocal +| 18 = @py_Pass +| 19 = @py_Print +| 20 = @py_Raise +| 21 = @py_Return +| 22 = @py_Try +| 23 = @py_While +| 24 = @py_With +| 25 = @py_TemplateWrite +| 26 = @py_AnnAssign; + +case @py_unaryop.kind of + 0 = @py_Invert +| 1 = @py_Not +| 2 = @py_UAdd +| 3 = @py_USub; + +@py_Bytes_or_Str = @py_Bytes | @py_Str; + +@py_Function_parent = @py_DictComp | @py_FunctionExpr | @py_GeneratorExp | @py_Lambda | @py_ListComp | @py_SetComp; + +@py_arguments_parent = @py_FunctionExpr | @py_Lambda; + +@py_ast_node = @py_Class | @py_Function | @py_Module | @py_StringPart | @py_comprehension | @py_dict_item | @py_expr | @py_pattern | @py_stmt; + +@py_bool_parent = @py_For | @py_Function | @py_Print | @py_With | @py_expr | @py_pattern; + +@py_dict_item_list_parent = @py_Call | @py_ClassExpr | @py_Dict; + +@py_expr_context_parent = @py_Attribute | @py_List | @py_Name | @py_PlaceHolder | @py_Starred | @py_Subscript | @py_TemplateDottedNotation | @py_Tuple; + +@py_expr_list_parent = @py_Assign | @py_BoolExpr | @py_Call | @py_ClassExpr | @py_Compare | @py_Delete | @py_Fstring | @py_Function | @py_List | @py_Print | @py_Set | @py_SpecialOperation | @py_Tuple | @py_arguments | @py_comprehension; + +@py_expr_or_stmt = @py_expr | @py_stmt; + +@py_expr_parent = @py_AnnAssign | @py_Assert | @py_Assign | @py_AssignExpr | @py_Attribute | @py_AugAssign | @py_Await | @py_BinaryExpr | @py_Call | @py_Case | @py_Compare | @py_DictComp | @py_DictUnpacking | @py_ExceptGroupStmt | @py_ExceptStmt | @py_Exec | @py_Expr_stmt | @py_Filter | @py_For | @py_FormattedValue | @py_Function | @py_FunctionExpr | @py_GeneratorExp | @py_Guard | @py_If | @py_IfExp | @py_ImportMember | @py_ImportStar | @py_KeyValuePair | @py_ListComp | @py_MatchAsPattern | @py_MatchCapturePattern | @py_MatchClassPattern | @py_MatchKeywordPattern | @py_MatchLiteralPattern | @py_MatchStmt | @py_MatchValuePattern | @py_Print | @py_Raise | @py_Repr | @py_Return | @py_SetComp | @py_Slice | @py_Starred | @py_Subscript | @py_TemplateDottedNotation | @py_TemplateWrite | @py_UnaryExpr | @py_While | @py_With | @py_Yield | @py_YieldFrom | @py_alias | @py_arguments | @py_comprehension | @py_expr_list | @py_keyword | @py_parameter_list; + +@py_location_parent = @py_DictUnpacking | @py_KeyValuePair | @py_StringPart | @py_comprehension | @py_expr | @py_keyword | @py_pattern | @py_stmt; + +@py_parameter = @py_Name | @py_Tuple; + +@py_pattern_list_parent = @py_MatchClassPattern | @py_MatchMappingPattern | @py_MatchOrPattern | @py_MatchSequencePattern; + +@py_pattern_parent = @py_Case | @py_MatchAsPattern | @py_MatchDoubleStarPattern | @py_MatchKeyValuePattern | @py_MatchKeywordPattern | @py_MatchStarPattern | @py_pattern_list; + +@py_scope = @py_Class | @py_Function | @py_Module; + +@py_stmt_list_parent = @py_Case | @py_Class | @py_ExceptGroupStmt | @py_ExceptStmt | @py_For | @py_Function | @py_If | @py_MatchStmt | @py_Module | @py_Try | @py_While | @py_With; + +@py_str_list_parent = @py_Global | @py_Nonlocal; + +@py_str_parent = @py_Attribute | @py_Class | @py_ClassExpr | @py_FormattedValue | 
@py_Function | @py_FunctionExpr | @py_ImportExpr | @py_ImportMember | @py_Module | @py_SpecialOperation | @py_Str | @py_StringPart | @py_TemplateDottedNotation | @py_keyword | @py_str_list; + +@py_variable_parent = @py_Name | @py_PlaceHolder; + + +/* + * End of auto-generated part + */ + + + +/* Map relative names to absolute names for imports */ +py_absolute_names(int module : @py_Module ref, + varchar(1) relname : string ref, + varchar(1) absname : string ref); + +py_exports(int id : @py_Module ref, + varchar(1) name : string ref); + +/* Successor information */ +py_successors(int predecessor : @py_flow_node ref, + int successor : @py_flow_node ref); + +py_true_successors(int predecessor : @py_flow_node ref, + int successor : @py_flow_node ref); + +py_exception_successors(int predecessor : @py_flow_node ref, + int successor : @py_flow_node ref); + +py_false_successors(int predecessor : @py_flow_node ref, + int successor : @py_flow_node ref); + +py_flow_bb_node(unique int flownode : @py_flow_node, + int realnode : @py_ast_node ref, + int basicblock : @py_flow_node ref, + int index : int ref); + +py_scope_flow(int flow : @py_flow_node ref, + int scope : @py_scope ref, + int kind : int ref); + +py_idoms(unique int node : @py_flow_node ref, + int immediate_dominator : @py_flow_node ref); + +py_ssa_phi(int phi : @py_ssa_var ref, + int arg: @py_ssa_var ref); + +py_ssa_var(unique int id : @py_ssa_var, + int var : @py_variable ref); + +py_ssa_use(int node: @py_flow_node ref, + int var : @py_ssa_var ref); + +py_ssa_defn(unique int id : @py_ssa_var ref, + int node: @py_flow_node ref); + +@py_base_var = @py_variable | @py_ssa_var; + +py_scopes(unique int node : @py_expr_or_stmt ref, + int scope : @py_scope ref); + +py_scope_location(unique int id : @location ref, + unique int scope : @py_scope ref); + +py_flags_versioned(varchar(1) name : string ref, + varchar(1) value : string ref, + varchar(1) version : string ref); + +py_syntax_error_versioned(unique int id : @location ref, + varchar(1) message : string ref, + varchar(1) version : string ref); + +py_comments(unique int id : @py_comment, + varchar(1) text : string ref, + unique int location : @location ref); + +/* Type information support */ + +py_cobjects(unique int obj : @py_cobject); + +py_cobjecttypes(unique int obj : @py_cobject ref, + int typeof : @py_cobject ref); + +py_cobjectnames(unique int obj : @py_cobject ref, + varchar(1) name : string ref); + +/* Kind should be 0 for introspection, > 0 from source, as follows: + 1 from C extension source + */ +py_cobject_sources(int obj : @py_cobject ref, + int kind : int ref); + +py_cmembers_versioned(int object : @py_cobject ref, + varchar(1) name : string ref, + int member : @py_cobject ref, + varchar(1) version : string ref); + +py_citems(int object : @py_cobject ref, + int index : int ref, + int member : @py_cobject ref); + +ext_argtype(int funcid : @py_object ref, + int arg : int ref, + int typeid : @py_object ref); + +ext_rettype(int funcid : @py_object ref, + int typeid : @py_object ref); + +ext_proptype(int propid : @py_object ref, + int typeid : @py_object ref); + +ext_argreturn(int funcid : @py_object ref, + int arg : int ref); + +py_special_objects(unique int obj : @py_cobject ref, + unique varchar(1) name : string ref); + +py_decorated_object(int object : @py_object ref, + int level: int ref); + +@py_object = @py_cobject | @py_flow_node; + +@py_source_element = @py_ast_node | @container; diff --git a/example/codeql-db/db-python/semmlecode.python.dbscheme.stats 
b/example/codeql-db/db-python/semmlecode.python.dbscheme.stats new file mode 100644 index 0000000000000000000000000000000000000000..0424528c8db4523eebb00176ce6c74894a4ae95d --- /dev/null +++ b/example/codeql-db/db-python/semmlecode.python.dbscheme.stats @@ -0,0 +1,18552 @@ + + +@py_Guard100 +@py_MatchAsPattern100 +@py_MatchOrPattern100 +@py_MatchLiteralPattern100 +@py_MatchCapturePattern100 +@py_MatchWildcardPattern100 +@py_MatchValuePattern100 +@py_MatchSequencePattern100 +@py_MatchStarPattern100 +@py_MatchMappingPattern100 +@py_MatchDoubleStarPattern100 +@py_MatchKeyValuePattern100 +@py_MatchClassPattern100 +@py_MatchKeywordPattern100 +@py_Case100 +@py_MatchStmt100 +@py_pattern_list100 +@externalDefect +100 + + +@externalMetric +100 + + +@externalDataElement +20 + + +@duplication +890 + + +@similarity +5591 + + +@svnentry +100 + + +@file +3066 + + +@folder +686 + + +@location_default +100 + + +@location_ast +2310679 + + +@py_variable +242770 + + +@py_line +100 + + +@py_Class +10244 + + +@py_Function +44860 + + +@py_Module +5983 + + +@py_StringPart +6399 + + +@py_StringPart_list +2296 + + +@py_alias +21374 + + +@py_alias_list +14396 + + +@py_arguments +41982 + + +@py_boolop +10907 + + +@py_And +7243 + + +@py_Or +3663 + + +@py_cmpop +38007 + + +@py_Eq +11370 + + +@py_Gt +1999 + + +@py_GtE +1306 + + +@py_In +4743 + + +@py_Is +6368 + + +@py_IsNot +4541 + + +@py_Lt +1920 + + +@py_LtE +1128 + + +@py_NotEq +3050 + + +@py_NotIn +1672 + + +@py_cmpop_list +37666 + + +@py_comprehension +1688 + + +@py_comprehension_list +1682 + + +@py_dict_item +167901 + + +@py_DictUnpacking +1521 + + +@py_KeyValuePair +92837 + + +@py_keyword +74612 + + +@py_dict_item_list +33758 + + +@py_expr +1684031 + + +@py_Attribute +249565 + + +@py_BinaryExpr +28868 + + +@py_BoolExpr +10907 + + +@py_Bytes +105600 + + +@py_Call +198138 + + +@py_ClassExpr +10244 + + +@py_Compare +37666 + + +@py_Dict +9635 + + +@py_DictComp +99 + + +@py_Ellipsis +115 + + +@py_Fstring +100 + + +@py_FormattedValue +100 + + +@py_FunctionExpr +41531 + + +@py_GeneratorExp +1066 + + +@py_IfExp +923 + + +@py_ImportExpr +21532 + + +@py_ImportMember +17714 + + +@py_Lambda +870 + + +@py_List +23200 + + +@py_ListComp +1690 + + +@py_Name +845963 + + +@py_Num +58723 + + +@py_Set +261 + + +@py_SetComp +49 + + +@py_Slice +5316 + + +@py_Starred +1265 + + +@py_Str +288427 + + +@py_Subscript +31583 + + +@py_Tuple +27693 + + +@py_UnaryExpr +13295 + + +@py_Yield +3941 + + +@py_YieldFrom +398 + + +@py_Repr +100 + + +@py_TemplateDottedNotation +100 + + +@py_Filter +100 + + +@py_PlaceHolder +100 + + +@py_Await +500 + + +@py_AssignExpr +200 + + +@py_SpecialOperation +100 + + +@py_expr_context +1140675 + + +@py_Del +1324 + + +@py_Load +853094 + + +@py_Param +96047 + + +@py_Store +198700 + + +@py_AugLoad +100 + + +@py_AugStore +100 + + +@py_expr_list +430986 + + +@py_operator +28868 + + +@py_Add +13603 + + +@py_BitAnd +796 + + +@py_BitOr +799 + + +@py_BitXor +190 + + +@py_Div +393 + + +@py_FloorDiv +362 + + +@py_LShift +279 + + +@py_Mod +8234 + + +@py_Mult +2218 + + +@py_Pow +501 + + +@py_RShift +157 + + +@py_Sub +3136 + + +@py_MatMult +100 + + +@py_parameter_list +43271 + + +@py_stmt +372643 + + +@py_Assert +1999 + + +@py_Assign +151576 + + +@py_AugAssign +3656 + + +@py_Break +1699 + + +@py_Continue +1199 + + +@py_Delete +1149 + + +@py_ExceptStmt +5610 + + +@py_ExceptGroupStmt +1000 + + +@py_Expr_stmt +76750 + + +@py_For +11495 + + +@py_Global +392 + + +@py_If +53619 + + +@py_Import +14396 + + +@py_ImportStar +158 + + +@py_Nonlocal +35 + + +@py_Pass +2872 + + +@py_Raise 
+7794 + + +@py_Return +36127 + + +@py_Try +6210 + + +@py_While +2138 + + +@py_With +4193 + + +@py_Exec +43 + + +@py_Print +1032 + + +@py_TemplateWrite +100 + + +@py_AnnAssign +100 + + +@py_stmt_list +156700 + + +@py_str_list +427 + + +@py_unaryop +13295 + + +@py_Invert +107 + + +@py_Not +8655 + + +@py_UAdd +14 + + +@py_USub +4565 + + +@py_flow_node +2323431 + + +@py_ssa_var +272292 + + +@py_comment +77830 + + +@py_cobject +112856 + + +@xmldtd +100 + + +@xmlelement +100 + + +@xmlattribute +100 + + +@xmlnamespace +100 + + +@xmlcomment +100 + + +@xmlcharacters +100 + + +@yaml_node +885 + + +@yaml_scalar_node +700 + + +@yaml_mapping_node +149 + + +@yaml_sequence_node +35 + + +@yaml_alias_node +1 + + +@yaml_error +1 + + + +externalDefects +100 + + +id +100 + + +queryPath +100 + + +location +100 + + +message +100 + + +severity +100 + + + + +id +queryPath + + +12 + + +1 +2 +2 + + + + + + +id +location + + +12 + + +1 +2 +2 + + + + + + +id +message + + +12 + + +1 +2 +2 + + + + + + +id +severity + + +12 + + +1 +2 +2 + + + + + + +queryPath +id + + +12 + + + + + +queryPath +location + + +12 + + + + + +queryPath +message + + +12 + + + + + +queryPath +severity + + +12 + + + + + +location +id + + +12 + + + + + +location +queryPath + + +12 + + + + + +location +message + + +12 + + + + + +location +severity + + +12 + + + + + +message +id + + +12 + + + + + +message +queryPath + + +12 + + + + + +message +location + + +12 + + + + + +message +severity + + +12 + + + + + +severity +id + + +12 + + + + + +severity +queryPath + + +12 + + + + + +severity +location + + +12 + + + + + +severity +message + + +12 + + + + + + + +externalMetrics +100 + + +id +100 + + +queryPath +100 + + +location +100 + + +value +100 + + + + +id +queryPath + + +12 + + +1 +2 +1 + + + + + + +id +location + + +12 + + +1 +2 +1 + + + + + + +id +value + + +12 + + +1 +2 +1 + + + + + + +queryPath +id + + +12 + + + + + +queryPath +location + + +12 + + + + + +queryPath +value + + +12 + + + + + +location +id + + +12 + + + + + +location +queryPath + + +12 + + + + + +location +value + + +12 + + + + + +value +id + + +12 + + + + + +value +queryPath + + +12 + + + + + +value +location + + +12 + + + + + + + +externalData +41 + + +id +20 + + +path +2 + + +column +5 + + +value +41 + + + + +id +path + + +12 + + +1 +2 +20 + + + + + + +id +column + + +12 + + +2 +3 +20 + + + + + + +id +value + + +12 + + +2 +3 +20 + + + + + + +path +id + + +12 + + +7 +8 +2 + + + + + + +path +column + + +12 + + +2 +3 +2 + + + + + + +path +value + + +12 + + +14 +15 +2 + + + + + + +column +id + + +12 + + +7 +8 +5 + + + + + + +column +path + + +12 + + +1 +2 +5 + + + + + + +column +value + + +12 + + +7 +8 +5 + + + + + + +value +id + + +12 + + +1 +2 +41 + + + + + + +value +path + + +12 + + +1 +2 +41 + + + + + + +value +column + + +12 + + +1 +2 +41 + + + + + + + + +snapshotDate +2 + + +snapshotDate +2 + + + + + +sourceLocationPrefix +2 + + +prefix +2 + + + + + +duplicateCode +890 + + +id +890 + + +relativePath +91 + + +equivClass +415 + + + + +id +relativePath + + +12 + + +1 +2 +890 + + + + + + +id +equivClass + + +12 + + +1 +2 +890 + + + + + + +relativePath +id + + +12 + + +1 +2 +30 + + +2 +3 +16 + + +3 +4 +4 + + +4 +5 +8 + + +6 +8 +6 + + +8 +12 +6 + + +12 +19 +6 + + +23 +47 +6 + + +48 +109 +4 + + + + + + +relativePath +equivClass + + +12 + + +1 +2 +38 + + +2 +3 +12 + + +3 +4 +6 + + +4 +5 +8 + + +6 +10 +8 + + +10 +15 +6 + + +15 +46 +6 + + +92 +105 +2 + + + + + + +equivClass +id + + +12 + + +2 +3 +371 + + +3 +4 +31 + + +4 +7 +12 + + + + + + +equivClass +relativePath + + +12 + + +1 +2 +95 
+ + +2 +3 +288 + + +3 +5 +31 + + + + + + + + +similarCode +5591 + + +id +5591 + + +relativePath +347 + + +equivClass +1696 + + + + +id +relativePath + + +12 + + +1 +2 +5591 + + + + + + +id +equivClass + + +12 + + +1 +2 +5591 + + + + + + +relativePath +id + + +12 + + +1 +2 +44 + + +2 +3 +33 + + +3 +5 +31 + + +5 +7 +30 + + +7 +9 +18 + + +9 +11 +26 + + +11 +13 +26 + + +13 +18 +29 + + +18 +23 +29 + + +23 +30 +24 + + +30 +42 +26 + + +45 +155 +26 + + +161 +162 +1 + + + + + + +relativePath +equivClass + + +12 + + +1 +2 +66 + + +2 +3 +19 + + +3 +4 +20 + + +4 +5 +18 + + +5 +6 +18 + + +6 +8 +27 + + +8 +10 +30 + + +10 +13 +26 + + +13 +18 +26 + + +18 +23 +26 + + +23 +31 +31 + + +31 +53 +26 + + +54 +145 +9 + + + + + + +equivClass +id + + +12 + + +2 +3 +937 + + +3 +4 +260 + + +4 +5 +166 + + +5 +6 +88 + + +6 +8 +138 + + +8 +11 +105 + + + + + + +equivClass +relativePath + + +12 + + +1 +2 +358 + + +2 +3 +733 + + +3 +4 +216 + + +4 +5 +139 + + +5 +7 +110 + + +7 +10 +127 + + +10 +11 +9 + + + + + + + + +tokens +889686 + + +id +6481 + + +offset +10514 + + +beginLine +9882 + + +beginColumn +1197 + + +endLine +9882 + + +endColumn +1207 + + + + +id +offset + + +12 + + +100 +101 +394 + + +101 +102 +750 + + +102 +103 +347 + + +103 +104 +414 + + +104 +105 +405 + + +105 +107 +528 + + +107 +108 +414 + + +108 +111 +513 + + +111 +117 +555 + + +117 +127 +494 + + +127 +145 +490 + + +145 +176 +487 + + +176 +284 +488 + + +289 +7594 +196 + + + + + + +id +beginLine + + +12 + + +5 +9 +396 + + +9 +10 +299 + + +10 +11 +559 + + +11 +12 +432 + + +12 +13 +598 + + +13 +14 +747 + + +14 +15 +541 + + +15 +17 +564 + + +17 +20 +589 + + +20 +24 +573 + + +24 +28 +526 + + +28 +51 +498 + + +51 +1520 +155 + + + + + + +id +beginColumn + + +12 + + +9 +17 +516 + + +17 +22 +488 + + +22 +31 +563 + + +31 +37 +566 + + +37 +43 +585 + + +43 +46 +472 + + +46 +49 +591 + + +49 +51 +438 + + +51 +54 +571 + + +54 +56 +443 + + +56 +59 +484 + + +59 +68 +524 + + +68 +131 +234 + + + + + + +id +endLine + + +12 + + +5 +9 +396 + + +9 +10 +299 + + +10 +11 +559 + + +11 +12 +432 + + +12 +13 +598 + + +13 +14 +747 + + +14 +15 +541 + + +15 +17 +564 + + +17 +20 +589 + + +20 +24 +573 + + +24 +28 +526 + + +28 +51 +502 + + +51 +1520 +150 + + + + + + +id +endColumn + + +12 + + +10 +18 +450 + + +18 +23 +523 + + +23 +33 +531 + + +33 +39 +495 + + +39 +44 +504 + + +44 +48 +533 + + +48 +51 +544 + + +51 +54 +549 + + +54 +56 +492 + + +56 +58 +458 + + +58 +61 +508 + + +61 +67 +498 + + +67 +133 +391 + + + + + + +offset +id + + +12 + + +2 +3 +6935 + + +4 +5 +693 + + +6 +11 +706 + + +12 +15 +887 + + +16 +93 +790 + + +94 +4682 +499 + + + + + + +offset +beginLine + + +12 + + +2 +3 +6935 + + +4 +5 +693 + + +6 +11 +706 + + +12 +15 +891 + + +16 +91 +789 + + +91 +1817 +497 + + + + + + +offset +beginColumn + + +12 + + +1 +2 +6952 + + +2 +3 +722 + + +3 +5 +674 + + +5 +8 +969 + + +8 +41 +797 + + +41 +169 +397 + + + + + + +offset +endLine + + +12 + + +2 +3 +6935 + + +4 +5 +693 + + +6 +11 +706 + + +12 +15 +891 + + +16 +91 +789 + + +91 +1817 +497 + + + + + + +offset +endColumn + + +12 + + +1 +2 +6973 + + +2 +3 +696 + + +3 +6 +929 + + +6 +9 +801 + + +9 +57 +798 + + +57 +172 +314 + + + + + + +beginLine +id + + +12 + + +1 +2 +1613 + + +2 +3 +1931 + + +3 +4 +987 + + +4 +5 +650 + + +5 +7 +825 + + +7 +9 +744 + + +9 +12 +772 + + +12 +17 +836 + + +17 +37 +749 + + +37 +148 +742 + + +151 +217 +29 + + + + + + +beginLine +offset + + +12 + + +1 +4 +697 + + +4 +8 +882 + + +8 +11 +746 + + +11 +15 +883 + + +15 +20 +801 + + +20 +25 +756 + + +25 +32 +757 + + +32 +42 +743 + + +42 +55 +742 + + +55 +72 +778 + + +72 +98 
+747 + + +98 +148 +751 + + +148 +211 +594 + + + + + + +beginLine +beginColumn + + +12 + + +1 +3 +749 + + +3 +6 +686 + + +6 +8 +605 + + +8 +10 +779 + + +10 +12 +733 + + +12 +14 +714 + + +14 +17 +726 + + +17 +21 +880 + + +21 +26 +872 + + +26 +32 +852 + + +32 +40 +810 + + +40 +54 +771 + + +54 +184 +699 + + + + + + +beginLine +endLine + + +12 + + +1 +2 +9740 + + +2 +4 +142 + + + + + + +beginLine +endColumn + + +12 + + +1 +3 +750 + + +3 +6 +666 + + +6 +8 +621 + + +8 +10 +722 + + +10 +12 +720 + + +12 +14 +699 + + +14 +17 +721 + + +17 +21 +890 + + +21 +26 +862 + + +26 +32 +839 + + +32 +40 +794 + + +40 +53 +790 + + +53 +81 +746 + + +81 +185 +56 + + + + + + +beginColumn +id + + +12 + + +1 +2 +389 + + +2 +3 +200 + + +3 +4 +80 + + +4 +7 +105 + + +7 +8 +90 + + +8 +11 +91 + + +11 +45 +91 + + +48 +2322 +90 + + +2328 +3928 +59 + + + + + + +beginColumn +offset + + +12 + + +1 +2 +404 + + +2 +3 +206 + + +3 +4 +65 + + +4 +7 +101 + + +7 +8 +88 + + +8 +11 +94 + + +11 +33 +90 + + +33 +345 +90 + + +360 +2645 +58 + + + + + + +beginColumn +beginLine + + +12 + + +1 +2 +628 + + +2 +3 +204 + + +3 +4 +90 + + +4 +10 +99 + + +10 +750 +90 + + +762 +5047 +84 + + + + + + +beginColumn +endLine + + +12 + + +1 +2 +628 + + +2 +3 +204 + + +3 +4 +90 + + +4 +10 +99 + + +10 +750 +90 + + +762 +5046 +84 + + + + + + +beginColumn +endColumn + + +12 + + +1 +2 +822 + + +2 +3 +152 + + +3 +6 +95 + + +6 +31 +92 + + +31 +99 +34 + + + + + + +endLine +id + + +12 + + +1 +2 +1613 + + +2 +3 +1931 + + +3 +4 +987 + + +4 +5 +652 + + +5 +7 +823 + + +7 +9 +744 + + +9 +12 +772 + + +12 +17 +836 + + +17 +37 +749 + + +37 +148 +742 + + +151 +217 +29 + + + + + + +endLine +offset + + +12 + + +1 +4 +702 + + +4 +8 +876 + + +8 +11 +749 + + +11 +15 +883 + + +15 +20 +801 + + +20 +25 +756 + + +25 +32 +753 + + +32 +42 +744 + + +42 +55 +743 + + +55 +72 +779 + + +72 +98 +746 + + +98 +148 +751 + + +148 +211 +594 + + + + + + +endLine +beginLine + + +12 + + +1 +2 +9734 + + +2 +3 +148 + + + + + + +endLine +beginColumn + + +12 + + +1 +3 +749 + + +3 +6 +685 + + +6 +8 +607 + + +8 +10 +782 + + +10 +12 +728 + + +12 +14 +714 + + +14 +17 +728 + + +17 +21 +880 + + +21 +26 +873 + + +26 +32 +851 + + +32 +40 +810 + + +40 +54 +771 + + +54 +184 +699 + + + + + + +endLine +endColumn + + +12 + + +1 +3 +750 + + +3 +6 +664 + + +6 +8 +625 + + +8 +10 +721 + + +10 +12 +718 + + +12 +14 +702 + + +14 +17 +721 + + +17 +21 +883 + + +21 +26 +862 + + +26 +32 +841 + + +32 +40 +797 + + +40 +53 +792 + + +53 +81 +743 + + +81 +185 +56 + + + + + + +endColumn +id + + +12 + + +1 +2 +391 + + +2 +3 +192 + + +3 +4 +84 + + +4 +7 +102 + + +7 +8 +92 + + +8 +11 +98 + + +11 +47 +91 + + +50 +2174 +91 + + +2189 +4114 +62 + + + + + + +endColumn +offset + + +12 + + +1 +2 +408 + + +2 +3 +193 + + +3 +4 +74 + + +4 +7 +95 + + +7 +8 +85 + + +8 +11 +103 + + +11 +36 +91 + + +37 +353 +91 + + +364 +1140 +62 + + + + + + +endColumn +beginLine + + +12 + + +1 +2 +625 + + +2 +3 +211 + + +3 +4 +84 + + +4 +8 +91 + + +8 +405 +91 + + +414 +3303 +91 + + +3320 +3523 +11 + + + + + + +endColumn +beginColumn + + +12 + + +1 +2 +812 + + +2 +3 +167 + + +3 +8 +95 + + +8 +33 +92 + + +33 +42 +38 + + + + + + +endColumn +endLine + + +12 + + +1 +2 +625 + + +2 +3 +211 + + +3 +4 +84 + + +4 +8 +91 + + +8 +405 +91 + + +414 +3303 +91 + + +3320 +3523 +11 + + + + + + + + +py_codelines +52985 + + +id +52985 + + +count +732 + + + + +id +count + + +12 + + +1 +2 +52985 + + + + + + +count +id + + +12 + + +1 +2 +307 + + +2 +3 +116 + + +3 +4 +59 + + +4 +6 +61 + + +6 +11 +62 + + +11 +28 +57 + + +28 +612 +55 + + +631 +13079 +15 + + + + + + + + +py_commentlines 
+52983 + + +id +52983 + + +count +198 + + + + +id +count + + +12 + + +1 +2 +52983 + + + + + + +count +id + + +12 + + +1 +2 +78 + + +2 +3 +26 + + +3 +4 +11 + + +4 +6 +16 + + +6 +10 +15 + + +10 +19 +15 + + +19 +48 +15 + + +49 +351 +15 + + +494 +40367 +7 + + + + + + + + +py_docstringlines +52983 + + +id +52983 + + +count +123 + + + + +id +count + + +12 + + +1 +2 +52983 + + + + + + +count +id + + +12 + + +1 +2 +20 + + +2 +3 +11 + + +3 +4 +9 + + +4 +5 +10 + + +5 +8 +11 + + +8 +13 +10 + + +14 +22 +11 + + +22 +29 +10 + + +29 +54 +10 + + +56 +175 +10 + + +232 +5368 +10 + + +36413 +36414 +1 + + + + + + + + +py_alllines +52983 + + +id +52983 + + +count +829 + + + + +id +count + + +12 + + +1 +2 +52983 + + + + + + +count +id + + +12 + + +1 +2 +361 + + +2 +3 +108 + + +3 +4 +68 + + +4 +5 +47 + + +5 +8 +69 + + +8 +17 +65 + + +17 +93 +64 + + +113 +9596 +47 + + + + + + + + +svnentries +100 + + +id +100 + + +revision +100 + + +author +100 + + +revisionDate +100 + + +changeSize +100 + + + + +id +revision + + +12 + + + + + +id +author + + +12 + + + + + +id +revisionDate + + +12 + + + + + +id +changeSize + + +12 + + + + + +revision +id + + +12 + + + + + +revision +author + + +12 + + + + + +revision +revisionDate + + +12 + + + + + +revision +changeSize + + +12 + + + + + +author +id + + +12 + + + + + +author +revision + + +12 + + + + + +author +revisionDate + + +12 + + + + + +author +changeSize + + +12 + + + + + +revisionDate +id + + +12 + + + + + +revisionDate +revision + + +12 + + + + + +revisionDate +author + + +12 + + + + + +revisionDate +changeSize + + +12 + + + + + +changeSize +id + + +12 + + + + + +changeSize +revision + + +12 + + + + + +changeSize +author + + +12 + + + + + +changeSize +revisionDate + + +12 + + + + + + + +svnaffectedfiles +100 + + +id +100 + + +file +100 + + +action +100 + + + + +id +file + + +12 + + + + + +id +action + + +12 + + + + + +file +id + + +12 + + + + + +file +action + + +12 + + + + + +action +id + + +12 + + + + + +action +file + + +12 + + + + + + + +svnentrymsg +100 + + +id +100 + + +message +100 + + + + +id +message + + +12 + + + + + +message +id + + +12 + + + + + + + +svnchurn +100 + + +commit +100 + + +file +100 + + +addedLines +100 + + +deletedLines +100 + + + + +commit +file + + +12 + + + + + +commit +addedLines + + +12 + + + + + +commit +deletedLines + + +12 + + + + + +file +commit + + +12 + + + + + +file +addedLines + + +12 + + + + + +file +deletedLines + + +12 + + + + + +addedLines +commit + + +12 + + + + + +addedLines +file + + +12 + + + + + +addedLines +deletedLines + + +12 + + + + + +deletedLines +commit + + +12 + + + + + +deletedLines +file + + +12 + + + + + +deletedLines +addedLines + + +12 + + + + + + + +files +3066 + + +id +3066 + + +name +3066 + + + + +id +name + + +12 + + +1 +2 +3066 + + + + + + +name +id + + +12 + + +1 +2 +3066 + + + + + + + + +folders +686 + + +id +686 + + +name +686 + + + + +id +name + + +12 + + +1 +2 +686 + + + + + + +name +id + + +12 + + +1 +2 +686 + + + + + + + + +containerparent +3750 + + +parent +685 + + +child +3750 + + + + +parent +child + + +12 + + +1 +2 +53 + + +2 +3 +202 + + +3 +4 +176 + + +4 +5 +57 + + +5 +6 +34 + + +6 +8 +56 + + +8 +13 +54 + + +13 +149 +52 + + +204 +205 +1 + + + + + + +child +parent + + +12 + + +1 +2 +3750 + + + + + + + + +numlines +2553 + + +element_id +2553 + + +num_lines +687 + + +num_code +648 + + +num_comment +193 + + + + +element_id +num_lines + + +12 + + +1 +2 +2553 + + + + + + +element_id +num_code + + +12 + + +1 +2 +2553 + + + + + + +element_id +num_comment + + +12 + + +1 +2 +2553 + + + + + + +num_lines 
+element_id + + +12 + + +1 +2 +345 + + +2 +3 +129 + + +3 +4 +44 + + +4 +6 +57 + + +6 +11 +54 + + +11 +34 +52 + + +35 +60 +6 + + + + + + +num_lines +num_code + + +12 + + +1 +2 +348 + + +2 +3 +134 + + +3 +4 +46 + + +4 +5 +41 + + +5 +6 +39 + + +6 +9 +60 + + +9 +17 +19 + + + + + + +num_lines +num_comment + + +12 + + +1 +2 +348 + + +2 +3 +134 + + +3 +4 +46 + + +4 +5 +41 + + +5 +6 +39 + + +6 +9 +60 + + +9 +17 +19 + + + + + + +num_code +element_id + + +12 + + +1 +2 +319 + + +2 +3 +110 + + +3 +4 +53 + + +4 +6 +56 + + +6 +11 +54 + + +11 +36 +49 + + +36 +56 +7 + + + + + + +num_code +num_lines + + +12 + + +1 +2 +321 + + +2 +3 +110 + + +3 +4 +62 + + +4 +5 +38 + + +5 +7 +52 + + +7 +10 +51 + + +10 +14 +14 + + + + + + +num_code +num_comment + + +12 + + +1 +2 +321 + + +2 +3 +110 + + +3 +4 +62 + + +4 +5 +38 + + +5 +7 +52 + + +7 +10 +51 + + +10 +14 +14 + + + + + + +num_comment +element_id + + +12 + + +1 +2 +72 + + +2 +3 +29 + + +3 +4 +16 + + +4 +5 +15 + + +5 +8 +12 + + +8 +13 +15 + + +13 +29 +16 + + +30 +98 +15 + + +112 +578 +3 + + + + + + +num_comment +num_lines + + +12 + + +1 +2 +72 + + +2 +3 +29 + + +3 +4 +16 + + +4 +5 +15 + + +5 +8 +12 + + +8 +13 +15 + + +13 +26 +15 + + +27 +75 +16 + + +75 +112 +3 + + + + + + +num_comment +num_code + + +12 + + +1 +2 +72 + + +2 +3 +29 + + +3 +4 +16 + + +4 +5 +15 + + +5 +8 +12 + + +8 +13 +15 + + +13 +26 +15 + + +27 +75 +16 + + +75 +112 +3 + + + + + + + + +locations_default +100 + + +id +100 + + +file +100 + + +beginLine +100 + + +beginColumn +100 + + +endLine +100 + + +endColumn +100 + + + + +id +file + + +12 + + +1 +2 +2 + + + + + + +id +beginLine + + +12 + + +1 +2 +2 + + + + + + +id +beginColumn + + +12 + + +1 +2 +2 + + + + + + +id +endLine + + +12 + + +1 +2 +2 + + + + + + +id +endColumn + + +12 + + +1 +2 +2 + + + + + + +file +id + + +12 + + + + + +file +beginLine + + +12 + + + + + +file +beginColumn + + +12 + + + + + +file +endLine + + +12 + + + + + +file +endColumn + + +12 + + + + + +beginLine +id + + +12 + + + + + +beginLine +file + + +12 + + + + + +beginLine +beginColumn + + +12 + + + + + +beginLine +endLine + + +12 + + + + + +beginLine +endColumn + + +12 + + + + + +beginColumn +id + + +12 + + + + + +beginColumn +file + + +12 + + + + + +beginColumn +beginLine + + +12 + + + + + +beginColumn +endLine + + +12 + + + + + +beginColumn +endColumn + + +12 + + + + + +endLine +id + + +12 + + + + + +endLine +file + + +12 + + + + + +endLine +beginLine + + +12 + + + + + +endLine +beginColumn + + +12 + + + + + +endLine +endColumn + + +12 + + + + + +endColumn +id + + +12 + + + + + +endColumn +file + + +12 + + + + + +endColumn +beginLine + + +12 + + + + + +endColumn +beginColumn + + +12 + + + + + +endColumn +endLine + + +12 + + + + + + + +locations_ast +2310679 + + +id +2310679 + + +module +1527 + + +beginLine +12546 + + +beginColumn +2819 + + +endLine +12539 + + +endColumn +2939 + + + + +id +module + + +12 + + +1 +2 +2310679 + + + + + + +id +beginLine + + +12 + + +1 +2 +2310679 + + + + + + +id +beginColumn + + +12 + + +1 +2 +2310679 + + + + + + +id +endLine + + +12 + + +1 +2 +2310679 + + + + + + +id +endColumn + + +12 + + +1 +2 +2310679 + + + + + + +module +id + + +12 + + +1 +2 +288 + + +2 +30 +114 + + +30 +159 +114 + + +159 +276 +114 + + +279 +427 +116 + + +434 +716 +114 + + +719 +1003 +114 + + +1007 +1409 +116 + + +1426 +1860 +114 + + +1862 +2782 +114 + + +2798 +5578 +114 + + +5667 +58828 +87 + + + + + + +module +beginLine + + +12 + + +1 +2 +288 + + +2 +17 +116 + + +17 +42 +114 + + +42 +72 +116 + + +72 +113 +116 + + +114 +165 +116 + + +167 +231 +116 + + +232 +314 +114 + + +314 
+411 +114 + + +413 +634 +114 + + +640 +1326 +114 + + +1326 +6932 +83 + + + + + + +module +beginColumn + + +12 + + +1 +2 +288 + + +2 +7 +114 + + +7 +29 +117 + + +29 +41 +119 + + +41 +49 +126 + + +49 +56 +137 + + +56 +60 +110 + + +60 +64 +123 + + +64 +68 +117 + + +68 +74 +127 + + +74 +91 +116 + + +91 +1405 +29 + + + + + + +module +endLine + + +12 + + +1 +2 +288 + + +2 +17 +117 + + +17 +43 +119 + + +44 +74 +121 + + +74 +117 +114 + + +117 +173 +114 + + +173 +238 +114 + + +238 +322 +114 + + +326 +421 +114 + + +421 +666 +116 + + +668 +1461 +114 + + +1472 +6948 +74 + + + + + + +module +endColumn + + +12 + + +1 +2 +288 + + +2 +18 +116 + + +18 +45 +114 + + +45 +59 +130 + + +59 +65 +131 + + +65 +69 +108 + + +69 +72 +109 + + +72 +75 +114 + + +75 +79 +121 + + +79 +86 +120 + + +86 +99 +120 + + +99 +1425 +51 + + + + + + +beginLine +id + + +12 + + +1 +8 +783 + + +8 +11 +960 + + +11 +15 +1027 + + +15 +20 +1012 + + +20 +27 +1050 + + +27 +36 +995 + + +36 +49 +1003 + + +49 +66 +977 + + +66 +107 +951 + + +107 +170 +949 + + +170 +297 +947 + + +297 +636 +941 + + +637 +2279 +941 + + +2283 +2351 +2 + + + + + + +beginLine +module + + +12 + + +1 +2 +1188 + + +2 +3 +1761 + + +3 +4 +510 + + +4 +5 +792 + + +5 +6 +792 + + +6 +9 +1114 + + +9 +11 +726 + + +11 +14 +1084 + + +14 +25 +955 + + +25 +42 +942 + + +42 +71 +976 + + +71 +177 +942 + + +177 +1104 +758 + + + + + + +beginLine +beginColumn + + +12 + + +1 +6 +995 + + +6 +8 +486 + + +8 +9 +780 + + +9 +11 +1091 + + +11 +13 +952 + + +13 +16 +1093 + + +16 +19 +954 + + +19 +23 +1128 + + +23 +29 +954 + + +29 +38 +972 + + +38 +47 +980 + + +47 +59 +976 + + +59 +75 +984 + + +75 +542 +196 + + + + + + +beginLine +endLine + + +12 + + +1 +2 +3511 + + +2 +3 +3490 + + +3 +4 +1501 + + +4 +5 +767 + + +5 +7 +1110 + + +7 +10 +988 + + +10 +17 +1010 + + +17 +51 +166 + + + + + + +beginLine +endColumn + + +12 + + +1 +5 +672 + + +5 +7 +785 + + +7 +9 +868 + + +9 +12 +1028 + + +12 +16 +1156 + + +16 +20 +952 + + +20 +25 +1052 + + +25 +30 +983 + + +30 +40 +1003 + + +40 +52 +959 + + +52 +64 +1026 + + +64 +74 +951 + + +74 +89 +965 + + +89 +546 +141 + + + + + + +beginColumn +id + + +12 + + +1 +2 +1542 + + +2 +3 +877 + + +3 +5 +213 + + +5 +250154 +185 + + + + + + +beginColumn +module + + +12 + + +1 +2 +2376 + + +2 +3 +238 + + +3 +1104 +204 + + + + + + +beginColumn +beginLine + + +12 + + +1 +2 +1542 + + +2 +3 +882 + + +3 +6 +220 + + +6 +7984 +174 + + + + + + +beginColumn +endLine + + +12 + + +1 +2 +1542 + + +2 +3 +882 + + +3 +6 +220 + + +6 +7972 +174 + + + + + + +beginColumn +endColumn + + +12 + + +1 +2 +2295 + + +2 +3 +304 + + +3 +114 +211 + + +120 +161 +6 + + + + + + +endLine +id + + +12 + + +1 +8 +793 + + +8 +11 +965 + + +11 +15 +996 + + +15 +20 +1005 + + +20 +27 +1056 + + +27 +36 +1016 + + +36 +49 +981 + + +49 +65 +966 + + +65 +106 +956 + + +106 +169 +951 + + +169 +295 +947 + + +295 +626 +941 + + +627 +2214 +941 + + +2217 +2349 +19 + + + + + + +endLine +module + + +12 + + +1 +2 +1210 + + +2 +3 +1754 + + +3 +4 +526 + + +4 +5 +797 + + +5 +6 +760 + + +6 +9 +1109 + + +9 +11 +732 + + +11 +14 +1078 + + +14 +25 +947 + + +25 +42 +956 + + +42 +70 +942 + + +70 +170 +941 + + +170 +1104 +782 + + + + + + +endLine +beginLine + + +12 + + +1 +2 +4048 + + +2 +3 +3046 + + +3 +4 +1345 + + +4 +5 +851 + + +5 +7 +1021 + + +7 +10 +1010 + + +10 +17 +1010 + + +17 +34 +203 + + + + + + +endLine +beginColumn + + +12 + + +1 +6 +999 + + +6 +9 +1140 + + +9 +11 +1056 + + +11 +13 +933 + + +13 +16 +1154 + + +16 +19 +992 + + +19 +23 +1129 + + +23 +29 +999 + + +29 +38 +981 + + +38 +47 +983 + + +47 +59 +985 + + +59 +75 +988 + + +75 
+542 +192 + + + + + + +endLine +endColumn + + +12 + + +1 +6 +1045 + + +6 +8 +1010 + + +8 +11 +1073 + + +11 +14 +933 + + +14 +18 +1055 + + +18 +23 +1084 + + +23 +28 +1020 + + +28 +36 +984 + + +36 +48 +999 + + +48 +60 +991 + + +60 +70 +959 + + +70 +84 +963 + + +84 +547 +418 + + + + + + +endColumn +id + + +12 + + +1 +2 +1505 + + +2 +3 +972 + + +3 +5 +227 + + +5 +41083 +221 + + +42453 +55223 +13 + + + + + + +endColumn +module + + +12 + + +1 +2 +2435 + + +2 +3 +264 + + +3 +782 +221 + + +782 +1104 +18 + + + + + + +endColumn +beginLine + + +12 + + +1 +2 +1606 + + +2 +3 +902 + + +3 +6 +228 + + +6 +6777 +202 + + + + + + +endColumn +beginColumn + + +12 + + +1 +2 +2250 + + +2 +3 +408 + + +3 +56 +221 + + +56 +79 +59 + + + + + + +endColumn +endLine + + +12 + + +1 +2 +1606 + + +2 +3 +902 + + +3 +6 +228 + + +6 +6726 +202 + + + + + + + + +py_module_path +3066 + + +module +3066 + + +file +3066 + + + + +module +file + + +12 + + +1 +2 +3066 + + + + + + +file +module + + +12 + + +1 +2 +3066 + + + + + + + + +file_contents +100 + + +file +3066 + + +contents +100 + + + + +file +contents + + +12 + + +1 +2 +100 + + + + + + +contents +file + + +12 + + +1 +2 +100 + + + + + + + + +variable +242770 + + +id +242770 + + +scope +50174 + + +name +54891 + + + + +id +scope + + +12 + + +1 +2 +242770 + + + + + + +id +name + + +12 + + +1 +2 +242770 + + + + + + +scope +id + + +12 + + +1 +2 +10764 + + +2 +3 +14394 + + +3 +4 +7657 + + +4 +5 +4580 + + +5 +6 +2991 + + +6 +9 +4606 + + +9 +22 +3819 + + +22 +233 +1360 + + + + + + +scope +name + + +12 + + +1 +2 +10764 + + +2 +3 +14394 + + +3 +4 +7657 + + +4 +5 +4580 + + +5 +6 +2991 + + +6 +9 +4606 + + +9 +22 +3819 + + +22 +233 +1360 + + + + + + +name +id + + +12 + + +1 +2 +36525 + + +2 +3 +8506 + + +3 +5 +4396 + + +5 +20 +4134 + + +20 +10542 +1327 + + + + + + +name +scope + + +12 + + +1 +2 +36525 + + +2 +3 +8506 + + +3 +5 +4396 + + +5 +20 +4134 + + +20 +10542 +1327 + + + + + + + + +py_line_lengths +100 + + +id +100 + + +file +100 + + +line +100 + + +length +100 + + + + +id +file + + +12 + + +1 +2 +2 + + + + + + +id +line + + +12 + + +1 +2 +2 + + + + + + +id +length + + +12 + + +1 +2 +2 + + + + + + +file +id + + +12 + + + + + +file +line + + +12 + + + + + +file +length + + +12 + + + + + +line +id + + +12 + + + + + +line +file + + +12 + + + + + +line +length + + +12 + + + + + +length +id + + +12 + + + + + +length +file + + +12 + + + + + +length +line + + +12 + + + + + + + +py_Classes +10244 + + +id +10244 + + +parent +10244 + + + + +id +parent + + +12 + + +1 +2 +10244 + + + + + + +parent +id + + +12 + + +1 +2 +10244 + + + + + + + + +py_Functions +44860 + + +id +44860 + + +parent +44860 + + + + +id +parent + + +12 + + +1 +2 +44860 + + + + + + +parent +id + + +12 + + +1 +2 +44860 + + + + + + + + +py_Modules +5983 + + +id +5983 + + + + + +py_patterns +1000 + + +id +1000 + + +kind +13 + + +parent +1000 + + +idx +100 + + + + + +py_pattern_lists +1000 + + +id +1000 + + +parent +1000 + + +idx +100 + + + + + +py_extracted_version +3337 + + +module +3337 + + +version +1 + + + + +module +version + + +12 + + +1 +2 +3337 + + + + + + +version +module + + +12 + + +3337 +3338 +1 + + + + + + + + +py_StringParts +6399 + + +id +6399 + + +parent +2296 + + +idx +62 + + + + +id +parent + + +12 + + +1 +2 +6399 + + + + + + +id +idx + + +12 + + +1 +2 +6399 + + + + + + +parent +id + + +12 + + +2 +3 +1598 + + +3 +4 +380 + + +4 +5 +142 + + +5 +63 +176 + + + + + + +parent +idx + + +12 + + +2 +3 +1598 + + +3 +4 +380 + + +4 +5 +142 + + +5 +63 +176 + + + + + + +idx +id + + +12 + + +4 +5 +17 + + +5 +6 +23 + + +6 +9 +5 
[Relation statistics continue here in the same format for the remaining relations of the Python extractor schema: the py_* AST, expression, statement, control-flow, and SSA relations (py_exprs, py_stmts, py_variables, py_successors, py_flow_bb_node, py_ssa_*, ...), the py_cobject*/py_cmembers_versioned/py_citems relations, the ext_* type-annotation relations, py_special_objects and py_decorated_object, the xml* relations, and the yaml* relations. Each block gives the relation name, its total tuple count, the number of distinct values per column, and per-column-pair value histograms; the tail of the final block (yaml_locations) follows below.]
+locatable +location + + +12 + + +1 +2 +71 + + + + + + +location +locatable + + +12 + + +1 +2 +71 + + + + + + + + + diff --git a/example/codeql-db/diagnostic/cli-diagnostics-add-20230719T090511.632Z.json b/example/codeql-db/diagnostic/cli-diagnostics-add-20230719T090511.632Z.json new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/example/codeql-db/diagnostic/cli-diagnostics-add-20230719T090515.827Z.json b/example/codeql-db/diagnostic/cli-diagnostics-add-20230719T090515.827Z.json new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/example/codeql-db/src.zip b/example/codeql-db/src.zip new file mode 100644 index 0000000000000000000000000000000000000000..728b660a56a4dc506604b2bf7c558a529fd9265f --- /dev/null +++ b/example/codeql-db/src.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fae066a9c830ad0de4813bdb183474e70b64ddc43c1ebd5d9787101e2cb30e97 +size 1487363 diff --git a/example/src/main.py b/example/src/main.py new file mode 100644 index 0000000000000000000000000000000000000000..cb1520fe256cf6de34aa254525ab4ea64466ad92 --- /dev/null +++ b/example/src/main.py @@ -0,0 +1,3 @@ +import subprocess + +subprocess.call(["python", "main.py"]) diff --git a/example/test.ipynb b/example/test.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..038afad4136d4121a7f4a2e11fac0b1b6b37acfe --- /dev/null +++ b/example/test.ipynb @@ -0,0 +1,141 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "ad247f35-1a9d-4f3a-9788-ecdbc43b6976", + "metadata": {}, + "source": [ + "## Setup" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "9051f612-d935-4b90-a623-0c5504b51d9c", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'Database registered!'" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "%set_database codeql-db" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "a460e252-30f5-4cad-9a32-253e9d789025", + "metadata": {}, + "outputs": [], + "source": [ + "import python\n", + "import semmle.python.ApiGraphs\n", + "import semmle.python.dataflow.new.RemoteFlowSources\n", + "import semmle.python.dataflow.new.DataFlow" + ] + }, + { + "cell_type": "markdown", + "id": "ee080ea9-375f-432b-8fc8-a93fa0e60910", + "metadata": {}, + "source": [ + "## Code exploration" + ] + }, + { + "cell_type": "markdown", + "id": "3c305ec3-40f7-4a7e-bef6-845ae0ef0c4f", + "metadata": {}, + "source": [ + "### Find all calls to `subprocess.call`" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "32c13716-39a0-43d9-8e05-25a11fcd557f", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
nURL for n
0ControlFlowNode for Attribute()file:///Users/pwntester/src/github.com/github/codeql-jupyter-kernel/example/src/main.py:3:1:3:38
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "query predicate test(API::CallNode n) {\n", + " n = API::moduleImport(\"subprocess\").getMember(\"call\").getACall()\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "0e37dbb8-32b5-45a0-8aa2-90a8b415c905", + "metadata": {}, + "source": [ + "## Attack surface" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9383863b-0f6e-4fc3-afad-b653cae62bdf", + "metadata": {}, + "outputs": [], + "source": [ + "query predicate attackSurface(DataFlow::Node n) { n instanceof RemoteFlowSource }" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "CodeQL", + "language": "codeql", + "name": "codeql" + }, + "language_info": { + "file_extension": ".ql", + "help_links": [ + { + "text": "MetaKernel Magics", + "url": "https://metakernel.readthedocs.io/en/latest/source/README.html" + } + ], + "mimetype": "text/x-codeql", + "name": "codeql" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}