Dataset columns (dtype and observed min/max; string columns report character lengths):

column           dtype     min    max
complexity       int64     1      56
n_identifiers    int64     1      114
code             string    19     12.7k
path             string    8      134
n_ast_nodes      int64     12     2.35k
ast_errors       string    0      4.01k
repo             string    3      28
documentation    dict      -      -
n_words          int64     2      866
language         string    1 distinct value
vocab_size       int64     2      323
commit_id        string    40     40
file_name        string    5      79
id               int64     243    338k
nloc             int64     1      228
token_counts     int64     5      1.4k
fun_name         string    1      77
url              string    31     60
commit_message   string    3      15.3k
n_whitespaces    int64     1      3.23k
n_ast_errors     int64     0      20
d_id             int64     74     121k
ast_levels       int64     4      29

Sample rows follow, one cell value per line in the column order above (an empty ast_errors cell is omitted).
1
15
def from_db(cls, db, field_names, values):
    instance = super().from_db(db, field_names, values)

    instance._orig_termination_a_type_id = instance.termination_a_type_id
    instance._orig_termination_a_ids = instance.termination_a_ids
    instance._orig_termination_b_type_id = instance.termination_b_type_id
    instance._orig_termination_b_ids = instance.termination_b_ids

    return instance
netbox/dcim/models/cables.py
87
netbox
{ "docstring": "\n Cache the original A and B terminations of existing Cable instances for later reference inside clean().\n ", "language": "en", "n_whitespaces": 31, "n_words": 16, "vocab_size": 16 }
24
Python
18
4bb9b6ee2639db683b70d6ddbee055497e0a3647
cables.py
264,750
7
56
from_db
https://github.com/netbox-community/netbox.git
Extend Cable model to support multiple A/B terminations
73
0
77,794
10
1
3
def expand_dims(v, dims): return v[(...,) + (None,) * (dims - 1)]
ldm/models/diffusion/dpm_solver/dpm_solver.py
42
stablediffusion
{ "docstring": "\n Expand the tensor `v` to the dim `dims`.\n Args:\n `v`: a PyTorch tensor with shape [N].\n `dim`: a `int`.\n Returns:\n a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.\n ", "language": "en", "n_whitespaces": 70, "n_words": 36, "vocab_size": 25 }
11
Python
11
ca86da3a30c4e080d4db8c25fca73de843663cb4
dpm_solver.py
157,367
2
27
expand_dims
https://github.com/Stability-AI/stablediffusion.git
release more models
17
0
36,907
10
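A minimal usage sketch of the `expand_dims` helper from the row above, assuming a PyTorch tensor; the definition is repeated so the snippet is self-contained and the values are illustrative only:

import torch

def expand_dims(v, dims):
    return v[(...,) + (None,) * (dims - 1)]

v = torch.randn(4)            # shape [N] = [4]
out = expand_dims(v, 3)       # indexing with None appends singleton axes
print(out.shape)              # torch.Size([4, 1, 1]); total dimension is dims = 3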
8
26
def test_combined_data_loader_with_max_size_cycle_and_ddp(replace_sampler_ddp, tmpdir):
    trainer = Trainer(strategy="ddp", accelerator="auto", devices=2, replace_sampler_ddp=replace_sampler_ddp)
    dataloader = CombinedLoader(
        {"a": DataLoader(RandomDataset(32, 8), batch_size=1), "b": DataLoader(RandomDataset(32, 8), batch_size=1)},
    )
    dataloader = trainer._data_connector._prepare_dataloader(dataloader, shuffle=False)
    assert len(dataloader) == 4 if replace_sampler_ddp else 8

    for a_length in [6, 8, 10]:
        dataloader = CombinedLoader(
            {
                "a": DataLoader(range(a_length), batch_size=1),
                "b": DataLoader(range(8), batch_size=1),
            },
            mode="max_size_cycle",
        )

        length = max(a_length, 8)
        assert len(dataloader) == length
        dataloader = trainer._data_connector._prepare_dataloader(dataloader, shuffle=False)
        assert len(dataloader) == length // 2 if replace_sampler_ddp else length
        if replace_sampler_ddp:
            last_batch = list(dataloader)[-1]
            if a_length == 6:
                assert last_batch == {"a": torch.tensor([0]), "b": torch.tensor([6])}
            elif a_length == 8:
                assert last_batch == {"a": torch.tensor([6]), "b": torch.tensor([6])}
            elif a_length == 10:
                assert last_batch == {"a": torch.tensor([8]), "b": torch.tensor([0])}
tests/trainer/test_supporters.py
459
lightning
{ "docstring": "This test makes sure distributed sampler has been properly injected in dataloaders when using CombinedLoader\n with ddp and `max_size_cycle` mode.", "language": "en", "n_whitespaces": 22, "n_words": 20, "vocab_size": 20 }
112
Python
57
5b59c951e28ddc8bb884f044b1f46fb54c23a8b8
test_supporters.py
241,651
41
399
test_combined_data_loader_with_max_size_cycle_and_ddp
https://github.com/Lightning-AI/lightning.git
Deprecate `TrainerDataLoadingMixin` and move logic to `DataConnector` (#11282) Co-authored-by: Rohit Gupta <[email protected]> Co-authored-by: Aki Nitta <[email protected]> Co-authored-by: Carlos Mocholí <[email protected]>
341
0
69,641
18
2
21
def set_pea_parser(parser=None):
    if not parser:
        from .base import set_base_parser

        parser = set_base_parser()

    from .peapods.base import mixin_base_ppr_parser
    from .peapods.runtimes.worker import mixin_worker_runtime_parser
    from .peapods.runtimes.container import mixin_container_runtime_parser
    from .peapods.runtimes.remote import mixin_remote_runtime_parser
    from .peapods.pea import mixin_pea_parser
    from .peapods.runtimes.distributed import mixin_distributed_feature_parser
    from .hubble.pull import mixin_hub_pull_options_parser

    mixin_base_ppr_parser(parser)
    mixin_worker_runtime_parser(parser)
    mixin_container_runtime_parser(parser)
    mixin_remote_runtime_parser(parser)
    mixin_distributed_feature_parser(parser)
    mixin_pea_parser(parser)
    mixin_hub_pull_options_parser(parser)
    mixin_head_parser(parser)

    return parser
jina/parsers/__init__.py
193
jina
{ "docstring": "Set the parser for the Pea\n\n :param parser: an optional existing parser to build upon\n :return: the parser\n ", "language": "en", "n_whitespaces": 27, "n_words": 18, "vocab_size": 14 }
50
Python
35
933415bfa1f9eb89f935037014dfed816eb9815d
__init__.py
9,830
20
113
set_pea_parser
https://github.com/jina-ai/jina.git
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests 
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" 
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request 
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
118
0
1,716
10
2
26
def _fd(f):
    return f.fileno() if hasattr(f, "fileno") else f


if os.name == "nt":
    import msvcrt
    from ctypes import (
        POINTER,
        Structure,
        Union,
        byref,
        c_int64,
        c_ulong,
        c_void_p,
        sizeof,
        windll,
    )
    from ctypes.wintypes import BOOL, DWORD, HANDLE

    LOCK_SH = 0  # the default
    LOCK_NB = 0x1  # LOCKFILE_FAIL_IMMEDIATELY
    LOCK_EX = 0x2  # LOCKFILE_EXCLUSIVE_LOCK

    # --- Adapted from the pyserial project ---
    # detect size of ULONG_PTR
    if sizeof(c_ulong) != sizeof(c_void_p):
        ULONG_PTR = c_int64
    else:
        ULONG_PTR = c_ulong
    PVOID = c_void_p

    # --- Union inside Structure by stackoverflow:3480240 ---
django/core/files/locks.py
161
django
{ "docstring": "Get a filedescriptor from something which could be a file or an fd.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 12 }
86
Python
64
9c19aff7c7561e3a82978a272ecdaad40dda5c00
locks.py
204,481
2
21
_fd
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
210
0
50,743
9
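A small illustration of what the `_fd` helper above normalizes, using a plain temporary file; this is a sketch and not part of the Django test suite:

import tempfile

def _fd(f):
    return f.fileno() if hasattr(f, "fileno") else f

with tempfile.TemporaryFile() as fh:
    print(_fd(fh))   # the underlying OS file descriptor (an int)
    print(_fd(3))    # already a file descriptor, returned unchanged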
1
13
def set_scale_factor(self, scale_factor, unit_system="SI"):
    sympy_deprecation_warning(
        f,
        deprecated_since_version="1.5",
        active_deprecations_target="deprecated-quantity-methods",
    )
    from sympy.physics.units import UnitSystem
    unit_system = UnitSystem.get_unit_system(unit_system)
    unit_system.set_quantity_scale_factor(self, scale_factor)
sympy/physics/units/quantities.py
82
sympy
{ "docstring": "\n Quantity.set_scale_factor() is deprecated. Use either\n unit_system.set_quantity_scale_factors() or\n {self}.set_global_relative_scale_factor() instead.\n ", "language": "en", "n_whitespaces": 54, "n_words": 9, "vocab_size": 9 }
18
Python
18
905eb426131ca9542a6b258462d9ae984e5b2563
quantities.py
197,104
13
49
set_scale_factor
https://github.com/sympy/sympy.git
Update the deprecation warnings in sympy.physics.units
85
0
48,341
9
1
9
def trace(self, offset=0, axis1=0, axis2=1, dtype=None):
    from dask.array.reductions import trace

    return trace(self, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)
dask/array/core.py
72
dask
{ "docstring": "Return the sum along diagonals of the array.\n\n Refer to :func:`dask.array.trace` for full documentation.\n\n See Also\n --------\n dask.array.trace : equivalent function\n ", "language": "en", "n_whitespaces": 56, "n_words": 21, "vocab_size": 20 }
16
Python
15
2820bae493a49cb1d0a6e376985c5473b8f04fa8
core.py
156,750
3
51
trace
https://github.com/dask/dask.git
Don't include docs in ``Array`` methods, just refer to module docs (#9244) Co-authored-by: James Bourbeau <[email protected]>
37
0
36,760
8
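A quick sketch of the `trace` method above in use, assuming dask.array is available; the array and chunking are illustrative:

import dask.array as da

x = da.eye(3, chunks=2)        # 3x3 identity matrix, lazily chunked
print(x.trace().compute())     # 3.0 -- sum along the main diagonal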
1
11
async def test_ensure_config_exists_creates_config(hass):
    assert not os.path.isfile(YAML_PATH)

    with patch("builtins.print") as mock_print:
        await config_util.async_ensure_config_exists(hass)

    assert os.path.isfile(YAML_PATH)
    assert mock_print.called
tests/test_config.py
80
core
{ "docstring": "Test that calling ensure_config_exists.\n\n If not creates a new config file.\n ", "language": "en", "n_whitespaces": 17, "n_words": 11, "vocab_size": 11 }
16
Python
13
8b38fa58aa45d1809f6900729b4046d6c02c2230
test_config.py
312,785
6
44
test_ensure_config_exists_creates_config
https://github.com/home-assistant/core.git
Bump pytest to 7.0.0 (#65981)
38
0
111,422
11
1
3
def is_even(self): return not self.is_odd
sympy/combinatorics/permutations.py
21
sympy
{ "docstring": "\n Checks if a permutation is even.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation\n >>> p = Permutation([0, 1, 2, 3])\n >>> p.is_even\n True\n >>> p = Permutation([3, 2, 1, 0])\n >>> p.is_even\n True\n\n See Also\n ========\n\n is_odd\n ", "language": "en", "n_whitespaces": 136, "n_words": 37, "vocab_size": 26 }
5
Python
5
498015021131af4dbb07eb110e5badaba8250c7b
permutations.py
196,182
2
11
is_even
https://github.com/sympy/sympy.git
Updated import locations
19
0
47,682
7
15
44
def warns(warningcls, *, match='', test_stacklevel=True):
    # Absorbs all warnings in warnrec
    with warnings.catch_warnings(record=True) as warnrec:
        # Hide all warnings but make sure that our warning is emitted
        warnings.simplefilter("ignore")
        warnings.filterwarnings("always", match, warningcls)
        # Now run the test
        yield warnrec

    # Raise if expected warning not found
    if not any(issubclass(w.category, warningcls) for w in warnrec):
        msg = ('Failed: DID NOT WARN.'
               ' No warnings of type %s was emitted.'
               ' The list of emitted warnings is: %s.'
               ) % (warningcls, [w.message for w in warnrec])
        raise Failed(msg)

    if test_stacklevel:
        for f in inspect.stack():
            thisfile = f.filename
            file = os.path.split(thisfile)[1]
            if file.startswith('test_'):
                break
            elif file == 'doctest.py':
                # skip the stacklevel testing in the doctests of this
                # function
                return
        else:
            raise RuntimeError("Could not find the file for the given warning to test the stacklevel")
        for w in warnrec:
            if w.filename != thisfile:
                msg = f.replace('\n', ' ')
                raise Failed(msg)

    if warningcls == SymPyDeprecationWarning:
        this_file = pathlib.Path(__file__)
        active_deprecations_file = (this_file.parent.parent.parent / 'doc' / 'src' /
                                    'explanation' / 'active-deprecations.md')
        if not active_deprecations_file.exists():
            # We can only test that the active_deprecations_target works if we are
            # in the git repo.
            return
        targets = []
        for w in warnrec:
            targets.append(w.message.active_deprecations_target)
        with open(active_deprecations_file) as f:
            text = f.read()
        for target in targets:
            if f'({target})=' not in text:
                raise Failed(f"The active deprecations target {target!r} does not appear to be a valid target in the active-deprecations.md file ({active_deprecations_file}).")
sympy/testing/pytest.py
485
sympy
{ "docstring": "\n Like raises but tests that warnings are emitted.\n\n >>> from sympy.testing.pytest import warns\n >>> import warnings\n\n >>> with warns(UserWarning):\n ... warnings.warn('deprecated', UserWarning, stacklevel=2)\n\n >>> with warns(UserWarning):\n ... pass\n Traceback (most recent call last):\n ...\n Failed: DID NOT WARN. No warnings of type UserWarning\\\n was emitted. The list of emitted warnings is: [].\n\n ``test_stacklevel`` makes it check that the ``stacklevel`` parameter to\n ``warn()`` is set so that the warning shows the user line of code (the\n code under the warns() context manager). Set this to False if this is\n ambiguous or if the context manager does not test the direct user code\n that emits the warning.\n\n If the warning is a ``SymPyDeprecationWarning``, this additionally tests that\n the ``active_deprecations_target`` is a real target in the\n ``active-deprecations.md`` file.\n\n \\\nFailed: Warning has the wrong stacklevel. The warning stacklevel needs to be\nset so that the line of code shown in the warning message is user code that\ncalls the deprecated code (the current stacklevel is showing code from\n{w.filename}, expected {thisfile})", "language": "en", "n_whitespaces": 232, "n_words": 168, "vocab_size": 101 }
229
Python
139
c5aa4e76c9642ebb2cf0fe105e46222b541966b2
pytest.py
196,844
44
263
warns
https://github.com/sympy/sympy.git
Fix the formatting of an error message
718
0
48,214
17
1
3
def precmd(self, line): return line
python3.10.4/Lib/cmd.py
18
XX-Net
{ "docstring": "Hook method executed just before the command line is\n interpreted, but after the input prompt is generated and issued.\n\n ", "language": "en", "n_whitespaces": 33, "n_words": 19, "vocab_size": 17 }
5
Python
5
8198943edd73a363c266633e1aa5b2a9e9c9f526
cmd.py
221,333
2
10
precmd
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
19
0
56,352
6
2
20
async def async_test_home_assistant(loop, load_registries=True):
    hass = ha.HomeAssistant()
    store = auth_store.AuthStore(hass)
    hass.auth = auth.AuthManager(hass, store, {}, {})
    ensure_auth_manager_loaded(hass.auth)
    INSTANCES.append(hass)

    orig_async_add_job = hass.async_add_job
    orig_async_add_executor_job = hass.async_add_executor_job
    orig_async_create_task = hass.async_create_task
tests/common.py
114
core
{ "docstring": "Return a Home Assistant object pointing at test config dir.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
27
Python
22
e1338adf1a27edbc0e3513fa67cd6690c7a8fbc0
common.py
291,337
54
320
async_test_home_assistant
https://github.com/home-assistant/core.git
Allow configuring country and language in core config (#81734) * Allow configuring country and language in core config * Add script for updating list of countries * Use black for formatting * Fix quoting * Move country codes to a separate file * Address review comments * Add generated/countries.py * Get default language from owner account * Remove unused variable * Add script to generate list of supported languages * Add tests * Fix stale docsring * Use format_python_namespace * Correct async_user_store * Improve typing * Fix with_store decorator * Initialize language in core store migration * Fix startup * Tweak * Apply suggestions from code review Co-authored-by: Franck Nijhof <[email protected]> * Update storage.py Co-authored-by: Franck Nijhof <[email protected]>
54
0
90,446
9
1
2
def shift(self): return self["shift"]
packages/python/plotly/plotly/graph_objs/layout/_yaxis.py
22
plotly.py
{ "docstring": "\n Moves the axis a given number of pixels from where it would\n have been otherwise. Accepts both positive and negative values,\n which will shift the axis either right or left, respectively.\n If `autoshift` is set to true, then this defaults to a padding\n of -3 if `side` is set to \"left\". and defaults to +3 if `side`\n is set to \"right\". Defaults to 0 if `autoshift` is set to\n false. Only has an effect if `anchor` is set to \"free\".\n\n The 'shift' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n int|float\n ", "language": "en", "n_whitespaces": 193, "n_words": 99, "vocab_size": 68 }
4
Python
4
ab7ddd3e8beeb1e70ce46447d26a0715fc92a5b7
_yaxis.py
231,482
2
11
shift
https://github.com/plotly/plotly.py.git
bump plotly.js to 2.17
18
0
62,935
7
2
9
def frame(self, fn_fid_or_fname, ignorekeys=[]):
    # get the frame info by name or id number
    if isinstance(fn_fid_or_fname, str):
        f = self.frame_by_name(fn_fid_or_fname, ignorekeys)
    else:
        f = self.frame_by_id(fn_fid_or_fname, ignorekeys)

    return f
nltk/corpus/reader/framenet.py
72
nltk
{ "docstring": "\n Get the details for the specified Frame using the frame's name\n or id number.\n\n Usage examples:\n\n >>> from nltk.corpus import framenet as fn\n >>> f = fn.frame(256)\n >>> f.name\n 'Medical_specialties'\n >>> f = fn.frame('Medical_specialties')\n >>> f.ID\n 256\n >>> # ensure non-ASCII character in definition doesn't trigger an encoding error:\n >>> fn.frame('Imposing_obligation') # doctest: +ELLIPSIS\n frame (1494): Imposing_obligation...\n\n\n The dict that is returned from this function will contain the\n following information about the Frame:\n\n - 'name' : the name of the Frame (e.g. 'Birth', 'Apply_heat', etc.)\n - 'definition' : textual definition of the Frame\n - 'ID' : the internal ID number of the Frame\n - 'semTypes' : a list of semantic types for this frame\n - Each item in the list is a dict containing the following keys:\n - 'name' : can be used with the semtype() function\n - 'ID' : can be used with the semtype() function\n\n - 'lexUnit' : a dict containing all of the LUs for this frame.\n The keys in this dict are the names of the LUs and\n the value for each key is itself a dict containing\n info about the LU (see the lu() function for more info.)\n\n - 'FE' : a dict containing the Frame Elements that are part of this frame\n The keys in this dict are the names of the FEs (e.g. 'Body_system')\n and the values are dicts containing the following keys\n\n - 'definition' : The definition of the FE\n - 'name' : The name of the FE e.g. 'Body_system'\n - 'ID' : The id number\n - '_type' : 'fe'\n - 'abbrev' : Abbreviation e.g. 'bod'\n - 'coreType' : one of \"Core\", \"Peripheral\", or \"Extra-Thematic\"\n - 'semType' : if not None, a dict with the following two keys:\n - 'name' : name of the semantic type. can be used with\n the semtype() function\n - 'ID' : id number of the semantic type. can be used with\n the semtype() function\n - 'requiresFE' : if not None, a dict with the following two keys:\n - 'name' : the name of another FE in this frame\n - 'ID' : the id of the other FE in this frame\n - 'excludesFE' : if not None, a dict with the following two keys:\n - 'name' : the name of another FE in this frame\n - 'ID' : the id of the other FE in this frame\n\n - 'frameRelation' : a list of objects describing frame relations\n - 'FEcoreSets' : a list of Frame Element core sets for this frame\n - Each item in the list is a list of FE objects\n\n :param fn_fid_or_fname: The Framenet name or id number of the frame\n :type fn_fid_or_fname: int or str\n :param ignorekeys: The keys to ignore. These keys will not be\n included in the output. (optional)\n :type ignorekeys: list(str)\n :return: Information about a frame\n :rtype: dict\n ", "language": "en", "n_whitespaces": 1166, "n_words": 466, "vocab_size": 160 }
28
Python
24
8a4cf5d94eb94b6427c5d1d7907ba07b119932c5
framenet.py
42,535
6
45
frame
https://github.com/nltk/nltk.git
Docstring tests (#3050) * fixed pytests * fixed more pytests * fixed more pytest and changed multiline pytest issues fixes for snowball.py and causal.py * fixed pytests (mainly multiline or rounding issues) * fixed treebank pytests, removed test for return_string=True (deprecated) * fixed destructive.py pytests, removed test for return_string=True (deprecated) * fixed pytest (rounding issues) * fixed pytest (initialised missing object) * fixed pytest (formatting issues) * fixed pytest (formatting issues) * fixed pytest (formatting issues) * added pytest +SKIP for deprecated module stanford * updated AUTHORS.md * changed docstring corrections by usage of ELLIPSIS and different roundings * fixed AUTHORS.md to be consistent * Fix framenet doctest formatting with pprint * Change docstring on MultiListBox.__init__ I believe the original typo was misinterpreted and changed to something that was not originally intended. Co-authored-by: Jan Lennartz <[email protected]> Co-authored-by: Tom Aarsen <[email protected]> Co-authored-by: Tom Aarsen <[email protected]>
85
0
7,597
11
1
5
def _get_cookies_set(self, req, resp): raise NotImplementedError("This method must be implemented by a subclass.")
tests/csrf_tests/tests.py
27
django
{ "docstring": "\n Return a list of the cookie values passed to set_cookie() over the\n course of the request-response.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 13 }
13
Python
13
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
202,418
2
15
_get_cookies_set
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
27
0
50,120
8
4
11
async def async_update(self) -> None:
    await self.fido_data.async_update()
    if (sensor_type := self.entity_description.key) == "balance":
        if self.fido_data.data.get(sensor_type) is not None:
            self._attr_native_value = round(self.fido_data.data[sensor_type], 2)
    else:
        if self.fido_data.data.get(self._number, {}).get(sensor_type) is not None:
            self._attr_native_value = round(
                self.fido_data.data[self._number][sensor_type], 2
            )
homeassistant/components/fido/sensor.py
175
core
{ "docstring": "Get the latest data from Fido and update the state.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
35
Python
27
58b9785485af4b49097707edb7fbcc00c72a3df0
sensor.py
304,562
11
110
async_update
https://github.com/home-assistant/core.git
Improve entity type hints [f] (#77143)
149
0
103,369
17
21
38
def _missing_(cls, value):
    if not isinstance(value, int):
        raise ValueError(
            "%r is not a valid %s" % (value, cls.__qualname__)
        )
    # check boundaries
    # - value must be in range (e.g. -16 <-> +15, i.e. ~15 <-> 15)
    # - value must not include any skipped flags (e.g. if bit 2 is not
    #   defined, then 0d10 is invalid)
    flag_mask = cls._flag_mask_
    all_bits = cls._all_bits_
    neg_value = None
    if (
            not ~all_bits <= value <= all_bits
            or value & (all_bits ^ flag_mask)
            ):
        if cls._boundary_ is STRICT:
            max_bits = max(value.bit_length(), flag_mask.bit_length())
            raise ValueError(
                "%r invalid value %r\n given %s\n allowed %s" % (
                    cls, value, bin(value, max_bits), bin(flag_mask, max_bits),
                ))
        elif cls._boundary_ is CONFORM:
            value = value & flag_mask
        elif cls._boundary_ is EJECT:
            return value
        elif cls._boundary_ is KEEP:
            if value < 0:
                value = (
                    max(all_bits+1, 2**(value.bit_length()))
                    + value
                )
        else:
            raise ValueError(
                '%r unknown flag boundary %r' % (cls, cls._boundary_, )
            )
    if value < 0:
        neg_value = value
        value = all_bits + 1 + value
    # get members and unknown
    unknown = value & ~flag_mask
    member_value = value & flag_mask
    if unknown and cls._boundary_ is not KEEP:
        raise ValueError(
            '%s(%r) --> unknown values %r [%s]' % (cls.__name__, value, unknown, bin(unknown))
        )
    # normal Flag?
    __new__ = getattr(cls, '__new_member__', None)
    if cls._member_type_ is object and not __new__:
        # construct a singleton enum pseudo-member
        pseudo_member = object.__new__(cls)
    else:
        pseudo_member = (__new__ or cls._member_type_.__new__)(cls, value)
    if not hasattr(pseudo_member, '_value_'):
        pseudo_member._value_ = value
    if member_value:
        pseudo_member._name_ = '|'.join([
            m._name_ for m in cls._iter_member_(member_value)
        ])
        if unknown:
            pseudo_member._name_ += '|%s' % cls._numeric_repr_(unknown)
    else:
        pseudo_member._name_ = None
    # use setdefault in case another thread already created a composite
    # with this value, but only if all members are known
    # note: zero is a special case -- add it
    if not unknown:
        pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member)
        if neg_value is not None:
            cls._value2member_map_[neg_value] = pseudo_member
    return pseudo_member
Lib/enum.py
611
cpython
{ "docstring": "\n Create a composite member containing all canonical members present in `value`.\n\n If non-member values are present, result depends on `_boundary_` setting.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 21 }
312
Python
166
acf7403f9baea3ae1119fc6b4a3298522188bf96
enum.py
175,319
62
378
_missing_
https://github.com/python/cpython.git
bpo-40066: [Enum] update str() and format() output (GH-30582) Undo rejected PEP-663 changes: - restore `repr()` to its 3.10 status - restore `str()` to its 3.10 status New changes: - `IntEnum` and `IntFlag` now leave `__str__` as the original `int.__str__` so that str() and format() return the same result - zero-valued flags without a name have a slightly changed repr(), e.g. `repr(Color(0)) == '<Color: 0>'` - update `dir()` for mixed-in types to return all the methods and attributes of the mixed-in type - added `_numeric_repr_` to `Flag` to control display of unnamed values - enums without doc strings have a more comprehensive doc string added - `ReprEnum` added -- inheriting from this makes it so only `__repr__` is replaced, not `__str__` nor `__format__`; `IntEnum`, `IntFlag`, and `StrEnum` all inherit from `ReprEnum`
1,215
0
41,600
21
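A hedged sketch of the behaviour `_missing_` above enables for Flag composites; the `Color` class is illustrative and the exact string form depends on the Python version:

from enum import Flag

class Color(Flag):
    RED = 1
    GREEN = 2

c = Color(3)       # no member has value 3, so _missing_ builds a composite pseudo-member
print(c)           # a composite such as Color.RED|GREEN
print(c.value)     # 3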
1
2
def configuration(self): return []
homeassistant/components/alexa/capabilities.py
18
core
{ "docstring": "Return the configuration object.\n\n Applicable to the ThermostatController, SecurityControlPanel, ModeController, RangeController,\n and EventDetectionSensor.\n ", "language": "en", "n_whitespaces": 34, "n_words": 13, "vocab_size": 12 }
4
Python
4
53245c65238e3009dd1f3412f7f9bef10385f64e
capabilities.py
294,423
2
9
configuration
https://github.com/home-assistant/core.git
Update pylint to 2.13.0 (#68656)
18
0
93,460
6
3
11
def test_get_documents_by_id(self, ds, documents):
    ds.write_documents(documents)
    ids = [doc.id for doc in documents]
    result = {doc.id for doc in ds.get_documents_by_id(ids)}
    assert set(ids) == result
test/document_stores/test_memory.py
78
haystack
{ "docstring": "\n The base test uses the batch_size param that's not supported\n here, so we override the test case\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 15 }
23
Python
18
a15af7f8c3ca504ceaa7c34e8487be2915cd6dc7
test_memory.py
258,280
5
50
test_get_documents_by_id
https://github.com/deepset-ai/haystack.git
refactor: Move `InMemoryDocumentStore` tests to their own class (#3614) * move tests to their own class * move more tests * add specific job * fix test * Update test/document_stores/test_memory.py Co-authored-by: Sara Zan <[email protected]> Co-authored-by: Sara Zan <[email protected]>
58
0
75,213
10
2
6
def add_checkpoint_hook(self, args):
    if args.cpu:
        original_n_cpus = args.distributed_world_size
doc/source/ray-core/_examples/lm/ray_train.py
31
ray
{ "docstring": "Add a hook to the original save_checkpoint function.\n\n This checks if there are new computational resources available.\n If so, raise exception to restart the training process and\n make use of the new resources.\n ", "language": "en", "n_whitespaces": 61, "n_words": 33, "vocab_size": 29 }
8
Python
8
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray_train.py
130,071
8
36
add_checkpoint_hook
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
33
0
29,088
9
5
21
def addResourceToFile(target_filename, data, resource_kind, lang_id, res_name, logger):
    max_attempts = 5

    for attempt in range(1, max_attempts + 1):
        update_handle = _openFileWindowsResources(target_filename)

        _updateWindowsResource(update_handle, resource_kind, res_name, lang_id, data)

        try:
            _closeFileWindowsResources(update_handle)
        except OSError as e:
            if e.errno in (110, 13):
                logger.warning(
                    % (target_filename, attempt)
                )
            else:
                logger.warning(
                    % (target_filename, attempt, e.errno)
                )

            time.sleep(1)
            continue
        else:
            if attempt != 1:
                logger.warning(
                    "Succeeded with resource update in attempt %d." % attempt
                )
            break
    else:
        logger.sysexit("Failed to update resources, the result is unusable.")
nuitka/utils/WindowsResources.py
208
Nuitka
{ "docstring": "\nFailed to add resources to file %r in attempt %d.\nDisable Anti-Virus, e.g. Windows Defender for build folders. Retrying after a second of delay.\nFailed to add resources to file %r in attempt %d with error code %d.\nDisable Anti-Virus, e.g. Windows Defender for build folders. Retrying after a second of delay.", "language": "en", "n_whitespaces": 48, "n_words": 52, "vocab_size": 27 }
76
Python
55
ed2208484bde4bf78da0712d54ab18c192df7e2e
WindowsResources.py
178,816
32
131
addResourceToFile
https://github.com/Nuitka/Nuitka.git
Onefile: Attempt opening the binary for adding payload up to five seconds * This duplicates code from resource handling, where we needed to do this already, but due to hotfix intention, we don't want to make it as a reusable functionality yet.
390
0
42,833
18
2
10
def test_connection(self) -> Tuple[bool, str]:
    try:
        conn = self.get_conn()
        conn.normalize('.')
        return True, "Connection successfully tested"
    except Exception as e:
        return False, str(e)
airflow/providers/sftp/hooks/sftp.py
78
airflow
{ "docstring": "Test the SFTP connection by calling path with directory", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
22
Python
21
f3aacebe502c4ea5dc2b7d29373539296fa037eb
sftp.py
43,251
8
44
test_connection
https://github.com/apache/airflow.git
Convert sftp hook to use paramiko instead of pysftp (#24512)
87
0
7,889
11
3
2
def test_redirect_exception(self) -> None:
tests/test_server.py
16
synapse
{ "docstring": "\n If the callback raises a RedirectException, it is turned into a 30x\n with the right location.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 14 }
4
Python
4
f1145563f662653e451525032b043d1a58998b6d
test_server.py
248,661
15
95
test_redirect_exception
https://github.com/matrix-org/synapse.git
Extra type annotations in `test_server` (#13124)
11
0
72,408
6
10
21
def __format__(self, fmt):
    # Support string formatting
    if not fmt or fmt[-1] == 's':
        return format(str(self), fmt)

    # From here on down, support for 'bnXx'
    global _address_fmt_re
    if _address_fmt_re is None:
        import re
        _address_fmt_re = re.compile('(#?)(_?)([xbnX])')

    m = _address_fmt_re.fullmatch(fmt)
    if not m:
        return super().__format__(fmt)

    alternate, grouping, fmt_base = m.groups()

    # Set some defaults
    if fmt_base == 'n':
        if self._version == 4:
            fmt_base = 'b'  # Binary is default for ipv4
        else:
            fmt_base = 'x'  # Hex is default for ipv6

    if fmt_base == 'b':
        padlen = self._max_prefixlen
    else:
        padlen = self._max_prefixlen // 4

    if grouping:
        padlen += padlen // 4 - 1

    if alternate:
        padlen += 2  # 0b or 0x

    return format(int(self), f'{alternate}0{padlen}{grouping}{fmt_base}')


@functools.total_ordering
python3.10.4/Lib/ipaddress.py
277
@functools.total_ordering
XX-Net
{ "docstring": "Returns an IP address as a formatted string.\n\n Supported presentation types are:\n 's': returns the IP address as a string (default)\n 'b': converts to binary and returns a zero-padded string\n 'X' or 'x': converts to upper- or lower-case hex and returns a zero-padded string\n 'n': the same as 'b' for IPv4 and 'x' for IPv6\n\n For binary and hex presentation types, the alternate form specifier\n '#' and the grouping option '_' are supported.\n ", "language": "en", "n_whitespaces": 129, "n_words": 73, "vocab_size": 47 }
115
Python
70
8198943edd73a363c266633e1aa5b2a9e9c9f526
ipaddress.py
218,526
25
141
__format__
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
369
1
55,367
12
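A brief usage sketch of the `__format__` support above, via the standard-library ipaddress module; the address is illustrative:

import ipaddress

addr = ipaddress.ip_address("192.0.2.1")
print(format(addr, "s"))    # '192.0.2.1' -- default string form
print(format(addr, "#b"))   # zero-padded binary with a '0b' prefix
print(format(addr, "_x"))   # zero-padded hex with '_' grouping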
1
23
def test_interactive_true_with_dependent_objects(self):
    post = Post.objects.create(title="post", content_type=self.content_type)
    # A related object is needed to show that a custom collector with
    # can_fast_delete=False is needed.
    ModelWithNullFKToSite.objects.create(post=post)
    with mock.patch("builtins.input", return_value="yes"):
        with captured_stdout() as stdout:
            call_command("remove_stale_contenttypes", verbosity=2, stdout=stdout)
    self.assertEqual(Post.objects.count(), 0)
    output = stdout.getvalue()
    self.assertIn("- Content type for contenttypes_tests.Fake", output)
    self.assertIn("- 1 contenttypes_tests.Post object(s)", output)
    self.assertIn("- 1 contenttypes_tests.ModelWithNullFKToSite", output)
    self.assertIn("Deleting stale content type", output)
    self.assertEqual(ContentType.objects.count(), self.before_count)
tests/contenttypes_tests/test_management.py
233
django
{ "docstring": "\n interactive mode (the default) deletes stale content types and warns of\n dependent objects.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 13 }
61
Python
50
9c19aff7c7561e3a82978a272ecdaad40dda5c00
test_management.py
202,322
13
134
test_interactive_true_with_dependent_objects
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
178
0
50,068
12
3
17
def test_activity_stream_related():
    serializer_related = set(
        ActivityStream._meta.get_field(field_name).related_model
        for field_name, stuff in ActivityStreamSerializer()._local_summarizable_fk_fields(None)
        if hasattr(ActivityStream, field_name)
    )

    models = set(activity_stream_registrar.models)
    models.remove(Setting)

    missing_models = models - serializer_related
    assert not missing_models
awx/main/tests/unit/api/serializers/test_activity_stream_serializer.py
102
awx
{ "docstring": "\n If this test failed with content in `missing_models`, that means that a\n model has been connected to the activity stream, but the model has not\n been added to the activity stream serializer.\n\n How to fix this:\n Ideally, all models should be in awx.api.serializers.SUMMARIZABLE_FK_FIELDS\n\n If, for whatever reason, the missing model should not generally be\n summarized from related resources, then a special case can be carved out in\n ActivityStreamSerializer._local_summarizable_fk_fields\n ", "language": "en", "n_whitespaces": 96, "n_words": 68, "vocab_size": 50 }
27
Python
22
2d310dc4e50c6f7cd298f9fb8af69da258cd9ea6
test_activity_stream_serializer.py
81,351
10
62
test_activity_stream_related
https://github.com/ansible/awx.git
Optimize object creation by getting fewer empty relationships (#12508) This optimizes the ActivityStreamSerializer by only getting many-to-many relationships that are speculatively non-empty based on information we have in other fields We run this every time we create an object as an on_commit action so it is expected this will have a major impact on response times for launching jobs
69
0
17,190
13
1
1
def test_invalid_scheduler_missing_state_dict():
tests/trainer/optimization/test_optimizers.py
12
lightning
{ "docstring": "Test that custom lr scheduler raises an error if it's missing the state dict.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
2
Python
2
82c8875f33addb0becd7761c95e9674ccc98c7ee
test_optimizers.py
241,755
10
48
test_invalid_scheduler_missing_state_dict
https://github.com/Lightning-AI/lightning.git
Add `LightningModule.lr_scheduler_step` (#10249) Co-authored-by: Carlos Mocholi <[email protected]>
5
0
69,685
6
6
15
def register_for_checkpointing(self, *objects):
    invalid_objects = []
    for obj in objects:
        if not hasattr(obj, "state_dict") or not hasattr(obj, "load_state_dict"):
            invalid_objects.append(obj)
    if len(invalid_objects) > 0:
        err = "All `objects` must include a `state_dict` and `load_state_dict` function to be stored. The following inputs are invalid:"
        for index, obj in enumerate(invalid_objects):
            err += f"\n\t- Item at index {index}, `{get_pretty_name(obj)}`"
        raise ValueError(err)
    self._custom_objects.extend(objects)
src/accelerate/accelerator.py
149
accelerate
{ "docstring": "\n Makes note of `objects` and will save or load them in during `save_state` or `load_state`.\n\n These should be utilized when the state is being loaded or saved in the same script. It is not designed to be\n used in different scripts\n\n Note: Every `object` must have a `load_state_dict` and `state_dict` function to be stored.\n ", "language": "en", "n_whitespaces": 90, "n_words": 54, "vocab_size": 44 }
58
Python
50
6ffab178accebda485295bddf8eb6bf436ff698f
accelerator.py
337,272
11
79
register_for_checkpointing
https://github.com/huggingface/accelerate.git
Implementation of saving and loading custom states (#270)
167
0
120,989
14
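A minimal sketch of how the method above is meant to be called, assuming an Accelerator instance and any object exposing state_dict/load_state_dict (the model, optimizer, and scheduler here are illustrative; save_state is referenced by the row's docstring):

from accelerate import Accelerator
import torch

accelerator = Accelerator()
model = torch.nn.Linear(2, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)

model, optimizer = accelerator.prepare(model, optimizer)
accelerator.register_for_checkpointing(scheduler)   # scheduler provides state_dict/load_state_dict
accelerator.save_state("checkpoint_dir")            # custom objects are saved alongside the rest of the state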
28
33
def get_subsample(train_args, mode, arch):
    if arch == 'transformer':
        return np.array([1])

    elif mode == 'mt' and arch == 'rnn':
        # +1 means input (+1) and layers outputs (train_args.elayer)
        subsample = np.ones(train_args.elayers + 1, dtype=np.int)
        logging.warning('Subsampling is not performed for machine translation.')
        logging.info('subsample: ' + ' '.join([str(x) for x in subsample]))
        return subsample

    elif (mode == 'asr' and arch in ('rnn', 'rnn-t')) or \
            (mode == 'mt' and arch == 'rnn') or \
            (mode == 'st' and arch == 'rnn'):
        subsample = np.ones(train_args.elayers + 1, dtype=np.int)
        if train_args.etype.endswith("p") and not train_args.etype.startswith("vgg"):
            ss = train_args.subsample.split("_")
            for j in range(min(train_args.elayers + 1, len(ss))):
                subsample[j] = int(ss[j])
        else:
            logging.warning(
                'Subsampling is not performed for vgg*. It is performed in max pooling layers at CNN.')
        logging.info('subsample: ' + ' '.join([str(x) for x in subsample]))
        return subsample

    elif mode == 'asr' and arch == 'rnn_mix':
        subsample = np.ones(train_args.elayers_sd + train_args.elayers + 1, dtype=np.int)
        if train_args.etype.endswith("p") and not train_args.etype.startswith("vgg"):
            ss = train_args.subsample.split("_")
            for j in range(min(train_args.elayers_sd + train_args.elayers + 1, len(ss))):
                subsample[j] = int(ss[j])
        else:
            logging.warning(
                'Subsampling is not performed for vgg*. It is performed in max pooling layers at CNN.')
        logging.info('subsample: ' + ' '.join([str(x) for x in subsample]))
        return subsample

    elif mode == 'asr' and arch == 'rnn_mulenc':
        subsample_list = []
        for idx in range(train_args.num_encs):
            subsample = np.ones(train_args.elayers[idx] + 1, dtype=np.int)
            if train_args.etype[idx].endswith("p") and not train_args.etype[idx].startswith("vgg"):
                ss = train_args.subsample[idx].split("_")
                for j in range(min(train_args.elayers[idx] + 1, len(ss))):
                    subsample[j] = int(ss[j])
            else:
                logging.warning(
                    'Encoder %d: Subsampling is not performed for vgg*. '
                    'It is performed in max pooling layers at CNN.', idx + 1)
            logging.info('subsample: ' + ' '.join([str(x) for x in subsample]))
            subsample_list.append(subsample)
        return subsample_list

    else:
        raise ValueError('Invalid options: mode={}, arch={}'.format(mode, arch))
ppg2mel/utils/nets_utils.py
897
MockingBird
{ "docstring": "Parse the subsampling factors from the training args for the specified `mode` and `arch`.\n\n Args:\n train_args: argument Namespace containing options.\n mode: one of ('asr', 'mt', 'st')\n arch: one of ('rnn', 'rnn-t', 'rnn_mix', 'rnn_mulenc', 'transformer')\n\n Returns:\n np.ndarray / List[np.ndarray]: subsampling factors.\n ", "language": "en", "n_whitespaces": 77, "n_words": 40, "vocab_size": 35 }
275
Python
96
b617a87ee40ab384767a27335313c2c65ee094ec
nets_utils.py
161,058
49
534
get_subsample
https://github.com/babysor/MockingBird.git
Init ppg extractor and ppg2mel (#375) * Init ppg extractor and ppg2mel * add preprocess and training * FIx known issues * Update __init__.py Allow to gen audio * Fix length issue * Fix bug of preparing fid * Fix sample issues * Add UI usage of PPG-vc
735
0
38,874
19
7
17
def getTreeBuilder(treeType, implementation=None, **kwargs):
    treeType = treeType.lower()
    if treeType not in treeBuilderCache:
        if treeType == "dom":
            from . import dom

            # Come up with a sane default (pref. from the stdlib)
            if implementation is None:
                from xml.dom import minidom
                implementation = minidom
            # NEVER cache here, caching is done in the dom submodule
            return dom.getDomModule(implementation, **kwargs).TreeBuilder
        elif treeType == "lxml":
            from . import etree_lxml
            treeBuilderCache[treeType] = etree_lxml.TreeBuilder
        elif treeType == "etree":
            from . import etree

            if implementation is None:
                implementation = default_etree
            # NEVER cache here, caching is done in the etree submodule
            return etree.getETreeModule(implementation, **kwargs).TreeBuilder
        else:
            raise ValueError( % treeType)
    return treeBuilderCache.get(treeType)
.venv/lib/python3.8/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py
211
transferlearning
{ "docstring": "Get a TreeBuilder class for various types of trees with built-in support\n\n :arg treeType: the name of the tree type required (case-insensitive). Supported\n values are:\n\n * \"dom\" - A generic builder for DOM implementations, defaulting to a\n xml.dom.minidom based implementation.\n * \"etree\" - A generic builder for tree implementations exposing an\n ElementTree-like interface, defaulting to xml.etree.cElementTree if\n available and xml.etree.ElementTree if not.\n * \"lxml\" - A etree-based builder for lxml.etree, handling limitations\n of lxml's implementation.\n\n :arg implementation: (Currently applies to the \"etree\" and \"dom\" tree\n types). A module implementing the tree type e.g. xml.etree.ElementTree\n or xml.etree.cElementTree.\n\n :arg kwargs: Any additional options to pass to the TreeBuilder when\n creating it.\n\n Example:\n\n >>> from html5lib.treebuilders import getTreeBuilder\n >>> builder = getTreeBuilder('etree')\n\n Unrecognised treebuilder \"%s\" ", "language": "en", "n_whitespaces": 228, "n_words": 122, "vocab_size": 82 }
103
Python
56
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
__init__.py
62,572
20
123
getTreeBuilder
https://github.com/jindongwang/transferlearning.git
upd; format
320
0
12,996
14
3
7
def getAllExportsAsDict(self, plugin_list=None):
    if plugin_list is None:
        # All enabled plugins should be exported
        plugin_list = self.getPluginsList()
    return {p: self._plugins[p].get_export() for p in plugin_list}
glances/stats.py
67
glances
{ "docstring": "Return all the stats to be exported (list).\n\n Default behavior is to export all the stat\n if plugin_list is provided, only export stats of given plugin (list)\n ", "language": "en", "n_whitespaces": 48, "n_words": 27, "vocab_size": 21 }
24
Python
23
5ce964bac3a618229f593ad587cb704f783a470f
stats.py
70,057
4
41
getAllExportsAsDict
https://github.com/nicolargo/glances.git
Remove the static exportable_plugins list from glances_export.py #1556" Limiting data exported for economic storage #1443
67
0
15,289
11
1
13
def mixin_client_protocol_parser(parser):
    from jina.enums import GatewayProtocolType

    parser.add_argument(
        '--protocol',
        type=GatewayProtocolType.from_string,
        choices=list(GatewayProtocolType),
        default=GatewayProtocolType.GRPC,
        help='Communication protocol between server and client.',
    )
jina/parsers/client.py
68
jina
{ "docstring": "Add the arguments for the protocol to the client parser\n\n :param parser: the parser configure\n ", "language": "en", "n_whitespaces": 21, "n_words": 15, "vocab_size": 11 }
18
Python
18
de5942f19db46321d09d31ff62d60ac33e7e43d7
client.py
13,477
9
42
mixin_client_protocol_parser
https://github.com/jina-ai/jina.git
feat: allow multiple port and protocols for gateway (#5378)
65
0
2,665
10
1
10
def test_span_maker_forward_with_empty():
    nlp = English()
    doc1 = nlp("a b c")
    ent = doc1[0:1]
    ent.label_ = "X"
    doc1.ents = [ent]
    # no entities
    doc2 = nlp("x y z")
    # just to get a model
    span_maker = build_span_maker()
    span_maker([doc1, doc2], False)
spacy/tests/pipeline/test_entity_linker.py
103
spaCy
{ "docstring": "The forward pass of the span maker may have a doc with no entities.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
39
Python
32
d61e742960ef230b423dfa157449b291a03bd119
test_entity_linker.py
111,575
9
57
test_span_maker_forward_with_empty
https://github.com/explosion/spaCy.git
Handle Docs with no entities in EntityLinker (#11640) * Handle docs with no entities If a whole batch contains no entities it won't make it to the model, but it's possible for individual Docs to have no entities. Before this commit, those Docs would cause an error when attempting to concatenate arrays because the dimensions didn't match. It turns out the process of preparing the Ragged at the end of the span maker forward was a little different from list2ragged, which just uses the flatten function directly. Letting list2ragged do the conversion avoids the dimension issue. This did not come up before because in NEL demo projects it's typical for data with no entities to be discarded before it reaches the NEL component. This includes a simple direct test that shows the issue and checks it's resolved. It doesn't check if there are any downstream changes, so a more complete test could be added. A full run was tested by adding an example with no entities to the Emerson sample project. * Add a blank instance to default training data in tests Rather than adding a specific test, since not failing on instances with no entities is basic functionality, it makes sense to add it to the default set. * Fix without modifying architecture If the architecture is modified this would have to be a new version, but this change isn't big enough to merit that.
72
0
24,444
9
3
17
def evaluate_accuracy_gpu(net, data_iter, device=None):
    if not device:  # Query the first device where the first parameter is on
        device = list(net.collect_params().values())[0].list_ctx()[0]
    # No. of correct predictions, no. of predictions
    metric = d2l.Accumulator(2)
    for X, y in data_iter:
        X, y = X.as_in_ctx(device), y.as_in_ctx(device)
        metric.add(d2l.accuracy(net(X), y), d2l.size(y))
    return metric[0] / metric[1]
d2l/mxnet.py
167
d2l-zh
{ "docstring": "Compute the accuracy for a model on a dataset using a GPU.\n\n Defined in :numref:`sec_lenet`", "language": "en", "n_whitespaces": 17, "n_words": 15, "vocab_size": 13 }
49
Python
40
b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2
mxnet.py
158,175
8
105
evaluate_accuracy_gpu
https://github.com/d2l-ai/d2l-zh.git
[PaddlePaddle] Merge master into Paddle branch (#1186) * change 15.2 title in chinese version (#1109) change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘ * 修改部分语义表述 (#1105) * Update r0.17.5 (#1120) * Bump versions in installation * 94行typo: (“bert.mall”)->(“bert.small”) (#1129) * line 313: "bert.mall" -> "bert.small" (#1130) * fix: update language as native reader (#1114) * Fix the translation of "stride" (#1115) * Update index.md (#1118) 修改部分语义表述 * Update self-attention-and-positional-encoding.md (#1133) 依照本书的翻译习惯,将pooling翻译成汇聚 * maybe a comment false (#1149) * maybe a little false * maybe a little false * A minor bug in the rcnn section (Chinese edition) (#1148) * Update bert.md (#1137) 一个笔误 # 假设batch_size=2,num_pred_positions=3 # 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1] * Update calculus.md (#1135) * fix typo in git documentation (#1106) * fix: Update the Chinese translation in lr-scheduler.md (#1136) * Update lr-scheduler.md * Update chapter_optimization/lr-scheduler.md Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * fix translation for kaggle-house-price.md (#1107) * fix translation for kaggle-house-price.md * fix translation for kaggle-house-price.md Signed-off-by: sunhaizhou <[email protected]> * Update weight-decay.md (#1150) * Update weight-decay.md 关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解 关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。 并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释 解释为何会增加复杂性以及为何需要细粒度工具。 * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Fix a spelling error (#1161) * Update gru.md (#1152) The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state. 翻译错误 * Unify the function naming (#1113) Unify naming of the function 'init_xavier()'. * Update mlp-concise.md (#1166) * Update mlp-concise.md 语句不通顺 * Update environment.md 语序异常 * Update config.ini * fix the imprecise description (#1168) Co-authored-by: yuande <yuande> * fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175) * Fix some typos. (#1163) * Update batch-norm.md (#1170) fixing typos u->x in article * Update linear-regression.md (#1090) We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that 原译文把who也直接翻译出来了。 * Update mlp.md (#1117) * Update mlp.md 修改部分语义表述 * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Correct a translation error. (#1091) * Correct a translation error. 
* Update chapter_computer-vision/image-augmentation.md Co-authored-by: Aston Zhang <[email protected]> * Update aws.md (#1121) * Update aws.md * Update chapter_appendix-tools-for-deep-learning/aws.md Co-authored-by: Aston Zhang <[email protected]> * Update image-augmentation.md (#1093) * Update anchor.md (#1088) fix a minor issue in code * Update anchor.md * Update image-augmentation.md * fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087) * Avoid `torch.meshgrid` user warning (#1174) Avoids the following user warning: ```python ~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.) return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] ``` * bump to 2.0.0-beta1 * Update sequence.md * bump beta1 on readme * Add latex code block background to config * BLD: Bump python support version 3.9 (#1183) * BLD: Bump python support version 3.9 * Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4 * BLD: Bump torch and tensorflow * Update Jenkinsfile * Update chapter_installation/index.md * Update chapter_installation/index.md Co-authored-by: Aston Zhang <[email protected]> * Update config.ini * Update INFO.md * Update INFO.md * Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187) * resolve the conflicts * revise from publisher (#1089) * revise from publisher * d2l api * post_latex * revise from publisher * revise ch11 * Delete d2l-Copy1.bib * clear cache * rm d2lbook clear * debug anchor * keep original d2l doc Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: Aston Zhang <[email protected]> * 重复语句 (#1188) Co-authored-by: Aston Zhang <[email protected]> * Improve expression for chapter_preliminaries/pandas.md (#1184) * Update pandas.md * Improve expression * Improve expression * Update chapter_preliminaries/pandas.md Co-authored-by: Aston Zhang <[email protected]> * Improce expression for chapter_preliminaries/linear-algebra.md (#1185) * Improce expression * Improve code comments * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md Co-authored-by: Aston Zhang <[email protected]> * Fix multibox_detection bugs * Update d2l to 0.17.5 version * restore older version * Upgrade pandas * change to python3.8 * Test warning log * relocate warning log * test logs filtering * Update gru.md * Add DeprecationWarning filter * Test warning log * Update attention mechanisms & computational performance * Update multilayer perceptron& linear & convolution networks & computer vision * Update recurrent&optimition&nlp pretraining & nlp applications * ignore warnings * Update index.md * Update linear networks * Update multilayer perceptrons&deep learning computation * Update preliminaries * Check and Add warning filter * Update kaggle-cifar10.md * Update object-detection-dataset.md * Update ssd.md fcn.md * Update hybridize.md * Update hybridize.md Signed-off-by: sunhaizhou <[email protected]> Co-authored-by: zhou201505013 <[email protected]> Co-authored-by: Xinwei Liu <[email protected]> Co-authored-by: Anirudh Dagar <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: hugo_han <[email protected]> 
Co-authored-by: gyro永不抽风 <[email protected]> Co-authored-by: CanChengZheng <[email protected]> Co-authored-by: linlin <[email protected]> Co-authored-by: iuk <[email protected]> Co-authored-by: yoos <[email protected]> Co-authored-by: Mr. Justice Lawrence John Wargrave <[email protected]> Co-authored-by: Chiyuan Fu <[email protected]> Co-authored-by: Sunhuashan <[email protected]> Co-authored-by: Haiker Sun <[email protected]> Co-authored-by: Ming Liu <[email protected]> Co-authored-by: goldmermaid <[email protected]> Co-authored-by: silenceZheng66 <[email protected]> Co-authored-by: Wenchao Yan <[email protected]> Co-authored-by: Kiki2049 <[email protected]> Co-authored-by: Krahets <[email protected]> Co-authored-by: friedmainfunction <[email protected]> Co-authored-by: Jameson <[email protected]> Co-authored-by: P. Yao <[email protected]> Co-authored-by: Yulv-git <[email protected]> Co-authored-by: Liu,Xiao <[email protected]> Co-authored-by: YIN, Gang <[email protected]> Co-authored-by: Joe-HZ <[email protected]> Co-authored-by: lybloveyou <[email protected]> Co-authored-by: VigourJiang <[email protected]> Co-authored-by: zxhd863943427 <[email protected]> Co-authored-by: LYF <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: xiaotinghe <[email protected]> Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Holly-Max <[email protected]> Co-authored-by: HinGwenWoong <[email protected]> Co-authored-by: Shuai Zhang <[email protected]>
89
0
37,352
18
5
30
def test_k8s_cpu():
    # Some experimentally-obtained K8S CPU usage files for use in test_k8s_cpu.
    PROCSTAT1 = # noqa

    PROCSTAT2 = # noqa

    CPUACCTUSAGE1 = "2268980984108"
    CPUACCTUSAGE2 = "2270120061999"
    CPUSHARES = "2048"

    shares_file, cpu_file, proc_stat_file = [
        tempfile.NamedTemporaryFile("w+") for _ in range(3)
    ]
    shares_file.write(CPUSHARES)
    cpu_file.write(CPUACCTUSAGE1)
    proc_stat_file.write(PROCSTAT1)
    for file in shares_file, cpu_file, proc_stat_file:
        file.flush()
    with mock.patch(
        "ray._private.utils.os.environ", {"KUBERNETES_SERVICE_HOST"}
    ), mock.patch("ray.dashboard.k8s_utils.CPU_USAGE_PATH", cpu_file.name), mock.patch(
        "ray.dashboard.k8s_utils.PROC_STAT_PATH", proc_stat_file.name
    ), mock.patch(
        "ray._private.utils.get_k8s_cpus.__defaults__", (shares_file.name,)
    ):
        # Test helpers
        assert ray._private.utils.get_num_cpus() == 2
        assert k8s_utils._cpu_usage() == 2268980984108
        assert k8s_utils._system_usage() == 1551775030000000
        assert k8s_utils._host_num_cpus() == 8

        # No delta for first computation, return 0.
        assert k8s_utils.cpu_percent() == 0.0

        # Write new usage info obtained after 1 sec wait.
        for file in cpu_file, proc_stat_file:
            file.truncate(0)
            file.seek(0)
        cpu_file.write(CPUACCTUSAGE2)
        proc_stat_file.write(PROCSTAT2)
        for file in cpu_file, proc_stat_file:
            file.flush()

        # Files were extracted under 1 CPU of load on a 2 CPU pod
        assert 50 < k8s_utils.cpu_percent() < 60
python/ray/tests/test_advanced_3.py
375
ray
{ "docstring": "Test all the functions in dashboard/k8s_utils.py.\n Also test ray._private.utils.get_num_cpus when running in a K8s pod.\n Files were obtained from within a K8s pod with 2 CPU request, CPU limit\n unset, with 1 CPU of stress applied.\n cpu 2945022 98 3329420 148744854 39522 0 118587 0 0 0\n cpu0 370299 14 413841 18589778 5304 0 15288 0 0 0\n cpu1 378637 10 414414 18589275 5283 0 14731 0 0 0\n cpu2 367328 8 420914 18590974 4844 0 14416 0 0 0\n cpu3 368378 11 423720 18572899 4948 0 14394 0 0 0\n cpu4 369051 13 414615 18607285 4736 0 14383 0 0 0\n cpu5 362958 10 415984 18576655 4590 0 16614 0 0 0\n cpu6 362536 13 414430 18605197 4785 0 14353 0 0 0\n cpu7 365833 15 411499 18612787 5028 0 14405 0 0 0\n intr 1000694027 125 0 0 39 154 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1028 0 2160913 0 2779605 8 0 3981333 3665198 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n ctxt 1574979439\n btime 1615208601\n processes 857411\n procs_running 6\n procs_blocked 0\n softirq 524311775 0 230142964 27143 63542182 0 0 171 74042767 0 156556548\n cpu 2945152 98 3329436 148745483 39522 0 118587 0 0 0\n cpu0 370399 14 413841 18589778 5304 0 15288 0 0 0\n cpu1 378647 10 414415 18589362 5283 0 14731 0 0 0\n cpu2 367329 8 420916 18591067 4844 0 14416 0 0 0\n cpu3 368381 11 423724 18572989 4948 0 14395 0 0 0\n cpu4 369052 13 414618 18607374 4736 0 14383 0 0 0\n cpu5 362968 10 415986 18576741 4590 0 16614 0 0 0\n cpu6 362537 13 414432 18605290 4785 0 14353 0 0 0\n cpu7 365836 15 411502 18612878 5028 0 14405 0 0 0\n intr 1000700905 125 0 0 39 154 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1028 0 2160923 0 2779605 8 0 3981353 3665218 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n ctxt 1574988760\n btime 1615208601\n processes 857411\n procs_running 4\n procs_blocked 0\n softirq 524317451 0 230145523 27143 63542930 0 0 
171 74043232 0 156558452\n ", "language": "en", "n_whitespaces": 1369, "n_words": 1258, "vocab_size": 156 }
142
Python
93
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
test_advanced_3.py
131,220
66
220
test_k8s_cpu
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
363
0
29,486
12
1
6
def __getstate__(self) -> Dict[str, Any]:
    return {}


@ExperimentalAPI
rllib/agents/alpha_star/league_builder.py
31
@ExperimentalAPI
ray
{ "docstring": "Returns a state dict, mapping str keys to state variables.\n\n Returns:\n The current state dict of this LeagueBuilder.\n ", "language": "en", "n_whitespaces": 43, "n_words": 18, "vocab_size": 16 }
8
Python
8
0bb82f29b65dca348acf5aa516d21ef3f176a3e1
league_builder.py
147,590
7
16
__getstate__
https://github.com/ray-project/ray.git
[RLlib] AlphaStar polishing (fix logger.info bug). (#22281)
21
1
34,015
6
3
18
def from_euler(cls, angles, seq):
    if len(angles) != 3:
        raise ValueError("3 angles must be given.")

    extrinsic = _check_sequence(seq)
    i, j, k = seq.lower()
    q1 = cls.from_axis_angle(_elementary_axis(i), angles[0])
    q2 = cls.from_axis_angle(_elementary_axis(j), angles[1])
    q3 = cls.from_axis_angle(_elementary_axis(k), angles[2])

    if extrinsic:
        return trigsimp(q3 * q2 * q1)
    else:
        return trigsimp(q1 * q2 * q3)
sympy/algebras/quaternion.py
176
sympy
{ "docstring": "Returns quaternion equivalent to Euler angles represented same in\n the sequence defined by `seq`.\n\n Parameters\n ==========\n\n angles : list, tuple or Matrix of 3 numbers\n The Euler angles (in radians).\n seq : string of length 3\n Represents the sequence of rotations.\n For intrinsic rotations, seq but be all lowercase and its elements\n must be from the set `['x', 'y', 'z']`\n For extrinsic rotations, seq but be all uppercase and its elements\n must be from the set `['X', 'Y', 'Z']`\n\n Returns\n =======\n\n Quaternion\n The normalized rotation quaternion calculated from the Euler angles\n in the given sequence.\n\n Examples\n ========\n\n >>> from sympy import Quaternion\n >>> from sympy import pi\n >>> q = Quaternion.from_euler([pi/2, 0, 0], 'xyz')\n >>> q\n sqrt(2)/2 + sqrt(2)/2*i + 0*j + 0*k\n\n >>> q = Quaternion.from_euler([0, pi/2, pi] , 'zyz')\n >>> q\n 0 + (-sqrt(2)/2)*i + 0*j + sqrt(2)/2*k\n\n >>> q = Quaternion.from_euler([0, pi/2, pi] , 'ZYZ')\n >>> q\n 0 + sqrt(2)/2*i + 0*j + sqrt(2)/2*k\n\n ", "language": "en", "n_whitespaces": 399, "n_words": 157, "vocab_size": 86 }
49
Python
38
1d8576449e7ab757f13f49a1d33faed602aa88fb
quaternion.py
200,597
12
111
from_euler
https://github.com/sympy/sympy.git
implemented to_euler and from_euler
145
0
49,726
12
1
3
def lookup_pattern(name): return _registered_patterns[name]
python/ray/_private/thirdparty/pathspec/util.py
20
ray
{ "docstring": "\n Lookups a registered pattern factory by name.\n\n *name* (:class:`str`) is the name of the pattern factory.\n\n Returns the registered pattern factory (:class:`~collections.abc.Callable`).\n If no pattern factory is registered, raises :exc:`KeyError`.\n ", "language": "en", "n_whitespaces": 46, "n_words": 30, "vocab_size": 21 }
4
Python
4
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
util.py
130,285
2
11
lookup_pattern
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
10
0
29,210
6
1
7
def save_instance(self):
    instance = self.form.save()
    log(instance=instance, action="wagtail.create")
    return instance
wagtail/admin/views/generic/models.py
48
wagtail
{ "docstring": "\n Called after the form is successfully validated - saves the object to the db\n and returns the new object. Override this to implement custom save logic.\n ", "language": "en", "n_whitespaces": 48, "n_words": 26, "vocab_size": 22 }
9
Python
8
96a0eb0fa0cc0e28bcf5616987d193f6b2fcea82
models.py
77,827
4
27
save_instance
https://github.com/wagtail/wagtail.git
Move logging in generic CreateView and EditView to save_instance() method
37
0
16,712
9
1
10
async def poll_and_get_state(self) -> State:
    await time_changed(self.hass, 60)
    await time_changed(self.hass, DEBOUNCE_COOLDOWN)

    state = self.hass.states.get(self.entity_id)
    assert state is not None

    return state
tests/components/homekit_controller/common.py
77
core
{ "docstring": "Trigger a time based poll and return the current entity state.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
21
Python
17
1ded3ac51ebc1915a5026af1998eb119972f6117
common.py
290,620
7
47
poll_and_get_state
https://github.com/home-assistant/core.git
Poll HomeKit Controller locks for state after lock operation (#82058)
63
0
89,734
10
1
5
async def help_test_setup_manual_entity_from_yaml(hass, config): calls = MagicMock()
tests/components/mqtt/test_common.py
25
core
{ "docstring": "Help to test setup from yaml through configuration entry.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
7
Python
7
4894e2e5a43e80a2f64d8f9486c7eb215fcdaa6b
test_common.py
287,415
14
106
help_test_setup_manual_entity_from_yaml
https://github.com/home-assistant/core.git
Refactor common MQTT tests to use modern schema (#77583) * Common tests availability * Common tests attributes * Common tests unique id * Common tests discovery * Common tests encoding * Common tests device info * Common tests entity_id updated * Common tests entity debug info * Common test entity category * Common tests setup reload unload+corrections * Cleanup sweep * Comments from curent change * Cleanup * Remove unused legacy config
13
0
86,608
8
4
8
def random_kernel_graph(n, kernel_integral, kernel_root=None, seed=None):
    r
    if kernel_root is None:
        import scipy as sp
        import scipy.optimize  # call as sp.optimize
networkx/generators/random_graphs.py
47
networkx
{ "docstring": "Returns an random graph based on the specified kernel.\n\n The algorithm chooses each of the $[n(n-1)]/2$ possible edges with\n probability specified by a kernel $\\kappa(x,y)$ [1]_. The kernel\n $\\kappa(x,y)$ must be a symmetric (in $x,y$), non-negative,\n bounded function.\n\n Parameters\n ----------\n n : int\n The number of nodes\n kernel_integral : function\n Function that returns the definite integral of the kernel $\\kappa(x,y)$,\n $F(y,a,b) := \\int_a^b \\kappa(x,y)dx$\n kernel_root: function (optional)\n Function that returns the root $b$ of the equation $F(y,a,b) = r$.\n If None, the root is found using :func:`scipy.optimize.brentq`\n (this requires SciPy).\n seed : integer, random_state, or None (default)\n Indicator of random number generation state.\n See :ref:`Randomness<randomness>`.\n\n Notes\n -----\n The kernel is specified through its definite integral which must be\n provided as one of the arguments. If the integral and root of the\n kernel integral can be found in $O(1)$ time then this algorithm runs in\n time $O(n+m)$ where m is the expected number of edges [2]_.\n\n The nodes are set to integers from $0$ to $n-1$.\n\n Examples\n --------\n Generate an Erdős–Rényi random graph $G(n,c/n)$, with kernel\n $\\kappa(x,y)=c$ where $c$ is the mean expected degree.\n\n >>> def integral(u, w, z):\n ... return c * (z - w)\n >>> def root(u, w, r):\n ... return r / c + w\n >>> c = 1\n >>> graph = nx.random_kernel_graph(1000, integral, root)\n\n See Also\n --------\n gnp_random_graph\n expected_degree_graph\n\n References\n ----------\n .. [1] Bollobás, Béla, Janson, S. and Riordan, O.\n \"The phase transition in inhomogeneous random graphs\",\n *Random Structures Algorithms*, 31, 3--122, 2007.\n\n .. [2] Hagberg A, Lemons N (2015),\n \"Fast Generation of Sparse Random Kernel Graphs\".\n PLoS ONE 10(9): e0135177, 2015. doi:10.1371/journal.pone.0135177\n ", "language": "en", "n_whitespaces": 464, "n_words": 266, "vocab_size": 179 }
20
Python
18
2a05ccdb07cff88e56661dee8a9271859354027f
random_graphs.py
176,736
74
143
random_kernel_graph
https://github.com/networkx/networkx.git
Remove redundant py2 numeric conversions (#5661) * Remove redundant float conversion * Remove redundant int conversion * Use integer division Co-authored-by: Miroslav Šedivý <[email protected]>
40
0
42,069
9
6
8
def prompt_for_move(final_file, **move_params):
    skip_static_hosts = move_params["skipstatichosts"]

    if move_params["replace"] and not skip_static_hosts:
        move_file = True
    elif move_params["auto"] or skip_static_hosts:
        move_file = False
    else:
        prompt = "Do you want to replace your existing hosts file with the newly generated file?"
        move_file = query_yes_no(prompt)

    if move_file:
        move_file = move_hosts_file_into_place(final_file)

    return move_file


# End Prompt the User
updateHostsFile.py
108
hosts
{ "docstring": "\n Prompt the user to move the newly created hosts file to its designated\n location in the OS.\n\n Parameters\n ----------\n final_file : file\n The file object that contains the newly created hosts data.\n move_params : kwargs\n Dictionary providing additional parameters for moving the hosts file\n into place. Currently, those fields are:\n\n 1) auto\n 2) replace\n 3) skipstatichosts\n\n Returns\n -------\n move_file : bool\n Whether or not the final hosts file was moved.\n ", "language": "en", "n_whitespaces": 150, "n_words": 70, "vocab_size": 53 }
53
Python
41
258b10edc90d53c31225962dde6dcc80b0fc9ba9
updateHostsFile.py
123,270
12
60
prompt_for_move
https://github.com/StevenBlack/hosts.git
refactor: more containerization
108
0
27,313
11
1
15
def test_api_fields(self) -> None:
    expected_fields = set(Stream.API_FIELDS) | {"stream_id"}
    expected_fields -= {"id"}

    stream_dict_fields = set(APIStreamDict.__annotations__.keys())
    computed_fields = set(["is_announcement_only", "is_default"])

    self.assertEqual(stream_dict_fields - computed_fields, expected_fields)

    expected_fields = set(Subscription.API_FIELDS)

    subscription_dict_fields = set(APISubscriptionDict.__annotations__.keys())
    computed_fields = set(
        ["in_home_view", "email_address", "stream_weekly_traffic", "subscribers"]
    )
    # `APISubscriptionDict` is a subclass of `APIStreamDict`, therefore having all the
    # fields in addition to the computed fields and `Subscription.API_FIELDS` that
    # need to be excluded here.
    self.assertEqual(
        subscription_dict_fields - computed_fields - stream_dict_fields,
        expected_fields,
    )
zerver/tests/test_subs.py
183
zulip
{ "docstring": "Verify that all the fields from `Stream.API_FIELDS` and `Subscription.API_FIELDS` present\n in `APIStreamDict` and `APISubscriptionDict`, respectively.\n ", "language": "en", "n_whitespaces": 29, "n_words": 15, "vocab_size": 14 }
73
Python
55
44ecd66eaec6533778bdff3fbb31ceb0acc0419a
test_subs.py
83,879
18
104
test_api_fields
https://github.com/zulip/zulip.git
types: Better types for API fields. Signed-off-by: Zixuan James Li <[email protected]>
211
0
17,743
11
1
5
def log(self) -> Logger: return self.app._logger
src/textual/message_pump.py
25
textual
{ "docstring": "Get a logger for this object.\n\n Returns:\n Logger: A logger.\n ", "language": "en", "n_whitespaces": 35, "n_words": 10, "vocab_size": 10 }
6
Python
6
ca9492ac569510ce0a7e5387f81e763a99c7359e
message_pump.py
184,828
7
14
log
https://github.com/Textualize/textual.git
layout docs
20
0
44,823
7
3
15
def fhash(value):
    fpart = math.modf(value)
    if fpart[0] == 0.0:
        return hash(int(fpart[1]))

    v, e = math.frexp(value)

    # 2**31
    v *= 2147483648.0

    # Top 32 bits
    hipart = int(v)

    # Next 32 bits
    v = (v - float(hipart)) * 2147483648.0

    x = hipart + int(v) + (e << 15)
    if x == -1:
        x = -2

    # Convert to C long type
    return ctypes.c_long(x).value
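A brief usage sketch (illustrative only; the import path is assumed from the record's path field): whole-valued floats fall through to the integer hash, while fractional values go through the frexp-based 2.7 branch.

    from samtranslator.third_party.py27hash.hash import fhash  # assumed import path

    assert fhash(42.0) == hash(42)  # integral floats reuse the int hash
    print(fhash(3.14))              # platform-sized C long from the 2.7 algorithm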
samtranslator/third_party/py27hash/hash.py
162
serverless-application-model
{ "docstring": "\n Returns a Python 2.7 hash for a float.\n\n Logic ported from the 2.7 Python branch: cpython/Objects/object.c\n Method: long _Py_HashDouble(double v)\n\n Args:\n value: input float\n\n Returns:\n Python 2.7 hash\n ", "language": "en", "n_whitespaces": 93, "n_words": 28, "vocab_size": 22 }
62
Python
42
a5db070f446b7cfebdaa6ad2e3dcf78f6105a272
hash.py
213,059
12
105
fhash
https://github.com/aws/serverless-application-model.git
fix: Py27hash fix (#2182) * Add third party py27hash code * Add Py27UniStr and unit tests * Add py27hash_fix utils and tests * Add to_py27_compatible_template and tests * Apply py27hash fix to wherever it is needed * Apply py27hash fix, all tests pass except api_with_any_method_in_swagger * apply py27hash fix in openapi + run black * remove py27 testing * remove other py27 references * black fixes * fixes/typos * remove py27 from tox.ini * refactoring * third party notice * black * Fix py27hash fix to deal with null events * Fix Py27UniStr repr for unicode literals * black reformat * Update _template_has_api_resource to check data type more defensively * Apply py27Dict in _get_authorizers * Apply Py27Dict to authorizers and gateway responses which will go into swagger * Update to_py27_compatible_template to handle parameter_values; Add Py27LongInt class * Rename _convert_to_py27_dict to _convert_to_py27_type * Apply Py27UniStr to path param name * Handle HttpApi resource under to_py27_compatible_template * Fix InvalidDocumentException to not sort different exceptions * black reformat * Remove unnecessary test files Co-authored-by: Wing Fung Lau <[email protected]>
182
0
53,613
12
6
43
def forward(self, x, l2_norm=False):
    with torch.no_grad():
        with torch.cuda.amp.autocast(enabled=False):
            x.squeeze_(1)
            # if you torch spec compute it otherwise use the mel spec computed by the AP
            if self.use_torch_spec:
                x = self.torch_spec(x)

            if self.log_input:
                x = (x + 1e-6).log()
            x = self.instancenorm(x).unsqueeze(1)

    x = self.conv1(x)
    x = self.relu(x)
    x = self.bn1(x)

    x = self.layer1(x)
    x = self.layer2(x)
    x = self.layer3(x)
    x = self.layer4(x)

    x = x.reshape(x.size()[0], -1, x.size()[-1])

    w = self.attention(x)

    if self.encoder_type == "SAP":
        x = torch.sum(x * w, dim=2)
    elif self.encoder_type == "ASP":
        mu = torch.sum(x * w, dim=2)
        sg = torch.sqrt((torch.sum((x**2) * w, dim=2) - mu**2).clamp(min=1e-5))
        x = torch.cat((mu, sg), 1)

    x = x.view(x.size()[0], -1)
    x = self.fc(x)

    if l2_norm:
        x = torch.nn.functional.normalize(x, p=2, dim=1)
    return x
TTS/speaker_encoder/models/resnet.py
505
TTS
{ "docstring": "Forward pass of the model.\n\n Args:\n x (Tensor): Raw waveform signal or spectrogram frames. If input is a waveform, `torch_spec` must be `True`\n to compute the spectrogram on-the-fly.\n l2_norm (bool): Whether to L2-normalize the outputs.\n\n Shapes:\n - x: :math:`(N, 1, T_{in})` or :math:`(N, D_{spec}, T_{in})`\n ", "language": "en", "n_whitespaces": 114, "n_words": 45, "vocab_size": 38 }
118
Python
68
127118c6378168e3d36a1e5d19ede777fd20684f
resnet.py
261,852
29
320
forward
https://github.com/coqui-ai/TTS.git
Update TTS.tts formatters (#1228) * Return Dict from tts formatters * Make style
416
0
77,033
20
3
14
def _faces_from_frames(self) -> None:
    logger.debug("Extracting faces from frames: Number images: %s", len(self.source))
    if self.update_source:
        self._crop_source_faces()
    self._crop_destination_faces()
    logger.debug("Extracted faces from frames: %s",
                 {k: len(v) for k, v in self._faces.__dict__.items()})
tools/preview/preview.py
110
faceswap
{ "docstring": " Extract the preview faces from the source frames and apply the requisite padding. ", "language": "en", "n_whitespaces": 14, "n_words": 13, "vocab_size": 11 }
28
Python
24
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
preview.py
101,408
8
65
_faces_from_frames
https://github.com/deepfakes/faceswap.git
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
94
0
20,822
13
1
27
def test_deleted_message(self) -> None:
    user_profile = self.example_user("hamlet")
    message = self.get_message(Recipient.PERSONAL, type_id=1)
    UserMessage.objects.create(
        user_profile=user_profile,
        flags=UserMessage.flags.read,
        message=message,
    )
    missed_message = {
        "message_id": message.id,
        "trigger": "private_message",
    }

    # Now, delete the message the normal way
    do_delete_messages(user_profile.realm, [message])

    # This mock.patch() should be assertNoLogs once that feature
    # is added to Python.
    with mock.patch(
        "zerver.lib.push_notifications.uses_notification_bouncer"
    ) as mock_check, mock.patch("logging.error") as mock_logging_error, mock.patch(
        "zerver.lib.push_notifications.push_notifications_enabled", return_value=True
    ) as mock_push_notifications:
        handle_push_notification(user_profile.id, missed_message)
        mock_push_notifications.assert_called_once()
        # Check we didn't proceed through and didn't log anything.
        mock_check.assert_not_called()
        mock_logging_error.assert_not_called()
zerver/tests/test_push_notifications.py
229
zulip
{ "docstring": "Simulates the race where message is deleted before handling push notifications", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
78
Python
65
b0ce4f1bce8031881addecb1e86073483517f392
test_push_notifications.py
83,262
23
132
test_deleted_message
https://github.com/zulip/zulip.git
docs: Fix many spelling mistakes. Signed-off-by: Anders Kaseorg <[email protected]>
308
0
17,643
11
5
25
def get_abstract_dependencies(reqs, sources=None, parent=None):
    deps = []
    from .requirements import Requirement

    for req in reqs:
        if isinstance(req, shims.InstallRequirement):
            requirement = Requirement.from_line("{0}{1}".format(req.name, req.specifier))
            if req.link:
                requirement.req.link = req.link
            requirement.markers = req.markers
            requirement.req.markers = req.markers
            requirement.extras = req.extras
            requirement.req.extras = req.extras
        elif isinstance(req, Requirement):
            requirement = copy.deepcopy(req)
        else:
            requirement = Requirement.from_line(req)
        dep = AbstractDependency.from_requirement(requirement, parent=parent)
        deps.append(dep)
    return deps
pipenv/vendor/requirementslib/models/dependencies.py
238
pipenv
{ "docstring": "Get all abstract dependencies for a given list of requirements.\n\n Given a set of requirements, convert each requirement to an Abstract Dependency.\n\n :param reqs: A list of Requirements\n :type reqs: list[:class:`~requirementslib.models.requirements.Requirement`]\n :param sources: Pipfile-formatted sources, defaults to None\n :param sources: list[dict], optional\n :param parent: The parent of this list of dependencies, defaults to None\n :param parent: :class:`~requirementslib.models.requirements.Requirement`, optional\n :return: A list of Abstract Dependencies\n :rtype: list[:class:`~requirementslib.models.dependency.AbstractDependency`]\n ", "language": "en", "n_whitespaces": 96, "n_words": 66, "vocab_size": 43 }
56
Python
40
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
dependencies.py
22,218
19
149
get_abstract_dependencies
https://github.com/pypa/pipenv.git
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
225
0
4,264
15
1
12
def special_library_payload(parent_payload, special_type):
    title = f"{special_type} ({parent_payload.title})"
    special_library_id = f"{parent_payload.media_content_id}/{special_type}"
    return BrowseMedia(
        title=title,
        media_class=parent_payload.media_class,
        media_content_id=special_library_id,
        media_content_type=parent_payload.media_content_type,
        can_play=False,
        can_expand=True,
        children_media_class=parent_payload.children_media_class,
    )
homeassistant/components/plex/media_browser.py
102
core
{ "docstring": "Create response payload for special library folders.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
20
Python
19
653305b998dd033365576db303b32dd5df3a6c54
media_browser.py
294,023
12
54
special_library_payload
https://github.com/home-assistant/core.git
Support multiple Plex servers in media browser (#68321)
84
0
93,067
9
8
15
def acl_update(consul_url=None, token=None, **kwargs):
    ret = {}
    data = {}
    if not consul_url:
        consul_url = _get_config()
        if not consul_url:
            log.error("No Consul URL found.")
            ret["message"] = "No Consul URL found."
            ret["res"] = False
            return ret

    if "id" in kwargs:
        data["ID"] = kwargs["id"]
    else:
        ret["message"] = 'Required parameter "id" is missing.'
        ret["res"] = False
        return ret

    if "name" in kwargs:
        data["Name"] = kwargs["name"]
    else:
        raise SaltInvocationError('Required argument "name" is missing.')

    if "type" in kwargs:
        data["Type"] = kwargs["type"]

    if "rules" in kwargs:
        data["Rules"] = kwargs["rules"]

    function = "acl/update"
    res = _query(
        consul_url=consul_url, token=token, data=data, method="PUT", function=function
    )

    if res["res"]:
        ret["res"] = True
        ret["message"] = "ACL {} created.".format(kwargs["name"])
    else:
        ret["res"] = False
        ret["message"] = "Updating ACL {} failed.".format(kwargs["name"])

    return ret
salt/modules/consul.py
390
salt
{ "docstring": "\n Update an ACL token.\n\n :param consul_url: The Consul server URL.\n :param name: Meaningful indicator of the ACL's purpose.\n :param id: Unique identifier for the ACL to update.\n :param type: Type is either client or management. A management\n token is comparable to a root user and has the\n ability to perform any action including creating,\n modifying, and deleting ACLs.\n :param rules: The Consul server URL.\n :return: Boolean & message of success or failure.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' consul.acl_update\n\n ", "language": "en", "n_whitespaces": 166, "n_words": 80, "vocab_size": 63 }
116
Python
63
fb825aa760fa0585a2c8fdafc6e62be8aec8cecf
consul.py
215,759
35
212
acl_update
https://github.com/saltstack/salt.git
[merge jam] Master port 49261 - consul modules (#58101) * add consul states and acl function present/absent * add consul to states doc index * refact/fix consul states * fix doc, fix states * fix name parameter for acl_changes * fixing pylint errors * small changes after review by @rallytime * fix header count * Update consul.py * fix acl_exists description, fix when both id and name are missing * Adding some tests for consul module and consul state module. Some additional fixes in the consul module. * Fixing tests. * Fixing failing tests on Windows. * Adding changelog. * Adding some tests for consul module and consul state module. Some additional fixes in the consul module. * moving tests to pytest. * manual black changes. * One more manual black change. * fixing formatting. Adding versionadded for state module. Co-authored-by: Rémi Jouannet <[email protected]> Co-authored-by: Mike Place <[email protected]> Co-authored-by: Daniel Wozniak <[email protected]> Co-authored-by: Wayne Werner <[email protected]>
313
0
54,153
13
1
13
def _reset(self) -> None:
    self._reset_slots()

    self._paused = False
    self.latest_action = {}
    self.latest_message = UserUttered.empty()
    self.latest_bot_utterance = BotUttered.empty()
    self.followup_action = ACTION_LISTEN_NAME
    self.active_loop = None
rasa/shared/core/trackers.py
89
rasa
{ "docstring": "Reset tracker to initial state - doesn't delete events though!.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
23
Python
18
e798bf049f036a5865c14d4343ed8a833864aabe
trackers.py
159,567
9
52
_reset
https://github.com/RasaHQ/rasa.git
convert TrackerActiveLoop to a dataclass
79
0
38,339
8
42
63
def resolve_type_hint(hint) -> Any:
    origin, args = _get_type_hint_origin(hint)
    excluded_fields = get_override(hint, "exclude_fields", [])

    if origin is None and is_basic_type(hint, allow_none=False):
        return build_basic_type(hint)
    elif origin is None and inspect.isclass(hint) and issubclass(hint, tuple):
        # a convoluted way to catch NamedTuple. suggestions welcome.
        if get_type_hints(hint):
            properties = {k: resolve_type_hint(v) for k, v in get_type_hints(hint).items()}
        else:
            properties = {k: build_basic_type(OpenApiTypes.ANY) for k in hint._fields}
        return build_object_type(properties=properties, required=properties.keys())
    elif origin is list or hint is list:
        return build_array_type(
            resolve_type_hint(args[0]) if args else build_basic_type(OpenApiTypes.ANY)
        )
    elif origin is tuple:
        return build_array_type(
            schema=build_basic_type(args[0]),
            max_length=len(args),
            min_length=len(args),
        )
    elif origin is dict or origin is defaultdict:
        schema = build_basic_type(OpenApiTypes.OBJECT)
        if args and args[1] is not typing.Any:
            schema["additionalProperties"] = resolve_type_hint(args[1])
        return schema
    elif origin is set:
        return build_array_type(resolve_type_hint(args[0]))
    elif origin is frozenset:
        return build_array_type(resolve_type_hint(args[0]))
    elif origin is Literal:
        # Literal only works for python >= 3.8 despite typing_extensions, because it
        # behaves slightly different w.r.t. __origin__
        schema = {"enum": list(args)}
        if all(type(args[0]) is type(choice) for choice in args):
            schema.update(build_basic_type(type(args[0])))
        return schema
    elif inspect.isclass(hint) and issubclass(hint, Enum):
        schema = {"enum": [item.value for item in hint]}
        mixin_base_types = [t for t in hint.__mro__ if is_basic_type(t)]
        if mixin_base_types:
            schema.update(build_basic_type(mixin_base_types[0]))
        return schema
    elif isinstance(hint, _TypedDictMeta):
        return build_object_type(
            properties={
                k: resolve_type_hint(v)
                for k, v in get_type_hints(hint).items()
                if k not in excluded_fields
            },
            description=inspect.cleandoc(hint.__doc__ or ""),
            required=[h for h in hint.__required_keys__ if h not in excluded_fields],
        )
    elif origin is Union:
        type_args = [arg for arg in args if arg is not type(None)]  # noqa: E721
        if len(type_args) > 1:
            schema = {"oneOf": [resolve_type_hint(arg) for arg in type_args]}
        else:
            schema = resolve_type_hint(type_args[0])
        if type(None) in args:
            schema["nullable"] = True
        return schema
    elif origin is collections.abc.Iterable:
        return build_array_type(resolve_type_hint(args[0]))
    elif isinstance(hint, typing._TypedDictMeta):
        raise UnableToProceedError("Wrong TypedDict class, please use typing_extensions.TypedDict")
    else:
        raise UnableToProceedError(hint)
src/sentry/apidocs/spectacular_ports.py
895
sentry
{ "docstring": "drf-spectacular library method modified as described above", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
284
Python
148
286bf2ae7ecfdd6698d8fb1cd4753f107159d4d2
spectacular_ports.py
86,529
67
569
resolve_type_hint
https://github.com/getsentry/sentry.git
ref: use dict instead of OrderedDict since sentry is >python3.6 (#39695) partially automated (especially the fixtures) also via `\(([^]+), (.*)\),$` -> `\1: \2,`
788
0
18,119
17
1
21
def test_mark_checked_unexpected_exception(self, mock_patch_already_checked, mock_delete_pod):
    k = KubernetesPodOperator(
        namespace="default",
        image="ubuntu:16.04",
        name="test",
        task_id="task",
        is_delete_operator_pod=False,
    )
    self.await_pod_mock.side_effect = AirflowException("oops")
    context = create_context(k)
    with pytest.raises(AirflowException):
        k.execute(context=context)
    mock_patch_already_checked.assert_called_once()
    mock_delete_pod.assert_not_called()
tests/providers/cncf/kubernetes/operators/test_kubernetes_pod.py
133
airflow
{ "docstring": "If we aren't deleting pods and have an exception, mark it so we don't reattach to it", "language": "en", "n_whitespaces": 16, "n_words": 17, "vocab_size": 15 }
24
Python
22
c3d883a971a8e4e65ccc774891928daaaa0f4442
test_kubernetes_pod.py
47,750
14
77
test_mark_checked_unexpected_exception
https://github.com/apache/airflow.git
KubernetesPodOperator should patch "already checked" always (#22734) When not configured to delete pods, at end of task execution the current behavior is to patch the pod as "already checked", but only if pod not successful. We should also patch when successful so it isn't "reattached" to after a task clear.
146
0
9,243
10
5
29
def batch_test(num_threads, delay):
    with mock.patch(
        "ray.autoscaler._private.aws.node_provider.make_ec2_client"
    ), mock.patch.object(AWSNodeProvider, "_create_tags", mock_create_tags):
        provider = AWSNodeProvider(
            provider_config={"region": "nowhere"}, cluster_name="default"
        )
        provider.batch_counter = 0
        provider.tag_update_counter = 0
        provider.tag_cache = {str(x): {} for x in range(num_threads)}

        threads = []
        for x in range(num_threads):
            thread = threading.Thread(
                target=provider.set_node_tags, args=(str(x), {"foo": "bar"})
            )
            threads.append(thread)

        for thread in threads:
            thread.start()
            time.sleep(delay)
        for thread in threads:
            thread.join()

        return provider.batch_counter, provider.tag_update_counter
python/ray/tests/aws/test_aws_batch_tag_update.py
256
ray
{ "docstring": "Run AWSNodeProvider.set_node_tags in several threads, with a\n specified delay between thread launches.\n\n Return the number of batches of tag updates and the number of tags\n updated.\n ", "language": "en", "n_whitespaces": 38, "n_words": 26, "vocab_size": 22 }
61
Python
43
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
test_aws_batch_tag_update.py
131,087
22
154
batch_test
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
239
0
29,474
17
9
7
def validate_config(self, config):
    # Call (base) PPO's config validation function first.
    # Note that this will not touch or check on the train_batch_size=-1
    # setting.
    super().validate_config(config)

    # Error if run on Win.
    if sys.platform in ["win32", "cygwin"]:
        raise ValueError(
            "DD-PPO not supported on Win yet! "
            "Due to usage of torch.distributed."
        )

    # Auto-train_batch_size: Calculate from rollout len and
    # envs-per-worker.
    if config["train_batch_size"] == -1:
        config["train_batch_size"] = (
            config["rollout_fragment_length"] * config["num_envs_per_worker"]
        )
    # Users should not define `train_batch_size` directly (always -1).
    else:
        raise ValueError(
            "Set rollout_fragment_length instead of train_batch_size "
            "for DDPPO."
        )

    # Only supported for PyTorch so far.
    if config["framework"] != "torch":
        raise ValueError("Distributed data parallel is only supported for PyTorch")
    if config["torch_distributed_backend"] not in ("gloo", "mpi", "nccl"):
        raise ValueError(
            "Only gloo, mpi, or nccl is supported for "
            "the backend of PyTorch distributed."
        )

    # `num_gpus` must be 0/None, since all optimization happens on Workers.
    if config["num_gpus"]:
        raise ValueError(
            "When using distributed data parallel, you should set "
            "num_gpus=0 since all optimization "
            "is happening on workers. Enable GPUs for workers by setting "
            "num_gpus_per_worker=1."
        )

    # `batch_mode` must be "truncate_episodes".
    if config["batch_mode"] != "truncate_episodes":
        raise ValueError(
            "Distributed data parallel requires truncate_episodes "
            "batch mode."
        )

    # DDPPO doesn't support KL penalties like PPO-1.
    # In order to support KL penalties, DDPPO would need to become
    # undecentralized, which defeats the purpose of the algorithm.
    # Users can still tune the entropy coefficient to control the
    # policy entropy (similar to controlling the KL penalty).
    if config["kl_coeff"] != 0.0 or config["kl_target"] != 0.0:
        raise ValueError("DDPPO doesn't support KL penalties like PPO-1")
rllib/agents/ppo/ddppo.py
306
ray
{ "docstring": "Validates the Trainer's config dict.\n\n Args:\n config (TrainerConfigDict): The Trainer's config to check.\n\n Raises:\n ValueError: In case something is wrong with the config.\n ", "language": "en", "n_whitespaces": 66, "n_words": 23, "vocab_size": 19 }
264
Python
168
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ddppo.py
133,790
34
152
validate_config
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
743
0
30,109
12
1
14
def binary_accuracy(y_true, y_pred, threshold=0.5):
    y_pred = tf.convert_to_tensor(y_pred)
    threshold = tf.cast(threshold, y_pred.dtype)
    y_pred = tf.cast(y_pred > threshold, y_pred.dtype)
    return tf.cast(tf.equal(y_true, y_pred), tf.int8)


@keras_export('keras.metrics.categorical_accuracy')
@tf.__internal__.dispatch.add_dispatch_support
keras/metrics/metrics.py
121
@keras_export('keras.metrics.categorical_accuracy') @tf.__internal__.dispatch.add_dispatch_support
keras
{ "docstring": "Calculates how often predictions match binary labels.\n\n Standalone usage:\n >>> y_true = [[1], [1], [0], [0]]\n >>> y_pred = [[1], [1], [0], [0]]\n >>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred)\n >>> assert m.shape == (4,)\n >>> m.numpy()\n array([1., 1., 1., 1.], dtype=float32)\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n threshold: (Optional) Float representing the threshold for deciding whether\n prediction values are 1 or 0.\n\n Returns:\n Binary accuracy values. shape = `[batch_size, d0, .. dN]`\n ", "language": "en", "n_whitespaces": 113, "n_words": 86, "vocab_size": 61 }
23
Python
19
b96f9fdc2d5d0d375809ad9c16608830b01fc59a
metrics.py
268,902
5
66
binary_accuracy
https://github.com/keras-team/keras.git
fix sample_weight for BinAcc
26
1
79,763
9
3
9
def update(self) -> None:
    self.data.update()
    if (value := self.data.value) is None:
        value = STATE_UNKNOWN
    if self._value_template is not None:
        value = self._value_template.render_with_possible_json_value(
            str(value), STATE_UNKNOWN
        )
    self._state = value
homeassistant/components/influxdb/sensor.py
96
core
{ "docstring": "Get the latest data from Influxdb and updates the states.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
28
Python
19
23090cb8a268b3f268aefa8477f30af88bf46051
sensor.py
305,137
10
59
update
https://github.com/home-assistant/core.git
Improve entity type hints [i] (#77529)
111
0
103,929
12
7
16
def get_stack(self, f, t):
    stack = []
    if t and t.tb_frame is f:
        t = t.tb_next
    while f is not None:
        stack.append((f, f.f_lineno))
        if f is self.botframe:
            break
        f = f.f_back
    stack.reverse()
    i = max(0, len(stack) - 1)
    while t is not None:
        stack.append((t.tb_frame, t.tb_lineno))
        t = t.tb_next
    if f is None:
        i = max(0, len(stack) - 1)
    return stack, i
python3.10.4/Lib/bdb.py
195
XX-Net
{ "docstring": "Return a list of (frame, lineno) in a stack trace and a size.\n\n List starts with original calling frame, if there is one.\n Size may be number of frames above or below f.\n ", "language": "en", "n_whitespaces": 54, "n_words": 33, "vocab_size": 30 }
61
Python
33
8198943edd73a363c266633e1aa5b2a9e9c9f526
bdb.py
221,146
17
124
get_stack
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
216
0
56,241
13
8
22
def _generate_graph6_bytes(G, nodes, header):
    n = len(G)
    if n >= 2**36:
        raise ValueError(
            "graph6 is only defined if number of nodes is less " "than 2 ** 36"
        )
    if header:
        yield b">>graph6<<"
    for d in n_to_data(n):
        yield str.encode(chr(d + 63))
    # This generates the same as `(v in G[u] for u, v in combinations(G, 2))`,
    # but in "column-major" order instead of "row-major" order.
    bits = (nodes[j] in G[nodes[i]] for j in range(1, n) for i in range(j))
    chunk = list(islice(bits, 6))
    while chunk:
        d = sum(b << 5 - i for i, b in enumerate(chunk))
        yield str.encode(chr(d + 63))
        chunk = list(islice(bits, 6))
    yield b"\n"
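A small usage sketch (an assumption about how callers consume the generator, namely by joining the yielded byte chunks into one graph6 record):

    import networkx as nx

    G = nx.path_graph(4)
    # nodes=list(G) encodes the full graph; header=True prepends b">>graph6<<"
    data = b"".join(_generate_graph6_bytes(G, nodes=list(G), header=True))
    print(data)  # graph6-encoded bytes for G, ending in a newline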
networkx/readwrite/graph6.py
237
networkx
{ "docstring": "Yield bytes in the graph6 encoding of a graph.\n\n `G` is an undirected simple graph. `nodes` is the list of nodes for\n which the node-induced subgraph will be encoded; if `nodes` is the\n list of all nodes in the graph, the entire graph will be\n encoded. `header` is a Boolean that specifies whether to generate\n the header ``b'>>graph6<<'`` before the remaining data.\n\n This function generates `bytes` objects in the following order:\n\n 1. the header (if requested),\n 2. the encoding of the number of nodes,\n 3. each character, one-at-a-time, in the encoding of the requested\n node-induced subgraph,\n 4. a newline character.\n\n This function raises :exc:`ValueError` if the graph is too large for\n the graph6 format (that is, greater than ``2 ** 36`` nodes).\n\n ", "language": "en", "n_whitespaces": 167, "n_words": 122, "vocab_size": 77 }
107
Python
75
f6755ffa00211b523c6c0bec5398bc6c3c43c8b1
graph6.py
176,498
17
147
_generate_graph6_bytes
https://github.com/networkx/networkx.git
Update black (#5438) * CI: sync up black dev requirements version with precommit * Run black Co-authored-by: Jarrod Millman <[email protected]>
200
0
41,937
13
4
13
def set_constrained_layout(self, constrained):
    if constrained is None:
        constrained = mpl.rcParams['figure.constrained_layout.use']
    _constrained = bool(constrained)
    _parameters = constrained if isinstance(constrained, dict) else {}
    if _constrained:
        self.set_layout_engine(ConstrainedLayoutEngine(**_parameters))
    self.stale = True
lib/matplotlib/figure.py
96
matplotlib
{ "docstring": "\n Set whether ``constrained_layout`` is used upon drawing. If None,\n :rc:`figure.constrained_layout.use` value will be used.\n\n When providing a dict containing the keys ``w_pad``, ``h_pad``\n the default ``constrained_layout`` paddings will be\n overridden. These pads are in inches and default to 3.0/72.0.\n ``w_pad`` is the width padding and ``h_pad`` is the height padding.\n\n See :doc:`/tutorials/intermediate/constrainedlayout_guide`.\n\n Parameters\n ----------\n constrained : bool or dict or None\n ", "language": "en", "n_whitespaces": 140, "n_words": 61, "vocab_size": 48 }
27
Python
20
ec4dfbc3c83866f487ff0bc9c87b0d43a1c02b22
figure.py
107,135
8
58
set_constrained_layout
https://github.com/matplotlib/matplotlib.git
ENH: implement and use base layout_engine for more flexible layout.
91
0
22,600
12
4
13
def decrypt_file(self, file, key):
    # precondition
    assert isinstance(file, str) and isinstance(key, int)

    try:
        with open(file, "r") as fin:
            with open("decrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))

    except:
        return False

    return True


# Tests
# crypt = XORCipher()
# key = 67

# # test enrcypt
# print crypt.encrypt("hallo welt",key)
# # test decrypt
# print crypt.decrypt(crypt.encrypt("hallo welt",key), key)

# # test encrypt_string
# print crypt.encrypt_string("hallo welt",key)

# # test decrypt_string
# print crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)

# if (crypt.encrypt_file("test.txt",key)):
#     print "encrypt successful"
# else:
#     print "encrypt unsuccessful"

# if (crypt.decrypt_file("encrypt.out",key)):
#     print "decrypt successful"
# else:
#     print "decrypt unsuccessful"
XORcipher/XOR_cipher.py
141
Python
{ "docstring": "\n input: filename (str) and a key (int)\n output: returns true if decrypt process was\n successful otherwise false\n if key not passed the method uses the key by the constructor.\n otherwise key = 1\n ", "language": "en", "n_whitespaces": 76, "n_words": 33, "vocab_size": 26 }
106
Python
60
f0af0c43340763724f139fa68aa1e5a9ffe458b4
XOR_cipher.py
22,541
10
70
decrypt_file
https://github.com/geekcomputers/Python.git
refactor: clean code Signed-off-by: slowy07 <[email protected]>
227
0
4,357
17
5
16
def cast_if_floating_dtype_and_mismatch(targets, outputs):
    if tf.is_tensor(targets):
        # There is one target, so output[0] should be the only output.
        return cast_single_tensor(targets, dtype=outputs[0].dtype)
    new_targets = []
    for target, out in zip(targets, outputs):
        if isinstance(target, np.ndarray):
            target = tf.convert_to_tensor(target)
        if target.dtype != out.dtype:
            new_targets.append(cast_single_tensor(target, dtype=out.dtype))
        else:
            new_targets.append(target)
    return new_targets
keras/engine/training_utils_v1.py
155
keras
{ "docstring": "Returns target data tensors using correct datatype.\n\n Checks that each target and output pair are the same datatype. If not, casts\n the target to the output's datatype.\n\n Args:\n targets: tensor or list of targets.\n outputs: tensor or list of outputs.\n\n Returns:\n Targets in appropriate datatype.\n ", "language": "en", "n_whitespaces": 75, "n_words": 45, "vocab_size": 34 }
45
Python
38
84afc5193d38057e2e2badf9c889ea87d80d8fbf
training_utils_v1.py
271,870
12
98
cast_if_floating_dtype_and_mismatch
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
128
0
80,881
15
1
11
def _get_font(self, prop):
    filenames = _fontManager._find_fonts_by_props(prop)
    font = get_font(filenames)
    font.set_size(self.FONT_SCALE, self.DPI)
    return font
lib/matplotlib/textpath.py
59
matplotlib
{ "docstring": "\n Find the `FT2Font` matching font properties *prop*, with its size set.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
13
Python
11
140257e3ac710450c97587f95532a84a28cc526c
textpath.py
109,212
5
36
_get_font
https://github.com/matplotlib/matplotlib.git
ENH: add font fallback support to svg
48
0
23,478
8
4
26
def call_hm(self, other_args):
    parser = argparse.ArgumentParser(
        prog="hm",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description=,
    )
    parser.add_argument(
        "-l",
        "--limit",
        dest="limit",
        type=int,
        help="Display N items",
        default=10,
    )
    parser.add_argument(
        "-c",
        "--category",
        default="",
        dest="category",
        help="Category (e.g., stablecoins). Empty for no category",
    )
    if other_args and not other_args[0][0] == "-":
        other_args.insert(0, "-c")
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_FIGURES_ALLOWED
    )
    if ns_parser:
        pycoingecko_view.display_crypto_heatmap(
            category=ns_parser.category,
            limit=ns_parser.limit,
            export=ns_parser.export,
        )
openbb_terminal/cryptocurrency/overview/overview_controller.py
225
OpenBBTerminal
{ "docstring": "Process hm commandDisplay cryptocurrencies heatmap with daily percentage change [Source: https://coingecko.com]\n Accepts --category or -c to display only coins of a certain category\n (default no category to display all coins ranked by market cap).\n You can look on only top N number of records with --limit.\n ", "language": "en", "n_whitespaces": 90, "n_words": 46, "vocab_size": 39 }
57
Python
50
5bc7bc070ed7c9051b1277d0db21299fd310d42b
overview_controller.py
285,746
37
139
call_hm
https://github.com/OpenBB-finance/OpenBBTerminal.git
Tests over API Best Practices (#2344) * Started new tests * Added special function * Improved dict * Finished new test * Got bad functions to 75 * Got bad func to 73 * Bad func down to 60 * Bad func down to 50 * Bad func down to 35 * Got bad func to 30 * No more bad functions * Added tests * Added fix * Fixed some tests * Fixed some tests * Fixed some tests * Fixed some tests * Added tests to CI * Fixed CI tests * Fixed CI tests * Imrproved CI tests * Fixed reports * Fixed reports * Added stuff * Removed CI * Fixed * Fixed some typing' Co-authored-by: minhhoang1023 <[email protected]>
388
0
85,414
11
5
17
def create_system_audit_entry(transaction_id=None, logger=None, **kwargs):
    entry = AuditLogEntry(actor_label="Sentry", **kwargs)
    if entry.event is not None:
        entry.save_or_write_to_kafka()

    extra = {
        "organization_id": entry.organization_id,
        "object_id": entry.target_object,
        "entry_id": entry.id,
        "actor_label": entry.actor_label,
    }
    if transaction_id is not None:
        extra["transaction_id"] = transaction_id

    if logger:
        # Only use the api_name for the logger message when the entry
        # is a real AuditLogEntry record
        if entry.event is not None:
            logger.info(audit_log.get(entry.event).api_name, extra=extra)
        else:
            logger.info(entry, extra=extra)

    return entry
src/sentry/utils/audit.py
202
sentry
{ "docstring": "\n Creates an audit log entry for events that are triggered by Sentry's\n systems and do not have an associated Sentry user as the \"actor\".\n ", "language": "en", "n_whitespaces": 34, "n_words": 24, "vocab_size": 23 }
66
Python
46
941184cd24186324fd9f7f304b7f713041834726
audit.py
86,879
18
123
create_system_audit_entry
https://github.com/getsentry/sentry.git
chore(hybrid-cloud): AuditLogEntry is a control silo model now (#39890) In the control silo, creating an audit log entry writes to the db directly, whilst in region silo mode creating an audit log entry will instead push to a new kafka producer that consumes into the control silo asynchronously.
182
0
18,182
15
4
15
def convert_to_svg(dvi_file, extension, page=1):
    result = dvi_file.with_suffix(".svg")
    if not result.exists():
        commands = [
            "dvisvgm",
            "--pdf" if extension == ".pdf" else "",
            "-p " + str(page),
            f'"{dvi_file}"',
            "-n",
            "-v 0",
            "-o " + f'"{result}"',
            ">",
            os.devnull,
        ]
        os.system(" ".join(commands))

    # if the file does not exist now, this means conversion failed
    if not result.exists():
        raise ValueError(
            f"Your installation does not support converting {dvi_file.suffix} files to SVG."
            f" Consider updating dvisvgm to at least version 2.4."
            f" If this does not solve the problem, please refer to our troubleshooting guide at:"
            f" https://docs.manim.community/en/stable/installation/troubleshooting.html",
        )

    return result
manim/utils/tex_file_writing.py
192
manim
{ "docstring": "Converts a .dvi, .xdv, or .pdf file into an svg using dvisvgm.\n\n Parameters\n ----------\n dvi_file : :class:`Path`\n File name of the input file to be converted.\n extension : :class:`str`\n String containing the file extension and thus indicating the file type, e.g. ``.dvi`` or ``.pdf``\n page : Optional[:class:`int`], optional\n Page to be converted if input file is multi-page.\n\n Returns\n -------\n :class:`Path`\n Path to generated SVG file.\n ", "language": "en", "n_whitespaces": 120, "n_words": 65, "vocab_size": 50 }
94
Python
74
9d1f066d637cb15baea10e6907ab85efff8fb36f
tex_file_writing.py
190,076
23
101
convert_to_svg
https://github.com/ManimCommunity/manim.git
Migrate more `os.path` to `pathlib` (#2980) * Migrate more `os.path` to `pathlib` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix type errors with recent pathlib code * pathlib fixes * more pathlib fixes * remove unused imports introduced by pathlib migration * convert `open()` calls to pathlib * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Migrate tex_file_writing to pathlib * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * converted more old code to pathlib, and fixed a bug in module_ops * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix test failures * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix test failures * Apply suggestions from code review Co-authored-by: Benjamin Hackl <[email protected]> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Benjamin Hackl <[email protected]>
290
0
46,312
13
1
2
def group(self): return self["group"]
packages/python/plotly/plotly/graph_objs/_frame.py
22
plotly.py
{ "docstring": "\n An identifier that specifies the group to which the frame\n belongs, used by animate to select a subset of frames.\n\n The 'group' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n\n Returns\n -------\n str\n ", "language": "en", "n_whitespaces": 115, "n_words": 47, "vocab_size": 36 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_frame.py
226,746
2
11
group
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
58,419
7
4
12
def _match_long_opt(self, opt):
    try:
        return super()._match_long_opt(opt)
    except optparse.AmbiguousOptionError as e:
        if len(set(self._long_opt[p] for p in e.possibilities)) == 1:
            return e.possibilities[0]
        raise
yt_dlp/options.py
91
yt-dlp
{ "docstring": "Improve ambigious argument resolution by comparing option objects instead of argument strings", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
21
Python
20
db2e129ca0c11de84d57b2298dffd5d87e852518
options.py
162,533
7
56
_match_long_opt
https://github.com/yt-dlp/yt-dlp.git
[options] Better ambiguous option resolution Eg: `--write-auto` no longer results in > ambiguous option: --write-auto (--write-auto-subs, --write-automatic-subs?)
90
0
39,211
15
3
7
def global_policy():
    if _global_policy is None:
        if base_layer_utils.v2_dtype_behavior_enabled():
            return Policy(backend.floatx())
        else:
            return Policy("_infer")
    return _global_policy
keras/mixed_precision/policy.py
64
keras
{ "docstring": "Returns the global dtype policy.\n\n The global policy is the default `tf.keras.mixed_precision.Policy` used for\n layers, if no policy is passed to the layer constructor. If no policy has been\n set with `keras.mixed_precision.set_global_policy`, this will return a policy\n constructed from `tf.keras.backend.floatx()` (floatx defaults to float32).\n\n >>> tf.keras.mixed_precision.global_policy()\n <Policy \"float32\">\n >>> tf.keras.layers.Dense(10).dtype_policy # Defaults to the global policy\n <Policy \"float32\">\n\n If TensorFlow 2 behavior has been disabled with\n `tf.compat.v1.disable_v2_behavior()`, this will instead return a special\n \"_infer\" policy which infers the dtype from the dtype of the first input the\n first time the layer is called. This behavior matches the behavior that\n existed in TensorFlow 1.\n\n See `tf.keras.mixed_precision.Policy` for more information on policies.\n\n Returns:\n The global Policy.\n ", "language": "en", "n_whitespaces": 168, "n_words": 114, "vocab_size": 70 }
15
Python
11
84afc5193d38057e2e2badf9c889ea87d80d8fbf
policy.py
275,142
7
35
global_policy
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
60
0
81,317
13
4
10
def __eq__(self, other):
    ans = True
    SIZE = self.size()
    if SIZE == other.size():
        for i in range(SIZE):
            if self.__components[i] != other.component(i):
                ans = False
                break
    else:
        ans = False
    return ans
linear-algebra-python/src/lib.py
101
Python
{ "docstring": "\n returns true if the vectors are equal otherwise false.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
31
Python
22
f0af0c43340763724f139fa68aa1e5a9ffe458b4
lib.py
22,659
11
61
__eq__
https://github.com/geekcomputers/Python.git
refactor: clean code Signed-off-by: slowy07 <[email protected]>
148
0
4,388
12
4
31
def fit(self, X, y=None):
    # large sparse data is not supported for 32bit platforms because
    # _document_frequency uses np.bincount which works on arrays of
    # dtype NPY_INTP which is int32 for 32bit platforms. See #20923
    X = self._validate_data(
        X, accept_sparse=("csr", "csc"), accept_large_sparse=not _IS_32BIT
    )
    if not sp.issparse(X):
        X = sp.csr_matrix(X)
    dtype = X.dtype if X.dtype in FLOAT_DTYPES else np.float64

    if self.use_idf:
        n_samples, n_features = X.shape
        df = _document_frequency(X)
        df = df.astype(dtype, copy=False)

        # perform idf smoothing if required
        df += int(self.smooth_idf)
        n_samples += int(self.smooth_idf)

        # log+1 instead of log makes sure terms with zero idf don't get
        # suppressed entirely.
        idf = np.log(n_samples / df) + 1
        self._idf_diag = sp.diags(
            idf,
            offsets=0,
            shape=(n_features, n_features),
            format="csr",
            dtype=dtype,
        )

    return self
sklearn/feature_extraction/text.py
249
scikit-learn
{ "docstring": "Learn the idf vector (global term weights).\n\n Parameters\n ----------\n X : sparse matrix of shape n_samples, n_features)\n A matrix of term/token counts.\n\n y : None\n This parameter is not needed to compute tf-idf.\n\n Returns\n -------\n self : object\n Fitted transformer.\n ", "language": "en", "n_whitespaces": 129, "n_words": 40, "vocab_size": 36 }
119
Python
87
f1d3417b086550be670cbfbb5b3c1760ac99203f
text.py
259,040
22
156
fit
https://github.com/scikit-learn/scikit-learn.git
MNT Drops Python 3.7 in CI, wheel building, and docs (#22617) * MNT Drops Python 3.7 * MNT Bump NumPy and SciPy * FIX Fix build * FIX Bump versions improved * DOC Fixes numpy version [pypy] * BLD [pypy] [icc-build] * Update docs * MAINT use scipy.optimize.LinearConstraint in test * MAINT scipy 1.1.0 related code clean-up * scipy>=1.3.2 in pyproject.toml's build deps * [cd build] * DOC Adds comment about pypy * MAINT remove _astype_copy_false * FIX Update check for python version in setup.py Co-authored-by: Olivier Grisel <[email protected]>
407
0
75,536
12
2
7
def get_penalty_details(against_loan):
    penalty_details = frappe.db.sql(
        ,
        (against_loan, against_loan),
    )

    if penalty_details:
        return penalty_details[0][0], flt(penalty_details[0][1])
    else:
        return None, 0
erpnext/loan_management/doctype/loan_repayment/loan_repayment.py
77
erpnext
{ "docstring": "\n\t\tSELECT posting_date, (penalty_amount - total_penalty_paid) as pending_penalty_amount\n\t\tFROM `tabLoan Repayment` where posting_date >= (SELECT MAX(posting_date) from `tabLoan Repayment`\n\t\twhere against_loan = %s) and docstatus = 1 and against_loan = %s\n\t", "language": "en", "n_whitespaces": 27, "n_words": 30, "vocab_size": 23 }
18
Python
17
494bd9ef78313436f0424b918f200dab8fc7c20b
loan_repayment.py
66,329
13
50
get_penalty_details
https://github.com/frappe/erpnext.git
style: format code with black
9
0
14,167
12
9
25
def polarity_scores(self, text):
    # convert emojis to their textual descriptions
    text_no_emoji = ""
    prev_space = True
    for chr in text:
        if chr in self.emojis:
            # get the textual description
            description = self.emojis[chr]
            if not prev_space:
                text_no_emoji += " "
            text_no_emoji += description
            prev_space = False
        else:
            text_no_emoji += chr
            prev_space = chr == " "
    text = text_no_emoji.strip()

    sentitext = SentiText(text)

    sentiments = []
    words_and_emoticons = sentitext.words_and_emoticons
    for i, item in enumerate(words_and_emoticons):
        valence = 0
        # check for vader_lexicon words that may be used as modifiers or negations
        if item.lower() in BOOSTER_DICT:
            sentiments.append(valence)
            continue
        if (
            i < len(words_and_emoticons) - 1
            and item.lower() == "kind"
            and words_and_emoticons[i + 1].lower() == "of"
        ):
            sentiments.append(valence)
            continue

        sentiments = self.sentiment_valence(valence, sentitext, item, i, sentiments)

    sentiments = self._but_check(words_and_emoticons, sentiments)

    valence_dict = self.score_valence(sentiments, text)

    return valence_dict
build/pyinstaller/vaderSentiment/vaderSentiment.py
309
OpenBBTerminal
{ "docstring": "\n Return a float for sentiment strength based on the input text.\n Positive values are positive valence, negative value are negative\n valence.\n ", "language": "en", "n_whitespaces": 50, "n_words": 21, "vocab_size": 19 }
131
Python
82
ab4de1dd70fba866930150e440a03e461a6ca6a8
vaderSentiment.py
283,210
33
186
polarity_scores
https://github.com/OpenBB-finance/OpenBBTerminal.git
Create a packaged app bundle with Pyinstaller (#1525) * Add dashboard widget assets * Add ipywidgets and ipyflex to project * Add currencies dashboard notebook * Update docs and docstrings * Add pyinstaller to project deps * Add pyinstaller artifacts to gitignore * Fix linter errors in terminal.py * Update cspell hook and action with a pyinstaller specific word * Add pyinstaller specfile and artifacts * Add splashscreen image * Add app icon * adding splash screen support to terminal.spec and terminal.py * Restore the conda env build files * Sync deps * Add border to the splashscreen image * Clean up terminal launcher * Add support for default feature flags in packages apps * Fix types and linting * Add splashscreen management to app bootup * Check prediction feature flag when entering crypto/pred * Update pyinstaller spec file * fix .spec file to work for splash and icon - removed the ".." * Allows to export when using installer (#1568) * fix export for packaged apps * fix filename * Git : replace commit_hash when it is set in config_terminal * Add update of the git commit hash in gtff default during build * Add packaged app name and feature flag to logs * Add platform specific icon assignment * Add macOS build assets * Add tensorflow to hidden imports * Move LOGGING_COMMIT_HASH to gtff * Adding files/folders needed to .spec and pyinstaller folder. This will make certain commands work again. * Linting * Workflow : ignore ./build/pyinstaller from codespell * Workflow : exclude ./build/pyinstaller from flake8 * Poetry + Workflow : add types-six * Pyinstaller : remove property_cached, user_agent and vaderSentiment * Revert "Pyinstaller : remove property_cached, user_agent and vaderSentiment" This reverts commit dbb3e2b81086f97819ebd21457148c7160a4d703. * Clean up local paths in specfile * Validate deps have correct Jinja version (they do) * Fix logging commit hash to be set correctly for the logger to see it Co-authored-by: Andrew <[email protected]> Co-authored-by: didierlopes.eth <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]>
539
0
84,470
14
2
10
def list_of_ips(self):
    # Defer draining call queue until we get the ip address
    result = [None] * len(self.list_of_block_partitions)
    for idx, partition in enumerate(self.list_of_block_partitions):
        partition.drain_call_queue()
        result[idx] = partition._ip_cache
    return result
modin/core/execution/dask/implementations/pandas_on_dask/partitioning/virtual_partition.py
75
modin
{ "docstring": "\n Get the IPs holding the physical objects composing this partition.\n\n Returns\n -------\n List\n A list of IPs as ``distributed.Future`` or str.\n ", "language": "en", "n_whitespaces": 68, "n_words": 21, "vocab_size": 19 }
29
Python
27
9bf8d57ca44e22fd69b0abc55793cf60c199ab4d
virtual_partition.py
154,155
6
45
list_of_ips
https://github.com/modin-project/modin.git
FIX-#4676: drain sub-virtual-partition call queues. (#4695) Signed-off-by: mvashishtha <[email protected]> Co-authored-by: Alexey Prutskov <[email protected]>
86
0
35,815
10
2
8
def set_status(self, status):
    self.status = status

    if status in JobResultStatusChoices.TERMINAL_STATE_CHOICES:
        self.completed = timezone.now()
netbox/extras/models/models.py
49
netbox
{ "docstring": "\n Helper method to change the status of the job result. If the target status is terminal, the completion\n time is also set.\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 17 }
13
Python
11
0bcc59a1e99065c1c0a143983fed4d0828d744f4
models.py
266,140
4
29
set_status
https://github.com/netbox-community/netbox.git
#8366: Add started field to JobResult
45
0
78,305
10
3
10
def to_text(value, errors='strict'):  # type: (t.AnyStr, str) -> str
    if isinstance(value, bytes):
        return value.decode(ENCODING, errors)

    if isinstance(value, str):
        return value

    raise Exception('value is not bytes or text: %s' % type(value))
test/lib/ansible_test/_internal/encoding.py
79
ansible
{ "docstring": "Return the given value as text decoded using UTF-8 if not already text.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
30
Python
27
86779cc90376ea70bafa7044b12ce5132409fd63
encoding.py
267,380
6
47
to_text
https://github.com/ansible/ansible.git
ansible-test - Code cleanup. This helps prepare for a future pylint upgrade.
57
0
78,870
10
2
5
def _is_lr_warming_up(self): return ( self.warmup_scheduler is not None and self._number_training_updates < self.warmup_updates )
parlai/nn/lr_scheduler.py
38
ParlAI
{ "docstring": "\n Check if we're warming up the learning rate.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
13
Python
13
9d77adb86f707967b88d93964d8056a8bd5c84ec
lr_scheduler.py
194,798
5
23
_is_lr_warming_up
https://github.com/facebookresearch/ParlAI.git
Warmup updates bug for LR < 1 (#4384) * revert bug * relax restrictions * even more relaxed :/
56
0
47,089
9
1
6
def is_same_loop(self) -> bool: return get_or_create_event_loop() == self.router._event_loop
python/ray/serve/handle.py
32
ray
{ "docstring": "Whether the caller's asyncio loop is the same loop for handle.\n\n This is only useful for async handles.\n ", "language": "en", "n_whitespaces": 32, "n_words": 18, "vocab_size": 14 }
8
Python
8
784e66b1f2265aeb9cb9a4e2404a8ac274abce3b
handle.py
136,465
6
18
is_same_loop
https://github.com/ray-project/ray.git
[all_tests][python] Remove calling of get_event_loop from python version >= 3.10 (#29285) get_event_loop is deprecated in 3.10. This PR removes its invocation with python >= 3.10 by introducing a proxy function get_or_create_event_loop in utils.py. More details please see the function comments. In the long run - we should refactor the event based code by either: using asyncio.run as much as possible after deprecating python 3.6 creating and managing the event loops explicitly across threads and different contexts. This PR only serves as a mitigation for not calling the deprecated function. Signed-off-by: rickyyx <[email protected]> Co-authored-by: Chen Shen <[email protected]>
22
0
30,926
8
1
20
def test_computed_list_display_localization(self):
    self.client.force_login(self.superuser)
    event = Event.objects.create(date=datetime.date.today())
    response = self.client.get(reverse("admin:admin_changelist_event_changelist"))
    self.assertContains(response, formats.localize(event.date))
    self.assertNotContains(response, str(event.date))
tests/admin_changelist/tests.py
124
django
{ "docstring": "\n Regression test for #13196: output of functions should be localized\n in the changelist.\n ", "language": "en", "n_whitespaces": 36, "n_words": 13, "vocab_size": 13 }
13
Python
12
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,001
6
75
test_computed_list_display_localization
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
55
0
51,830
12
5
14
def apply_transparency(self):
    if self.mode != "P" or "transparency" not in self.info:
        return

    from . import ImagePalette

    palette = self.getpalette("RGBA")
    transparency = self.info["transparency"]
    if isinstance(transparency, bytes):
        for i, alpha in enumerate(transparency):
            palette[i * 4 + 3] = alpha
    else:
        palette[transparency * 4 + 3] = 0
    self.palette = ImagePalette.ImagePalette("RGBA", bytes(palette))
    self.palette.dirty = 1

    del self.info["transparency"]
src/PIL/Image.py
186
Pillow
{ "docstring": "\n If a P mode image has a \"transparency\" key in the info dictionary,\n remove the key and apply the transparency to the palette instead.\n ", "language": "en", "n_whitespaces": 46, "n_words": 24, "vocab_size": 19 }
54
Python
41
11be1631433f252b816802aef1a3cd109bd308c7
Image.py
243,004
14
110
apply_transparency
https://github.com/python-pillow/Pillow.git
Added apply_transparency()
172
0
69,952
13
6
26
def mark_expected_failures_and_skips(self):
    # Only load unittest if we're actually testing.
    from unittest import expectedFailure, skip
    for test_name in self.connection.features.django_test_expected_failures:
        test_case_name, _, test_method_name = test_name.rpartition('.')
        test_app = test_name.split('.')[0]
        # Importing a test app that isn't installed raises RuntimeError.
        if test_app in settings.INSTALLED_APPS:
            test_case = import_string(test_case_name)
            test_method = getattr(test_case, test_method_name)
            setattr(test_case, test_method_name, expectedFailure(test_method))
    for reason, tests in self.connection.features.django_test_skips.items():
        for test_name in tests:
            test_case_name, _, test_method_name = test_name.rpartition('.')
            test_app = test_name.split('.')[0]
            # Importing a test app that isn't installed raises RuntimeError.
            if test_app in settings.INSTALLED_APPS:
                test_case = import_string(test_case_name)
                test_method = getattr(test_case, test_method_name)
                setattr(test_case, test_method_name, skip(reason)(test_method))
django/db/backends/base/creation.py
256
django
{ "docstring": "\n Mark tests in Django's test suite which are expected failures on this\n database and test which should be skipped on this database.\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 18 }
92
Python
48
890bfa368c33d6ae19fe45cf1eed7e2e8d63160e
creation.py
203,151
17
158
mark_expected_failures_and_skips
https://github.com/django/django.git
Refs #20349 -- Avoided loading testing libraries when not needed.
344
0
50,238
16
1
4
def images_found(self) -> int: return self._images_found
scripts/fsmedia.py
22
faceswap
{ "docstring": "int: The number of frames that exist in the video file, or the folder of images. ", "language": "en", "n_whitespaces": 16, "n_words": 16, "vocab_size": 14 }
6
Python
6
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
fsmedia.py
101,394
3
12
images_found
https://github.com/deepfakes/faceswap.git
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
20
0
20,809
6
4
7
def constrain_encoding(self) -> None:
    accept_encoding = self.headers.get("accept-encoding")
    if accept_encoding:
        self.headers["accept-encoding"] = ", ".join(
            e
            for e in {"gzip", "identity", "deflate", "br", "zstd"}
            if e in accept_encoding
        )
mitmproxy/http.py
95
mitmproxy
{ "docstring": "\n Limits the permissible Accept-Encoding values, based on what we can decode appropriately.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
27
Python
21
b3587b52b25077f68116b9852b041d33e7fc6601
http.py
251,372
11
52
constrain_encoding
https://github.com/mitmproxy/mitmproxy.git
make it black!
115
0
73,701
13
2
15
def __reduce__(self):
    if not hasattr(self, "model"):
        # Fields are sometimes used without attaching them to models (for
        # example in aggregation). In this case give back a plain field
        # instance. The code below will create a new empty instance of
        # class self.__class__, then update its dict with self.__dict__
        # values - so, this is very close to normal pickle.
        state = self.__dict__.copy()
        # The _get_default cached_property can't be pickled due to lambda
        # usage.
        state.pop("_get_default", None)
        return _empty, (self.__class__,), state
    return _load_field, (
        self.model._meta.app_label,
        self.model._meta.object_name,
        self.name,
    )
django/db/models/fields/__init__.py
115
django
{ "docstring": "\n Pickling should return the model._meta.fields instance of the field,\n not a new copy of that field. So, use the app registry to load the\n model and then the field back.\n ", "language": "en", "n_whitespaces": 59, "n_words": 30, "vocab_size": 25 }
89
Python
76
9c19aff7c7561e3a82978a272ecdaad40dda5c00
__init__.py
205,534
10
68
__reduce__
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
260
0
51,144
11
1
2
def meta(self): return self["meta"]
packages/python/plotly/plotly/graph_objs/_bar.py
22
plotly.py
{ "docstring": "\n Assigns extra meta information associated with this trace that\n can be used in various text attributes. Attributes such as\n trace `name`, graph, axis and colorbar `title.text`, annotation\n `text` `rangeselector`, `updatemenues` and `sliders` `label`\n text all support `meta`. To access the trace `meta` values in\n an attribute in the same trace, simply use `%{meta[i]}` where\n `i` is the index or key of the `meta` item in question. To\n access trace `meta` in layout attributes, use\n `%{data[n[.meta[i]}` where `i` is the index or key of the\n `meta` and `n` is the trace index.\n\n The 'meta' property accepts values of any type\n\n Returns\n -------\n Any|numpy.ndarray\n ", "language": "en", "n_whitespaces": 208, "n_words": 102, "vocab_size": 69 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_bar.py
226,150
2
11
meta
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
57,823
7
7
26
def process(self, in_queue, out_queue):
    logger.debug("Starting convert process. (in_queue: %s, out_queue: %s)", in_queue, out_queue)
    log_once = False
    while True:
        items = in_queue.get()
        if items == "EOF":
            logger.debug("EOF Received")
            logger.debug("Patch queue finished")
            # Signal EOF to other processes in pool
            logger.debug("Putting EOF back to in_queue")
            in_queue.put(items)
            break

        if isinstance(items, dict):
            items = [items]
        for item in items:
            logger.trace("Patch queue got: '%s'", item["filename"])
            try:
                image = self._patch_image(item)
            except Exception as err:  # pylint: disable=broad-except
                # Log error and output original frame
                logger.error("Failed to convert image: '%s'. Reason: %s", item["filename"], str(err))
                image = item["image"]
                loglevel = logger.trace if log_once else logger.warning
                loglevel("Convert error traceback:", exc_info=True)
                log_once = True
                # UNCOMMENT THIS CODE BLOCK TO PRINT TRACEBACK ERRORS
                import sys ; import traceback
                exc_info = sys.exc_info() ; traceback.print_exception(*exc_info)
            logger.trace("Out queue put: %s", item["filename"])
            out_queue.put((item["filename"], image))
    logger.debug("Completed convert process")
lib/convert.py
350
faceswap
{ "docstring": " Main convert process.\n\n Takes items from the in queue, runs the relevant adjustments, patches faces to final frame\n and outputs patched frame to the out queue.\n\n Parameters\n ----------\n in_queue: :class:`queue.Queue`\n The output from :class:`scripts.convert.Predictor`. Contains detected faces from the\n Faceswap model as well as the frame to be patched.\n out_queue: :class:`queue.Queue`\n The queue to place patched frames into for writing by one of Faceswap's\n :mod:`plugins.convert.writer` plugins.\n ", "language": "en", "n_whitespaces": 160, "n_words": 66, "vocab_size": 50 }
133
Python
102
3d8e674adc88b8f4cc206ebad6fb5b600e38fe14
convert.py
100,763
30
201
process
https://github.com/deepfakes/faceswap.git
convert - Fix affine borders
635
0
20,216
16
2
18
def traceParseAction(f):
.venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py
49
"""Decorator for debugging parse actions. When the parse action is called, this decorator will print ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``. When the parse action completes, the decorator will print ``"<<"`` followed by the returned value, or any exception that the parse action raised. Example::followed by the returned valueor any exception that the parse action
transferlearning
{ "docstring": "Decorator for debugging parse actions.\n\n When the parse action is called, this decorator will print\n ``\">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)\"``.\n When the parse action completes, the decorator will print\n ``\"<<\"`` followed by the returned value, or any exception that the parse action raised.\n\n Example::\n\n wd = Word(alphas)\n", "language": "en", "n_whitespaces": 68, "n_words": 47, "vocab_size": 34 }
2
Python
2
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
pyparsing.py
63,443
8
29
traceParseAction
https://github.com/jindongwang/transferlearning.git
upd; format
5
3
13,305
8
3
9
def _pixels_and_segments_fit_in_music_mode(self) -> bool:
    pixels_per_segment = self._device.pixels_per_segment
    segments = self._device.segments
    assert pixels_per_segment is not None
    assert segments is not None
    return bool(
        pixels_per_segment <= MUSIC_PIXELS_PER_SEGMENT_MAX
        and segments <= MUSIC_SEGMENTS_MAX
        and pixels_per_segment * segments <= MUSIC_PIXELS_MAX
    )
homeassistant/components/flux_led/number.py
78
core
{ "docstring": "Check if the base pixel and segment settings will fit for music mode.\n\n If they fit, they do not need to be configured.\n ", "language": "en", "n_whitespaces": 37, "n_words": 23, "vocab_size": 22 }
36
Python
22
e222e1b6f05b630bef5aed73e307ca5072b6f286
number.py
308,880
14
49
_pixels_and_segments_fit_in_music_mode
https://github.com/home-assistant/core.git
Add device configuration entities to flux_led (#62786) Co-authored-by: Chris Talkington <[email protected]>
118
0
107,608
10
2
10
def _generate_sharding_spec(world_size):
    placements = [f"rank:{idx}/cuda:{idx}" for idx in range(world_size)]
    # Shard the first nn module's weight by dim 0.
    # (nn.Linear transposes the weight internally so dim 0 actually means column)
    colwise_spec = ChunkShardingSpec(
        dim=0,
        placements=placements,
    )
    # Shard the second nn module's weight by dim 1.
    rowwise_spec = ChunkShardingSpec(
        dim=1,
        placements=placements,
    )
    # The result from the second nn.linear layer needs aggregation by dim 0.
    output_spec = ChunkShardingSpec(
        dim=0,
        placements=placements,
    )
    return colwise_spec, rowwise_spec, output_spec
distributed/sharded_tensor/tensor_parallel.py
110
examples
{ "docstring": "\n We first need to create a sharding spec for our sharding work.\n\n For now, we only support sharding on one dimension. So we use\n ``ChunkShardingSpec`` to chunk the size of the given sharding\n dim to equally split length. The behavior is similar to\n `torch.chunk`.\n\n We also need to create the output sharding spec for the second nn\n because we need to aggregate(reduce) the partial result after the\n second nn layer. So we have a new sharding spec to represent that\n how we store the aggregation result in a new sharded tensor.\n ", "language": "en", "n_whitespaces": 122, "n_words": 91, "vocab_size": 56 }
76
Python
47
9ba53df5a19131e6926027b2e73aaa77cec17272
tensor_parallel.py
82,839
15
64
_generate_sharding_spec
https://github.com/pytorch/examples.git
Gh/fduwjj/2/base (#1007) * test ghstack [ghstack-poisoned] * Update base for Update on "[PT-D] Add an example for Megatron-LM style example" [ghstack-poisoned] * Update base for Update on "[PT-D] Add an example for Megatron-LM style example" [ghstack-poisoned] * Update base for Update on "[PT-D] Add an example for Megatron-LM style example" [ghstack-poisoned] * Update base for Update on "[PT-D] Add an example for Megatron-LM style example" [ghstack-poisoned] * [PT-D] Add an example for Megatron-LM style example (#1006) * [PT-D] Add an example for Megatron-LM style example [ghstack-poisoned] * Update on "[PT-D] Add an example for Megatron-LM style example" [ghstack-poisoned]
157
0
17,549
10
4
23
def execute():
    frappe.reload_doc("buying", "doctype", "purchase_order")
    frappe.reload_doc("buying", "doctype", "supplier_quotation")
    frappe.reload_doc("selling", "doctype", "sales_order")
    frappe.reload_doc("selling", "doctype", "quotation")
    frappe.reload_doc("stock", "doctype", "delivery_note")
    frappe.reload_doc("stock", "doctype", "purchase_receipt")
    frappe.reload_doc("accounts", "doctype", "sales_invoice")
    frappe.reload_doc("accounts", "doctype", "purchase_invoice")

    doctypes = [
        "Sales Order",
        "Sales Invoice",
        "Delivery Note",
        "Purchase Order",
        "Purchase Invoice",
        "Purchase Receipt",
        "Quotation",
        "Supplier Quotation",
    ]

    for doctype in doctypes:
        total_qty = frappe.db.sql(
            .format(
                doctype
            ),
            as_dict=True,
        )

        # Query to update total_qty might become too big, Update in batches
        # batch_size is chosen arbitrarily, Don't try too hard to reason about it
        batch_size = 100000
        for i in range(0, len(total_qty), batch_size):
            batch_transactions = total_qty[i : i + batch_size]

            # UPDATE with CASE for some reason cannot use PRIMARY INDEX,
            # causing all rows to be examined, leading to a very slow update
            # UPDATE with WHERE clause uses PRIMARY INDEX, but will lead to too many queries
            # INSERT with ON DUPLICATE KEY UPDATE uses PRIMARY INDEX
            # and can perform multiple updates per query
            # This is probably never used anywhere else as of now, but should be
            values = []
            for d in batch_transactions:
                values.append("({0}, {1})".format(frappe.db.escape(d.parent), d.qty))
            conditions = ",".join(values)
            frappe.db.sql(
                .format(
                    doctype, conditions
                )
            )
erpnext/patches/v11_0/update_total_qty_field.py
388
erpnext
{ "docstring": "\n\t\t\tSELECT\n\t\t\t\tparent, SUM(qty) as qty\n\t\t\tFROM\n\t\t\t\t`tab{0} Item`\n\t\t\twhere parenttype = '{0}'\n\t\t\tGROUP BY parent\n\t\t\n\t\t\t\tINSERT INTO `tab{}` (name, total_qty) VALUES {}\n\t\t\t\tON DUPLICATE KEY UPDATE name = VALUES(name), total_qty = VALUES(total_qty)\n\t\t\t", "language": "en", "n_whitespaces": 24, "n_words": 32, "vocab_size": 30 }
188
Python
126
494bd9ef78313436f0424b918f200dab8fc7c20b
update_total_qty_field.py
66,581
48
213
execute
https://github.com/frappe/erpnext.git
style: format code with black
142
0
14,229
18
26
44
def prim_mst_edges(G, minimum, weight="weight", keys=True, data=True, ignore_nan=False):
    is_multigraph = G.is_multigraph()
    push = heappush
    pop = heappop

    nodes = set(G)
    c = count()

    sign = 1 if minimum else -1

    while nodes:
        u = nodes.pop()
        frontier = []
        visited = {u}
        if is_multigraph:
            for v, keydict in G.adj[u].items():
                for k, d in keydict.items():
                    wt = d.get(weight, 1) * sign
                    if isnan(wt):
                        if ignore_nan:
                            continue
                        msg = f"NaN found as an edge weight. Edge {(u, v, k, d)}"
                        raise ValueError(msg)
                    push(frontier, (wt, next(c), u, v, k, d))
        else:
            for v, d in G.adj[u].items():
                wt = d.get(weight, 1) * sign
                if isnan(wt):
                    if ignore_nan:
                        continue
                    msg = f"NaN found as an edge weight. Edge {(u, v, d)}"
                    raise ValueError(msg)
                push(frontier, (wt, next(c), u, v, d))
        while nodes and frontier:
            if is_multigraph:
                W, _, u, v, k, d = pop(frontier)
            else:
                W, _, u, v, d = pop(frontier)
            if v in visited or v not in nodes:
                continue
            # Multigraphs need to handle edge keys in addition to edge data.
            if is_multigraph and keys:
                if data:
                    yield u, v, k, d
                else:
                    yield u, v, k
            else:
                if data:
                    yield u, v, d
                else:
                    yield u, v
            # update frontier
            visited.add(v)
            nodes.discard(v)
            if is_multigraph:
                for w, keydict in G.adj[v].items():
                    if w in visited:
                        continue
                    for k2, d2 in keydict.items():
                        new_weight = d2.get(weight, 1) * sign
                        push(frontier, (new_weight, next(c), v, w, k2, d2))
            else:
                for w, d2 in G.adj[v].items():
                    if w in visited:
                        continue
                    new_weight = d2.get(weight, 1) * sign
                    push(frontier, (new_weight, next(c), v, w, d2))


ALGORITHMS = {
    "boruvka": boruvka_mst_edges,
    "borůvka": boruvka_mst_edges,
    "kruskal": kruskal_mst_edges,
    "prim": prim_mst_edges,
}


@not_implemented_for("directed")
networkx/algorithms/tree/mst.py
762
@not_implemented_for("directed")
networkx
{ "docstring": "Iterate over edges of Prim's algorithm min/max spanning tree.\n\n Parameters\n ----------\n G : NetworkX Graph\n The graph holding the tree of interest.\n\n minimum : bool (default: True)\n Find the minimum (True) or maximum (False) spanning tree.\n\n weight : string (default: 'weight')\n The name of the edge attribute holding the edge weights.\n\n keys : bool (default: True)\n If `G` is a multigraph, `keys` controls whether edge keys ar yielded.\n Otherwise `keys` is ignored.\n\n data : bool (default: True)\n Flag for whether to yield edge attribute dicts.\n If True, yield edges `(u, v, d)`, where `d` is the attribute dict.\n If False, yield edges `(u, v)`.\n\n ignore_nan : bool (default: False)\n If a NaN is found as an edge weight normally an exception is raised.\n If `ignore_nan is True` then that edge is ignored instead.\n\n ", "language": "en", "n_whitespaces": 230, "n_words": 133, "vocab_size": 80 }
264
Python
110
83cc6cd2811dbc6d20cfb3de809f21153b30e14e
mst.py
176,520
62
453
prim_mst_edges
https://github.com/networkx/networkx.git
Optimize prim for mst (#5455) Co-authored-by: Dylan <[email protected]>
1,173
1
41,941
20
1
9
def __add__(self, other):
    rank = (self.rank() + other) % self.cardinality
    rv = self.unrank_lex(self.size, rank)
    rv._rank = rank
    return rv
sympy/combinatorics/permutations.py
68
sympy
{ "docstring": "Return permutation that is other higher in rank than self.\n\n The rank is the lexicographical rank, with the identity permutation\n having rank of 0.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation\n >>> I = Permutation([0, 1, 2, 3])\n >>> a = Permutation([2, 1, 3, 0])\n >>> I + a.rank() == a\n True\n\n See Also\n ========\n\n __sub__, inversion_vector\n\n ", "language": "en", "n_whitespaces": 148, "n_words": 57, "vocab_size": 44 }
19
Python
15
498015021131af4dbb07eb110e5badaba8250c7b
permutations.py
196,160
5
42
__add__
https://github.com/sympy/sympy.git
Updated import locations
54
0
47,660
11
1
6
def to_integral_exact(self, a):
    a = _convert_other(a, raiseit=True)
    return a.to_integral_exact(context=self)
python3.10.4/Lib/_pydecimal.py
44
XX-Net
{ "docstring": "Rounds to an integer.\n\n When the operand has a negative exponent, the result is the same\n as using the quantize() operation using the given operand as the\n left-hand-operand, 1E+0 as the right-hand-operand, and the precision\n of the operand as the precision setting; Inexact and Rounded flags\n are allowed in this operation. The rounding mode is taken from the\n context.\n\n >>> ExtendedContext.to_integral_exact(Decimal('2.1'))\n Decimal('2')\n >>> ExtendedContext.to_integral_exact(Decimal('100'))\n Decimal('100')\n >>> ExtendedContext.to_integral_exact(Decimal('100.0'))\n Decimal('100')\n >>> ExtendedContext.to_integral_exact(Decimal('101.5'))\n Decimal('102')\n >>> ExtendedContext.to_integral_exact(Decimal('-101.5'))\n Decimal('-102')\n >>> ExtendedContext.to_integral_exact(Decimal('10E+5'))\n Decimal('1.0E+6')\n >>> ExtendedContext.to_integral_exact(Decimal('7.89E+77'))\n Decimal('7.89E+77')\n >>> ExtendedContext.to_integral_exact(Decimal('-Inf'))\n Decimal('-Infinity')\n ", "language": "en", "n_whitespaces": 245, "n_words": 83, "vocab_size": 56 }
9
Python
9
8198943edd73a363c266633e1aa5b2a9e9c9f526
_pydecimal.py
219,712
3
27
to_integral_exact
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
30
0
55,735
9
1
9
def test_invalid_parameters_in_stacking():
    stacker = StackingClassifier(estimators=[])

    html_output = estimator_html_repr(stacker)
    assert html.escape(str(stacker)) in html_output
sklearn/utils/tests/test_estimator_html_repr.py
56
scikit-learn
{ "docstring": "Invalidate stacking configuration uses default repr.\n\n Non-regression test for #24009.\n ", "language": "en", "n_whitespaces": 16, "n_words": 10, "vocab_size": 10 }
12
Python
10
84c6421a9067de7d1b54b7a6d8e21ce38e1f0eca
test_estimator_html_repr.py
260,606
4
32
test_invalid_parameters_in_stacking
https://github.com/scikit-learn/scikit-learn.git
FIX Show a HTML repr for meta-estimatosr with invalid parameters (#24015) Co-authored-by: Jérémie du Boisberranger <[email protected]>
24
0
76,372
10
1
8
def generate_new_id() -> str: return base58.b58encode(uuid.uuid4().bytes).decode()
lib/streamlit/session_data.py
43
streamlit
{ "docstring": "Randomly generate an ID representing this session's execution.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
6
Python
6
704eab3478cf69847825b23dabf15813a8ac9fa2
session_data.py
118,576
3
24
generate_new_id
https://github.com/streamlit/streamlit.git
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
12
0
26,302
12
1
5
def test_event_match_body(self) -> None:
    # if the key is `content.body`, the pattern matches substrings.

    # non-wildcards should match
    condition = {
        "kind": "event_match",
        "key": "content.body",
        "pattern": "foobaz",
    }
    self._assert_matches(
        condition,
        {"body": "aaa FoobaZ zzz"},
        "patterns should match and be case-insensitive",
    )
    self._assert_not_matches(
        condition,
        {"body": "aa xFoobaZ yy"},
        "pattern should only match at word boundaries",
    )
    self._assert_not_matches(
        condition,
        {"body": "aa foobazx yy"},
        "pattern should only match at word boundaries",
    )

    # wildcards should match
    condition = {
        "kind": "event_match",
        "key": "content.body",
        "pattern": "f?o*baz",
    }
    self._assert_matches(
        condition,
        {"body": "aaa FoobarbaZ zzz"},
        "* should match string and pattern should be case-insensitive",
    )
    self._assert_matches(
        condition, {"body": "aa foobaz yy"}, "* should match 0 characters"
    )
    self._assert_not_matches(
        condition, {"body": "aa fobbaz yy"}, "? should not match 0 characters"
    )
    self._assert_not_matches(
        condition, {"body": "aa fiiobaz yy"}, "? should not match 2 characters"
    )
    self._assert_not_matches(
        condition,
        {"body": "aa xfooxbaz yy"},
        "pattern should only match at word boundaries",
    )
    self._assert_not_matches(
        condition,
        {"body": "aa fooxbazx yy"},
        "pattern should only match at word boundaries",
    )

    # test backslashes
    condition = {
        "kind": "event_match",
        "key": "content.body",
        "pattern": r"f\oobaz",
    }
    self._assert_matches(
        condition,
        {"body": r"F\oobaz"},
        "backslash should match itself",
    )
    condition = {
        "kind": "event_match",
        "key": "content.body",
        "pattern": r"f\?obaz",
    }
    self._assert_matches(
        condition,
        {"body": r"F\oobaz"},
        r"? after \ should match any character",
    )
tests/push/test_push_rule_evaluator.py
450
synapse
{ "docstring": "Check that event_match conditions on content.body work as expected", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
209
Python
74
9d21ecf7ceab55bc19c4457b8b07401b0b1623a7
test_push_rule_evaluator.py
247,781
71
239
test_event_match_body
https://github.com/matrix-org/synapse.git
Add type hints to tests files. (#12256)
883
0
71,916
10
1
4
def layout(self) -> Layout: return self.styles.layout
src/textual/view.py
25
textual
{ "docstring": "Convenience property for accessing ``view.styles.layout``.\n\n Returns: The Layout associated with this view\n ", "language": "en", "n_whitespaces": 26, "n_words": 12, "vocab_size": 12 }
6
Python
6
9c2a125c2412c5d011307a80f4552cf9824cc022
view.py
182,034
6
14
layout
https://github.com/Textualize/textual.git
Ensuring we get and set Layout as set in view.styles everywhere
20
0
43,737
7
1
4
def _zeropad(s, padsize): return s + b"\x00" * (-len(s) % padsize)
scapy/libs/rfc3961.py
40
scapy
{ "docstring": "\n Return s padded with 0 bytes to a multiple of padsize.\n ", "language": "en", "n_whitespaces": 18, "n_words": 11, "vocab_size": 11 }
11
Python
11
b26f2283379d3bba48d575c1fffd1c3cdeaf64c2
rfc3961.py
209,457
2
23
_zeropad
https://github.com/secdev/scapy.git
Kerberos update (#3688) * Kerberos over TCP * Kerberos: add FAST & PKCA * Many user-friendly improvements * RFC3961 crypto * Summary, Sessions, Examples, Bugs * More tests, _n_fold edge case * Ignore potatoe (kerberos tests) from codespell
17
0
52,681
12