Dataset columns (for string columns, min/max are string lengths):

column           dtype             min    max
complexity       int64             1      56
n_identifiers    int64             1      114
code             string            19     12.7k
path             string            8      134
n_ast_nodes      int64             12     2.35k
ast_errors       string            0      4.01k
repo             string            3      28
documentation    dict
n_words          int64             2      866
language         string (1 class)
vocab_size       int64             2      323
commit_id        string            40     40
file_name        string            5      79
id               int64             243    338k
nloc             int64             1      228
token_counts     int64             5      1.4k
fun_name         string            1      77
url              string            31     60
commit_message   string            3      15.3k
n_whitespaces    int64             1      3.23k
n_ast_errors     int64             0      20
d_id             int64             74     121k
ast_levels       int64             4      29
1
2
def itemdoubleclick(self):
    return self["itemdoubleclick"]
packages/python/plotly/plotly/graph_objs/layout/_legend.py
22
plotly.py
{ "docstring": "\n Determines the behavior on legend item double-click. \"toggle\"\n toggles the visibility of the item clicked on the graph.\n \"toggleothers\" makes the clicked item the sole visible item on\n the graph. False disables legend item double-click\n interactions.\n\n The 'itemdoubleclick' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['toggle', 'toggleothers', False]\n\n Returns\n -------\n Any\n ", "language": "en", "n_whitespaces": 155, "n_words": 60, "vocab_size": 42 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_legend.py
231,584
2
11
itemdoubleclick
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
63,028
7
1
24
def _generate_legacy_events_context_id_query() -> Select:
    # This can be removed once we no longer have event_ids in the states table
    return (
        select(
            *EVENT_COLUMNS,
            literal(value=None, type_=sqlalchemy.String).label("shared_data"),
            States.state,
            States.entity_id,
            States.attributes,
            StateAttributes.shared_attrs,
        )
        .outerjoin(States, (Events.event_id == States.event_id))
        .where(States.last_updated == States.last_changed)
        .where(_not_continuous_entity_matcher())
        .outerjoin(
            StateAttributes, (States.attributes_id == StateAttributes.attributes_id)
        )
    )
homeassistant/components/logbook/__init__.py
151
core
{ "docstring": "Generate a legacy events context id query that also joins states.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
45
Python
41
26177bd080b4eb6d11cfd9fbdd158be36f4983d4
__init__.py
300,319
18
98
_generate_legacy_events_context_id_query
https://github.com/home-assistant/core.git
Convert logbook to use lambda_stmt (#71624)
183
0
99,183
22
14
20
def get_mailbox_list(value):
    mailbox_list = MailboxList()
    while value and value[0] != ';':
        try:
            token, value = get_mailbox(value)
            mailbox_list.append(token)
        except errors.HeaderParseError:
            leader = None
            if value[0] in CFWS_LEADER:
                leader, value = get_cfws(value)
                if not value or value[0] in ',;':
                    mailbox_list.append(leader)
                    mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
                        "empty element in mailbox-list"))
                else:
                    token, value = get_invalid_mailbox(value, ',;')
                    if leader is not None:
                        token[:0] = [leader]
                    mailbox_list.append(token)
                    mailbox_list.defects.append(errors.InvalidHeaderDefect(
                        "invalid mailbox in mailbox-list"))
            elif value[0] == ',':
                mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
                    "empty element in mailbox-list"))
            else:
                token, value = get_invalid_mailbox(value, ',;')
                if leader is not None:
                    token[:0] = [leader]
                mailbox_list.append(token)
                mailbox_list.defects.append(errors.InvalidHeaderDefect(
                    "invalid mailbox in mailbox-list"))
        if value and value[0] not in ',;':
            # Crap after mailbox; treat it as an invalid mailbox.
            # The mailbox info will still be available.
            mailbox = mailbox_list[-1]
            mailbox.token_type = 'invalid-mailbox'
            token, value = get_invalid_mailbox(value, ',;')
            mailbox.extend(token)
            mailbox_list.defects.append(errors.InvalidHeaderDefect(
                "invalid mailbox in mailbox-list"))
        if value and value[0] == ',':
            mailbox_list.append(ListSeparator)
            value = value[1:]
    return mailbox_list, value
python3.10.4/Lib/email/_header_value_parser.py
482
XX-Net
{ "docstring": " mailbox-list = (mailbox *(\",\" mailbox)) / obs-mbox-list\n obs-mbox-list = *([CFWS] \",\") mailbox *(\",\" [mailbox / CFWS])\n\n For this routine we go outside the formal grammar in order to improve error\n handling. We recognize the end of the mailbox list only at the end of the\n value or at a ';' (the group terminator). This is so that we can turn\n invalid mailboxes into InvalidMailbox tokens and continue parsing any\n remaining valid mailboxes. We also allow all mailbox entries to be null,\n and this condition is handled appropriately at a higher level.\n\n ", "language": "en", "n_whitespaces": 123, "n_words": 91, "vocab_size": 70 }
147
Python
69
8198943edd73a363c266633e1aa5b2a9e9c9f526
_header_value_parser.py
223,548
42
283
get_mailbox_list
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
727
0
56,965
20
1
13
def _clone_config(self, config):
    old_path = os.path.abspath(get_testdata(config))
    new_path = os.path.join(self._temp_directory, config)
    shutil.copy2(old_path, new_path)
    return new_path
tests/functional/eks/test_kubeconfig.py
72
aws-cli
{ "docstring": "\n Copies the testdata named config into the temp directory,\n Returns the new path\n\n :param config: The name of the testdata to copy\n :type config: str\n ", "language": "en", "n_whitespaces": 61, "n_words": 25, "vocab_size": 20 }
14
Python
12
1a6b498657ec5dd29ddf4f6b240c6fc0c5d88f7a
test_kubeconfig.py
189,164
6
45
_clone_config
https://github.com/aws/aws-cli.git
Deprecate Kubernetes client API version v1alpha1 Kubernetes has deprecated v1alpha1, v1beta1 has been available since Kubernetes v1.11 (kubernetes/kubernetes#64482), and EKS currently supports Kubernetes versions v1.16 through v1.21. This is a breaking change for clients running versions v1.10 and older, which haven't been supported by EKS since September 2019. "aws eks get-token" now respects the KUBERNETES_EXEC_INFO environment variable and conservatively falls back to v1alpha1, which is supported by Kubernetes versions 1.10 through 1.22 (released upstream August 2021, to be released by EKS in Q4 2021). It also now supports "v1beta1" and "v1". "aws eks update-kubeconfig" now writes "v1beta1" in the kubeconfig which will be supported by Kubernetes until 1.29 (aproximately December 2023). At or around that date, we can change the default version written to kubeconfigs to "v1" Signed-off-by: Micah Hausler <[email protected]>
69
0
46,004
10
2
44
def test_clear_task_instances_dr_state(self, state, last_scheduling, dag_maker):
    with dag_maker(
        'test_clear_task_instances',
        start_date=DEFAULT_DATE,
        end_date=DEFAULT_DATE + datetime.timedelta(days=10),
    ) as dag:
        EmptyOperator(task_id='0')
        EmptyOperator(task_id='1', retries=2)
    dr = dag_maker.create_dagrun(
        state=State.RUNNING,
        run_type=DagRunType.SCHEDULED,
    )
    ti0, ti1 = sorted(dr.task_instances, key=lambda ti: ti.task_id)
    dr.last_scheduling_decision = DEFAULT_DATE
    ti0.state = TaskInstanceState.SUCCESS
    ti1.state = TaskInstanceState.SUCCESS
    session = dag_maker.session
    session.flush()

    # we use order_by(task_id) here because for the test DAG structure of ours
    # this is equivalent to topological sort. It would not work in general case
    # but it works for our case because we specifically constructed test DAGS
    # in the way that those two sort methods are equivalent
    qry = session.query(TI).filter(TI.dag_id == dag.dag_id).order_by(TI.task_id).all()
    clear_task_instances(qry, session, dag_run_state=state, dag=dag)
    session.flush()

    session.refresh(dr)

    assert dr.state == state
    assert dr.start_date is None if state == State.QUEUED else dr.start_date
    assert dr.last_scheduling_decision == last_scheduling
tests/models/test_cleartasks.py
326
airflow
{ "docstring": "Test that DR state is set to None after clear.\n And that DR.last_scheduling_decision is handled OK.\n start_date is also set to None\n ", "language": "en", "n_whitespaces": 43, "n_words": 22, "vocab_size": 16 }
124
Python
95
49e336ae0302b386a2f47269a6d13988382d975f
test_cleartasks.py
47,548
25
204
test_clear_task_instances_dr_state
https://github.com/apache/airflow.git
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
355
0
9,154
15
3
14
def downgrade():
    conn = op.get_bind()
    if conn.dialect.name == "sqlite":
        # in sqlite TEXT and STRING column types are the same
        return
    if conn.dialect.name == "mysql":
        op.alter_column(
            'connection',
            'description',
            existing_type=sa.Text(5000),
            type_=sa.String(length=5000),
            existing_nullable=True,
        )
    else:
        # postgres does not allow size modifier for text type
        op.alter_column(
            'connection',
            'description',
            existing_type=sa.Text(),
            type_=sa.String(length=5000),
            existing_nullable=True,
        )
airflow/migrations/versions/64a7d6477aae_fix_description_field_in_connection_to_.py
165
airflow
{ "docstring": "Unapply Fix description field in ``connection`` to be ``text``", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
50
Python
40
69f6f9e01b6df76c3c8fa266d460324163957887
64a7d6477aae_fix_description_field_in_connection_to_.py
45,466
20
98
downgrade
https://github.com/apache/airflow.git
Autogenerate migration reference doc (#21601) * document airflow version in each alembic migration module and use this to autogen the doc * update each migration module to have the same description used in migration ref (so it can be used in autogen)
224
0
8,593
14
3
8
def _canonicalize_dtype(x64_enabled, dtype):
    try:
        dtype = np.dtype(dtype)
    except TypeError as e:
        raise TypeError(f'dtype {dtype!r} not understood') from e

    if x64_enabled:
        return dtype
    else:
        return _dtype_to_32bit_dtype.get(dtype, dtype)
jax/_src/dtypes.py
83
jax
{ "docstring": "Convert from a dtype to a canonical dtype based on config.x64_enabled.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 9 }
26
Python
24
329de7c9cc1b77f9caacf2163a77a9d8496c379b
dtypes.py
119,122
9
47
_canonicalize_dtype
https://github.com/google/jax.git
Only use config.x64_enabled as the memo cache key for canonicalize_dtype, not any other fields. This saves the time to repeatedly build a tuple as a cache key. Reduces the time for CustomLinearSolveTest.test_custom_linear_solve_pytree on my workstation from 110s to 85s. PiperOrigin-RevId: 422632700
43
0
26,545
12
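A minimal sketch of the behaviour of _canonicalize_dtype above. The _dtype_to_32bit_dtype table is internal to jax and not shown in this row, so the mapping below is an illustrative stand-in (assumption), not the library's exact table:

import numpy as np

# Illustrative stand-in for jax's internal _dtype_to_32bit_dtype table (assumption).
_dtype_to_32bit_dtype = {
    np.dtype("int64"): np.dtype("int32"),
    np.dtype("uint64"): np.dtype("uint32"),
    np.dtype("float64"): np.dtype("float32"),
    np.dtype("complex128"): np.dtype("complex64"),
}


def canonicalize(dtype, x64_enabled=False):
    # Same shape as _canonicalize_dtype: pass dtypes through when x64 is enabled,
    # otherwise map 64-bit dtypes down to their 32-bit counterparts.
    dtype = np.dtype(dtype)
    return dtype if x64_enabled else _dtype_to_32bit_dtype.get(dtype, dtype)


print(canonicalize("float64"))                    # float32
print(canonicalize("float64", x64_enabled=True))  # float64
print(canonicalize("int8"))                       # int8 (unchanged)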
4
7
def copyDataFiles():
    for included_datafile in getIncludedDataFiles():
        # TODO: directories should be resolved to files.
        if (
            not isinstance(included_datafile, (IncludedDataFile))
            or included_datafile.needsCopy()
        ):
            _handleDataFile(
                included_datafile,
            )
nuitka/freezer/IncludedDataFiles.py
62
Nuitka
{ "docstring": "Copy the data files needed for standalone distribution.\n\n Notes:\n This is for data files only, not DLLs or even extension modules,\n those must be registered as entry points, and would not go through\n necessary handling if provided like this.\n ", "language": "en", "n_whitespaces": 66, "n_words": 39, "vocab_size": 35 }
25
Python
25
abfb99b0a05dd76d2ecc6ebc20732a271857c6c8
IncludedDataFiles.py
178,914
9
36
copyDataFiles
https://github.com/Nuitka/Nuitka.git
Plugins: Massive cleanup of data file handling * Move data file handling out of standalone only, allowing support for other modes as well. * Attach logger and tags to data file objects.
111
0
42,859
13
7
19
def _check_pyarrow_version():
    global _VERSION_VALIDATED
    if not _VERSION_VALIDATED:
        if os.environ.get(RAY_DISABLE_PYARROW_VERSION_CHECK, "0") == "1":
            _VERSION_VALIDATED = True
            return
        try:
            import pyarrow
        except ModuleNotFoundError:
            # pyarrow not installed, short-circuit.
            return

        import pkg_resources

        if not hasattr(pyarrow, "__version__"):
            logger.warning(
                "You are using the 'pyarrow' module, but the exact version is unknown "
                "(possibly carried as an internal component by another module). Please "
                f"make sure you are using pyarrow >= {MIN_PYARROW_VERSION}, < "
                f"{MAX_PYARROW_VERSION} to ensure compatibility with Ray Datasets. "
                "If you want to disable this pyarrow version check, set the "
                f"environment variable {RAY_DISABLE_PYARROW_VERSION_CHECK}=1."
            )
        else:
            version = pyarrow.__version__
            if (
                pkg_resources.packaging.version.parse(version)
                < pkg_resources.packaging.version.parse(MIN_PYARROW_VERSION)
            ) or (
                pkg_resources.packaging.version.parse(version)
                >= pkg_resources.packaging.version.parse(MAX_PYARROW_VERSION)
            ):
                raise ImportError(
                    f"Datasets requires pyarrow >= {MIN_PYARROW_VERSION}, < "
                    f"{MAX_PYARROW_VERSION}, but {version} is installed. Reinstall "
                    f'with `pip install -U "pyarrow<{MAX_PYARROW_VERSION}"`. '
                    "If you want to disable this pyarrow version check, set the "
                    f"environment variable {RAY_DISABLE_PYARROW_VERSION_CHECK}=1."
                )
        _VERSION_VALIDATED = True
python/ray/data/_internal/util.py
266
ray
{ "docstring": "Check that pyarrow's version is within the supported bounds.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
149
Python
93
ee0fbf9d43dfa05fdf90ad0515b2671cac16a92b
util.py
134,777
37
134
_check_pyarrow_version
https://github.com/ray-project/ray.git
[Datasets] Add upper bound to pyarrow version check. (#29674) We previously weren't checking that the 7.0.0 pyarrow upper bound was being respected. This PR adds this upper bound check.
603
0
30,413
17
1
12
def cogview_attention(self, attention_scores, alpha=32):
    scaled_attention_scores = attention_scores / alpha
    max_value = scaled_attention_scores.amax(dim=(-1)).unsqueeze(-1)
    new_attention_scores = (scaled_attention_scores - max_value) * alpha
    return nn.Softmax(dim=-1)(new_attention_scores)
src/transformers/models/layoutlmv3/modeling_layoutlmv3.py
94
transformers
{ "docstring": "\n https://arxiv.org/abs/2105.13290 Section 2.4 Stabilization of training: Precision Bottleneck Relaxation\n (PB-Relax). A replacement of the original nn.Softmax(dim=-1)(attention_scores). Seems the new attention_probs\n will result in a slower speed and a little bias. Can use torch.allclose(standard_attention_probs,\n cogview_attention_probs, atol=1e-08) for comparison. The smaller atol (e.g., 1e-08), the better.\n ", "language": "en", "n_whitespaces": 80, "n_words": 44, "vocab_size": 40 }
21
Python
18
31ee80d55673f32c0f5d50936f371e661b74b21a
modeling_layoutlmv3.py
38,773
5
58
cogview_attention
https://github.com/huggingface/transformers.git
Add LayoutLMv3 (#17060) * Make forward pass work * More improvements * Remove unused imports * Remove timm dependency * Improve loss calculation of token classifier * Fix most tests * Add docs * Add model integration test * Make all tests pass * Add LayoutLMv3FeatureExtractor * Improve integration test + make fixup * Add example script * Fix style * Add LayoutLMv3Processor * Fix style * Add option to add visual labels * Make more tokenizer tests pass * Fix more tests * Make more tests pass * Fix bug and improve docs * Fix import of processors * Improve docstrings * Fix toctree and improve docs * Fix auto tokenizer * Move tests to model folder * Move tests to model folder * change default behavior add_prefix_space * add prefix space for fast * add_prefix_spcae set to True for Fast * no space before `unique_no_split` token * add test to hightligh special treatment of added tokens * fix `test_batch_encode_dynamic_overflowing` by building a long enough example * fix `test_full_tokenizer` with add_prefix_token * Fix tokenizer integration test * Make the code more readable * Add tests for LayoutLMv3Processor * Fix style * Add model to README and update init * Apply suggestions from code review * Replace asserts by value errors * Add suggestion by @ducviet00 * Add model to doc tests * Simplify script * Improve README * a step ahead to fix * Update pair_input_test * Make all tokenizer tests pass - phew * Make style * Add LayoutLMv3 to CI job * Fix auto mapping * Fix CI job name * Make all processor tests pass * Make tests of LayoutLMv2 and LayoutXLM consistent * Add copied from statements to fast tokenizer * Add copied from statements to slow tokenizer * Remove add_visual_labels attribute * Fix tests * Add link to notebooks * Improve docs of LayoutLMv3Processor * Fix reference to section Co-authored-by: SaulLu <[email protected]> Co-authored-by: Niels Rogge <[email protected]>
56
0
7,030
13
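A small, self-contained sketch (assuming PyTorch is available) of the PB-Relax trick used by cogview_attention above: dividing by alpha, subtracting the row-wise max, and scaling back up amounts to a constant shift of the logits, so the softmax output matches the standard one while avoiding large intermediate values:

import torch
from torch import nn


def cogview_attention(attention_scores, alpha=32):
    # Scale down, subtract the per-row max, scale back up, then softmax.
    scaled = attention_scores / alpha
    max_value = scaled.amax(dim=-1).unsqueeze(-1)
    return nn.Softmax(dim=-1)((scaled - max_value) * alpha)


scores = torch.randn(2, 4, 8) * 50  # logits with a wide range
standard = nn.Softmax(dim=-1)(scores)
stabilized = cogview_attention(scores)
print(torch.allclose(standard, stabilized, atol=1e-6))  # True, up to a tiny bias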
3
8
def check_planarity(G, counterexample=False):
    planarity_state = LRPlanarity(G)
    embedding = planarity_state.lr_planarity()
    if embedding is None:
        # graph is not planar
        if counterexample:
            return False, get_counterexample(G)
        else:
            return False, None
    else:
        # graph is planar
        return True, embedding
networkx/algorithms/planarity.py
86
networkx
{ "docstring": "Check if a graph is planar and return a counterexample or an embedding.\n\n A graph is planar iff it can be drawn in a plane without\n any edge intersections.\n\n Parameters\n ----------\n G : NetworkX graph\n counterexample : bool\n A Kuratowski subgraph (to proof non planarity) is only returned if set\n to true.\n\n Returns\n -------\n (is_planar, certificate) : (bool, NetworkX graph) tuple\n is_planar is true if the graph is planar.\n If the graph is planar `certificate` is a PlanarEmbedding\n otherwise it is a Kuratowski subgraph.\n\n Examples\n --------\n >>> G = nx.Graph([(0, 1), (0, 2)])\n >>> is_planar, P = nx.check_planarity(G)\n >>> print(is_planar)\n True\n\n When `G` is planar, a `PlanarEmbedding` instance is returned:\n\n >>> P.get_data()\n {0: [1, 2], 1: [0], 2: [0]}\n\n Notes\n -----\n A (combinatorial) embedding consists of cyclic orderings of the incident\n edges at each vertex. Given such an embedding there are multiple approaches\n discussed in literature to drawing the graph (subject to various\n constraints, e.g. integer coordinates), see e.g. [2].\n\n The planarity check algorithm and extraction of the combinatorial embedding\n is based on the Left-Right Planarity Test [1].\n\n A counterexample is only generated if the corresponding parameter is set,\n because the complexity of the counterexample generation is higher.\n\n References\n ----------\n .. [1] Ulrik Brandes:\n The Left-Right Planarity Test\n 2009\n http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.217.9208\n .. [2] Takao Nishizeki, Md Saidur Rahman:\n Planar graph drawing\n Lecture Notes Series on Computing: Volume 12\n 2004\n ", "language": "en", "n_whitespaces": 404, "n_words": 228, "vocab_size": 154 }
35
Python
22
1af7d49d70869081e5cb64d17165652f1b26c57b
planarity.py
176,544
10
50
check_planarity
https://github.com/networkx/networkx.git
Improve documentation of PlanarEmbedding class (#5523) * Improve documentation of PlanarEmbedding * Fix type * Make suggested changes * rst formatting nits. * Update networkx/algorithms/planarity.py Co-authored-by: Dan Schult <[email protected]> * Run black for formatting Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Dan Schult <[email protected]>
107
0
41,953
12
11
9
def draw(self, renderer):
    if not self.get_visible():
        return

    self._recompute_transform()

    width = self.convert_xunits(self.width)
    height = self.convert_yunits(self.height)

    # If the width and height of ellipse are not equal, take into account
    # stretching when calculating angles to draw between
lib/matplotlib/patches.py
74
matplotlib
{ "docstring": "\n Draw the arc to the given *renderer*.\n\n Notes\n -----\n Ellipses are normally drawn using an approximation that uses\n eight cubic Bezier splines. The error of this approximation\n is 1.89818e-6, according to this unverified source:\n\n Lancaster, Don. *Approximating a Circle or an Ellipse Using\n Four Bezier Cubic Splines.*\n\n https://www.tinaja.com/glib/ellipse4.pdf\n\n There is a use case where very large ellipses must be drawn\n with very high accuracy, and it is too expensive to render the\n entire ellipse with enough segments (either splines or line\n segments). Therefore, in the case where either radius of the\n ellipse is large enough that the error of the spline\n approximation will be visible (greater than one pixel offset\n from the ideal), a different technique is used.\n\n In that case, only the visible parts of the ellipse are drawn,\n with each visible arc using a fixed number of spline segments\n (8). The algorithm proceeds as follows:\n\n 1. The points where the ellipse intersects the axes (or figure)\n bounding box are located. (This is done by performing an inverse\n transformation on the bbox such that it is relative to the unit\n circle -- this makes the intersection calculation much easier than\n doing rotated ellipse intersection directly.)\n\n This uses the \"line intersecting a circle\" algorithm from:\n\n Vince, John. *Geometry for Computer Graphics: Formulae,\n Examples & Proofs.* London: Springer-Verlag, 2005.\n\n 2. The angles of each of the intersection points are calculated.\n\n 3. Proceeding counterclockwise starting in the positive\n x-direction, each of the visible arc-segments between the\n pairs of vertices are drawn using the Bezier arc\n approximation technique implemented in `.Path.arc`.\n ", "language": "en", "n_whitespaces": 541, "n_words": 258, "vocab_size": 160 }
36
Python
31
cf995d1304bfa7f660e7158b5121a46e54f869f2
patches.py
108,776
50
404
draw
https://github.com/matplotlib/matplotlib.git
Remove ineffective exclusion of Arcs without parent Axes. The `if not hasattr(self, 'axes'): raise RuntimeError(...)` check was ineffectual, as artists now always have an Axes attribute, which can just be None for some artists. In fact, small Arcs are drawn just fine without a parent Axes; e.g. ``` from pylab import * from matplotlib.patches import * fig = figure() fig.add_artist(Ellipse((.2, .2), .1, .3, angle=45)) # for comparison fig.add_artist(Arc((.2, .2), .1, .3, angle=45, theta1=0, theta2=45)) ``` works just fine. Remove the check, and adjust the docs accordingly. On the other hand, large arcs *did* previously fail, but that occurred a bit further down, when computing `transforms.BboxTransformTo(self.axes.bbox)` (`self.axes` is None --> AttributeError). Fix that by using the figure bbox in that case (as the point is to limit the drawing to the unclipped area, which is the whole figure for Arcs without a parent Axes).
96
0
23,337
9
2
10
def _async_setup_scanner_watchdog(self) -> None:
    self._start_time = self._last_detection = MONOTONIC_TIME()
    if not self._cancel_watchdog:
        self._cancel_watchdog = async_track_time_interval(
            self.hass, self._async_scanner_watchdog, SCANNER_WATCHDOG_INTERVAL
        )
homeassistant/components/bluetooth/base_scanner.py
67
core
{ "docstring": "If something has restarted or updated, we need to restart the scanner.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
19
Python
17
0e2ebfe5c45716250280186234123f170e3bd08c
base_scanner.py
297,460
7
41
_async_setup_scanner_watchdog
https://github.com/home-assistant/core.git
Move bluetooth watchdog into the scanner base class (#83888)
77
0
96,429
11
1
3
def test_image_crafter_index(encoder_doc_array, tmpdir):
tests/unit/helloworld/multimodal/test_executors.py
15
jina
{ "docstring": "In this test, we input one ``DocumentArray`` with one ``Document``,\n and the `craft` method in the ``ImageCrafter`` returns chunks.\n In the ``ImageCrafter``, we filtered out all the modalities and only kept `image/jpeg`.\n So the 2 chunks should left only 1 chunk.\n And the blob value of the ``Document`` is not empty once we finished crafting since\n we converted image uri/datauri to blob.\n ", "language": "en", "n_whitespaces": 80, "n_words": 62, "vocab_size": 49 }
3
Python
3
933415bfa1f9eb89f935037014dfed816eb9815d
test_executors.py
10,208
5
49
test_image_crafter_index
https://github.com/jina-ai/jina.git
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests 
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" 
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request 
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
6
0
1,818
6
2
2
def get_rename_function(mapper):
pandas/core/common.py
13
pandas
{ "docstring": "\n Returns a function that will map names/labels, dependent if mapper\n is a dict, Series or just a function.\n ", "language": "en", "n_whitespaces": 28, "n_words": 18, "vocab_size": 16 }
2
Python
2
830130a543619fe945365fdea5e6e5877fe81c6f
common.py
167,165
3
25
get_rename_function
https://github.com/pandas-dev/pandas.git
TYP: Series.quantile (#47304) * TYP: Series.quantile * common.py
5
0
39,944
6
3
24
def _call_boxer(self, candc_out, verbose=False):
    f = None
    try:
        fd, temp_filename = tempfile.mkstemp(
            prefix="boxer-", suffix=".in", text=True
        )
        f = os.fdopen(fd, "w")
        f.write(candc_out.decode("utf-8"))
    finally:
        if f:
            f.close()

    args = [
        "--box",
        "false",
        "--semantics",
        "drs",
        #'--flat', 'false', # removed from boxer
        "--resolve",
        ["false", "true"][self._resolve],
        "--elimeq",
        ["false", "true"][self._elimeq],
        "--format",
        "prolog",
        "--instantiate",
        "true",
        "--input",
        temp_filename,
    ]
    stdout = self._call(None, self._boxer_bin, args, verbose)
    os.remove(temp_filename)
    return stdout
nltk/sem/boxer.py
242
nltk
{ "docstring": "\n Call the ``boxer`` binary with the given input.\n\n :param candc_out: str output from C&C parser\n :return: stdout\n ", "language": "en", "n_whitespaces": 46, "n_words": 17, "vocab_size": 16 }
60
Python
53
c6d9e0529eecce2c0742ca47135b28e5316611e0
boxer.py
42,474
30
142
_call_boxer
https://github.com/nltk/nltk.git
Update boxer.py Used to have this py2 to py3 error TypeError: write() argument must be str, not bytes
373
0
7,559
12
1
7
def unique_id() -> str:
    return binascii.hexlify(os.urandom(16)).decode("utf-8")
certbot-apache/certbot_apache/_internal/apache_util.py
44
certbot
{ "docstring": " Returns an unique id to be used as a VirtualHost identifier", "language": "en", "n_whitespaces": 11, "n_words": 11, "vocab_size": 11 }
6
Python
6
7d9e9a49005de7961e84d2a7c608db57dbab3046
apache_util.py
186,613
3
24
unique_id
https://github.com/certbot/certbot.git
Add typing to certbot.apache (#9071) * Add typing to certbot.apache Co-authored-by: Adrien Ferrand <[email protected]>
12
0
45,525
11
3
9
def add_auto_adjustable_area(self, use_axes, pad=0.1, adjust_dirs=None):
    if adjust_dirs is None:
        adjust_dirs = ["left", "right", "bottom", "top"]
    for d in adjust_dirs:
        self.append_size(d, Size._AxesDecorationsSize(use_axes, d) + pad)
lib/mpl_toolkits/axes_grid1/axes_divider.py
87
matplotlib
{ "docstring": "\n Add auto-adjustable padding around *use_axes* to take their decorations\n (title, labels, ticks, ticklabels) into account during layout.\n\n Parameters\n ----------\n use_axes : `~.axes.Axes` or list of `~.axes.Axes`\n The Axes whose decorations are taken into account.\n pad : float, optional\n Additional padding in inches.\n adjust_dirs : list of {\"left\", \"right\", \"bottom\", \"top\"}, optional\n The sides where padding is added; defaults to all four sides.\n ", "language": "en", "n_whitespaces": 152, "n_words": 62, "vocab_size": 50 }
24
Python
23
eb12b029ffe2f110540a4338684d1a729d1ddfc5
axes_divider.py
107,683
5
56
add_auto_adjustable_area
https://github.com/matplotlib/matplotlib.git
Document, test, and simplify impl. of auto_adjustable_area. Document behavior of auto_adjustable_area, and slightly modernize the example. Simplify its implementation: `Padded` is just size addition and `GetExtentHelper` and `SizeFromFunc` can reasonably be fused into a single class; none of them are used anywhere else, so just deprecate them as public APIs. Add a test.
67
0
22,866
12
1
12
def test_failure_subschema(self, obj):
    with pytest.raises(validate.ValidationError) as cm:
        validate.validate(validate.attr({"foo": str}), obj)
    assert_validationerror(cm.value, )
tests/test_api_validate.py
75
streamlink
{ "docstring": "\n ValidationError(AttrSchema):\n Could not validate attribute 'foo'\n Context(type):\n Type of 1 should be str, but is int\n ", "language": "en", "n_whitespaces": 76, "n_words": 16, "vocab_size": 16 }
12
Python
12
d09112ab1f6db6aa605650fe1ff6a3028344f90d
test_api_validate.py
187,182
9
44
test_failure_subschema
https://github.com/streamlink/streamlink.git
plugin.api.validate: rewrite tests Completely rewrite tests using pytest, with full coverage
36
0
45,728
14
1
3
def clear_checkbox_id(self, name):
    return name + "_id"
django/forms/widgets.py
23
django
{ "docstring": "\n Given the name of the clear checkbox input, return the HTML id for it.\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 12 }
7
Python
7
9c19aff7c7561e3a82978a272ecdaad40dda5c00
widgets.py
206,026
2
12
clear_checkbox_id
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
21
0
51,329
7
1
7
def done(self):
    with self._condition:
        return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
python3.10.4/Lib/concurrent/futures/_base.py
39
XX-Net
{ "docstring": "Return True of the future was cancelled or finished executing.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
10
Python
10
8198943edd73a363c266633e1aa5b2a9e9c9f526
_base.py
221,588
3
23
done
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
35
0
56,443
9
7
26
def get_data(filters):
    data = []

    conditions = get_conditions(filters)

    salary_slips = frappe.db.sql(
        % (conditions),
        as_dict=1,
    )

    component_type_dict = frappe._dict(
        frappe.db.sql(
        )
    )

    if not len(component_type_dict):
        return []

    entry = frappe.db.sql(
        % (conditions, ", ".join(["%s"] * len(component_type_dict))),
        tuple(component_type_dict.keys()),
        as_dict=1,
    )

    data_list = prepare_data(entry, component_type_dict)

    for d in salary_slips:
        total = 0
        if data_list.get(d.name):
            employee = {
                "employee": data_list.get(d.name).get("employee"),
                "employee_name": data_list.get(d.name).get("employee_name"),
                "pf_account": data_list.get(d.name).get("pf_account"),
            }

            if data_list.get(d.name).get("Provident Fund"):
                employee["pf_amount"] = data_list.get(d.name).get("Provident Fund")
                total += data_list.get(d.name).get("Provident Fund")

            if data_list.get(d.name).get("Additional Provident Fund"):
                employee["additional_pf"] = data_list.get(d.name).get("Additional Provident Fund")
                total += data_list.get(d.name).get("Additional Provident Fund")

            if data_list.get(d.name).get("Provident Fund Loan"):
                employee["pf_loan"] = data_list.get(d.name).get("Provident Fund Loan")
                total += data_list.get(d.name).get("Provident Fund Loan")

            employee["total"] = total

            data.append(employee)

    return data


@frappe.whitelist()
erpnext/regional/report/provident_fund_deductions/provident_fund_deductions.py
586
@frappe.whitelist()
erpnext
{ "docstring": " select sal.name from `tabSalary Slip` sal\n\t\twhere docstatus = 1 %s\n\t\t select name, component_type from `tabSalary Component`\n\t\twhere component_type in ('Provident Fund', 'Additional Provident Fund', 'Provident Fund Loan') select sal.name, sal.employee, sal.employee_name, ded.salary_component, ded.amount\n\t\tfrom `tabSalary Slip` sal, `tabSalary Detail` ded\n\t\twhere sal.name = ded.parent\n\t\tand ded.parentfield = 'deductions'\n\t\tand ded.parenttype = 'Salary Slip'\n\t\tand sal.docstatus = 1 %s\n\t\tand ded.salary_component in (%s)\n\t", "language": "en", "n_whitespaces": 55, "n_words": 63, "vocab_size": 40 }
107
Python
60
494bd9ef78313436f0424b918f200dab8fc7c20b
provident_fund_deductions.py
67,253
52
337
get_data
https://github.com/frappe/erpnext.git
style: format code with black
67
1
14,456
17
2
18
def dag_list_import_errors(args):
    dagbag = DagBag(process_subdir(args.subdir))
    data = []
    for filename, errors in dagbag.import_errors.items():
        data.append({"filepath": filename, "error": errors})
    AirflowConsole().print_as(
        data=data,
        output=args.output,
    )


@cli_utils.action_cli
@suppress_logs_and_warning
airflow/cli/commands/dag_command.py
119
@cli_utils.action_cli @suppress_logs_and_warning
airflow
{ "docstring": "Displays dags with import errors on the command line", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
23
Python
21
e1134590973355549272b1f3a213dbfa29698df7
dag_command.py
45,966
9
65
dag_list_import_errors
https://github.com/apache/airflow.git
Add `list-import-errors` to `airflow dags` command (#22084) This will help users to see the dags with import error and enable scripts process the output
60
1
8,751
12
2
7
def mktime_tz(data):
    if data[9] is None:
        # No zone info, so localtime is better assumption than GMT
        return time.mktime(data[:8] + (-1,))
    else:
        t = calendar.timegm(data)
        return t - data[9]
python3.10.4/Lib/email/_parseaddr.py
79
XX-Net
{ "docstring": "Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
29
Python
25
8198943edd73a363c266633e1aa5b2a9e9c9f526
_parseaddr.py
223,623
6
48
mktime_tz
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
66
0
57,013
12
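A brief usage sketch for mktime_tz above, going through the public wrappers in email.utils (which re-export the _parseaddr helpers):

from email.utils import mktime_tz, parsedate_tz

# parsedate_tz turns an RFC 2822 Date header into a 10-tuple whose last element
# is the UTC offset in seconds; mktime_tz collapses that tuple to a POSIX timestamp.
date_tuple = parsedate_tz("Fri, 09 Nov 2001 01:08:47 -0500")
print(date_tuple[9])          # -18000 (UTC-5 expressed in seconds)
print(mktime_tz(date_tuple))  # 1005286127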
3
9
def identbodychars(cls):
    return "".join(
        sorted(
            set(
                cls.identchars
                + "0123456789"
                + "".join(
                    [c for c in cls._chars_for_ranges if ("_" + c).isidentifier()]
                )
            )
        )
    )
pipenv/patched/notpip/_vendor/pyparsing/unicode.py
86
pipenv
{ "docstring": "\n all characters in this range that are valid identifier body characters,\n plus the digits 0-9\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 15 }
24
Python
18
f3166e673fe8d40277b804d35d77dcdb760fc3b3
unicode.py
20,660
12
48
identbodychars
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
196
0
3,469
21
1
5
def require_tf(test_case):
    return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)
src/transformers/testing_utils.py
37
transformers
{ "docstring": "\n Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.\n ", "language": "en", "n_whitespaces": 22, "n_words": 15, "vocab_size": 15 }
7
Python
7
57e6464ac9a31156f1c93e59107323e6ec01309e
testing_utils.py
37,498
2
20
require_tf
https://github.com/huggingface/transformers.git
Update all require decorators to use skipUnless when possible (#16999)
13
0
6,803
10
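A short usage sketch for the require_tf decorator above; the test class and body are hypothetical and only illustrate the skip-unless-TensorFlow behaviour:

import unittest

from transformers.testing_utils import require_tf


@require_tf
class TFOnlyTest(unittest.TestCase):
    # The whole case is skipped automatically when TensorFlow is not installed.
    def test_constant(self):
        import tensorflow as tf

        self.assertTrue(hasattr(tf, "constant"))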
2
18
def test_constrained_layout23():
    for i in range(2):
        fig = plt.figure(layout="constrained", clear=True, num="123")
        gs = fig.add_gridspec(1, 2)
        sub = gs[0].subgridspec(2, 2)
        fig.suptitle("Suptitle{}".format(i))


@image_comparison(['test_colorbar_location.png'], remove_text=True, style='mpl20')
lib/matplotlib/tests/test_constrainedlayout.py
134
@image_comparison(['test_colorbar_location.png'], remove_text=True, style='mpl20')
matplotlib
{ "docstring": "\n Comment in #11035: suptitle used to cause an exception when\n reusing a figure w/ CL with ``clear=True``.\n ", "language": "en", "n_whitespaces": 27, "n_words": 17, "vocab_size": 17 }
23
Python
20
ec4dfbc3c83866f487ff0bc9c87b0d43a1c02b22
test_constrainedlayout.py
107,180
6
65
test_constrained_layout23
https://github.com/matplotlib/matplotlib.git
ENH: implement and use base layout_engine for more flexible layout.
73
1
22,632
12
1
4
def test_positive_integer_or_none_3():
    assert_raises(Exception, positive_integer_or_none, 'foobar')
tests/driver_tests.py
25
tpot
{ "docstring": "Assert that the TPOT CLI interface's positive_integer_or_none parsing throws an exception when n is not an integer and not None.", "language": "en", "n_whitespaces": 19, "n_words": 20, "vocab_size": 18 }
5
Python
5
388616b6247ca4ea8de4e2f340d6206aee523541
driver_tests.py
181,603
2
13
test_positive_integer_or_none_3
https://github.com/EpistasisLab/tpot.git
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
11
0
43,392
8
2
5
def get_list_display_add_buttons(self, request):
    return self.list_display_add_buttons or self.get_list_display(request)[0]
wagtail/contrib/modeladmin/options.py
36
wagtail
{ "docstring": "\n Return the name of the field/method from list_display where action\n buttons should be added. Defaults to the first item from\n get_list_display()\n ", "language": "en", "n_whitespaces": 50, "n_words": 21, "vocab_size": 18 }
7
Python
7
d10f15e55806c6944827d801cd9c2d53f5da4186
options.py
73,182
2
22
get_list_display_add_buttons
https://github.com/wagtail/wagtail.git
Reformat with black
21
0
15,977
9
1
12
def _update_size_variant(self) -> None:
    width, height = self.size
    position_data = {
        "width": width,
        "height": height,
    }
    self.update(Panel(Align.center(Pretty(position_data)), title="Placeholder"))
src/textual/widgets/_placeholder.py
83
textual
{ "docstring": "Update the placeholder with the \"size\" variant.\n\n This variant shows the the size of the widget.\n ", "language": "en", "n_whitespaces": 30, "n_words": 16, "vocab_size": 12 }
18
Python
16
67947d5806bb3181eba349f0da3fd35e0542d1be
_placeholder.py
185,867
11
48
_update_size_variant
https://github.com/Textualize/textual.git
Fix documentation about the variant 'size'.
75
0
45,216
13
1
3
def mass_matrix_full_implicit(self):
    return self._mass_matrix_full(False)
sympy/physics/mechanics/kane.py
24
sympy
{ "docstring": "The mass matrix of the system, augmented by the kinematic\n differential equations in implicit form.", "language": "en", "n_whitespaces": 21, "n_words": 15, "vocab_size": 14 }
4
Python
4
1e522ee112f19216f367b457b6804fd58b94f28b
kane.py
200,100
2
13
mass_matrix_full_implicit
https://github.com/sympy/sympy.git
redo of #22626 based on feedback
18
0
49,526
7
1
9
def test_import_error(self):
    self.write_settings_with_import_error("settings.py")
    args = ["check", "admin_scripts"]
    out, err = self.run_manage(args)
    self.assertNoOutput(out)
    self.assertOutput(err, "No module named")
    self.assertOutput(err, "foo42bar")
tests/admin_scripts/tests.py
93
django
{ "docstring": "\n import error: manage.py builtin commands shows useful diagnostic info\n when settings with import errors is provided (#14130).\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 16 }
18
Python
16
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,402
7
51
test_import_error
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
67
0
51,952
8
1
12
def test_displacy_parse_spans_with_kb_id_options(en_vocab):
    doc = Doc(en_vocab, words=["Welcome", "to", "the", "Bank", "of", "China"])
    doc.spans["sc"] = [
        Span(doc, 3, 6, "ORG", kb_id="Q790068"),
        Span(doc, 5, 6, "GPE", kb_id="Q148"),
    ]

    spans = displacy.parse_spans(
        doc, {"kb_url_template": "https://wikidata.org/wiki/{}"}
    )
    assert isinstance(spans, dict)
    assert spans["text"] == "Welcome to the Bank of China "
    assert spans["spans"] == [
        {
            "start": 15,
            "end": 28,
            "start_token": 3,
            "end_token": 6,
            "label": "ORG",
            "kb_id": "Q790068",
            "kb_url": "https://wikidata.org/wiki/Q790068",
        },
        {
            "start": 23,
            "end": 28,
            "start_token": 5,
            "end_token": 6,
            "label": "GPE",
            "kb_id": "Q148",
            "kb_url": "https://wikidata.org/wiki/Q148",
        },
    ]
spacy/tests/test_displacy.py
294
spaCy
{ "docstring": "Test that spans with kb_id on a Doc are converted into displaCy's format", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
82
Python
57
a79cd3542b3dd667d8a97293462e22ed26a04ee5
test_displacy.py
111,253
31
165
test_displacy_parse_spans_with_kb_id_options
https://github.com/explosion/spaCy.git
Add displacy support for overlapping Spans (#10332) * Fix docstring for EntityRenderer * Add warning in displacy if doc.spans are empty * Implement parse_spans converter One notable change here is that the default spans_key is sc, and it's set by the user through the options. * Implement SpanRenderer Here, I implemented a SpanRenderer that looks similar to the EntityRenderer except for some templates. The spans_key, by default, is set to sc, but can be configured in the options (see parse_spans). The way I rendered these spans is per-token, i.e., I first check if each token (1) belongs to a given span type and (2) a starting token of a given span type. Once I have this information, I render them into the markup. * Fix mypy issues on typing * Add tests for displacy spans support * Update colors from RGB to hex Co-authored-by: Ines Montani <[email protected]> * Remove unnecessary CSS properties * Add documentation for website * Remove unnecesasry scripts * Update wording on the documentation Co-authored-by: Sofie Van Landeghem <[email protected]> * Put typing dependency on top of file * Put back z-index so that spans overlap properly * Make warning more explicit for spans_key Co-authored-by: Ines Montani <[email protected]> Co-authored-by: Sofie Van Landeghem <[email protected]>
315
0
24,366
11
3
7
def __getitem__(self, key):
    if key is None:
        key = self._key()

    value = self._get_recursive(key)
    if value is None:
        value = self[key] = self.default_factory()
    return value
keras/backend.py
78
keras
{ "docstring": "Gets the value at key (or current context), or sets default value.\n\n Args:\n key: May be `None` or `Graph`object. When `None`, the key is set to\n the current context.\n\n Returns:\n Either the cached or default value.\n ", "language": "en", "n_whitespaces": 86, "n_words": 36, "vocab_size": 27 }
24
Python
14
3613c3defc39c236fb1592c4f7ba1a9cc887343a
backend.py
278,622
7
47
__getitem__
https://github.com/keras-team/keras.git
Remove pylint comments. PiperOrigin-RevId: 452353044
81
0
82,635
11
6
19
def resume_end(self) -> None:
    assert self.trainer.state.fn is not None
    if self.resume_checkpoint_path:
        if self.trainer.state.fn == TrainerFn.FITTING:
            rank_zero_info(f"Restored all states from the checkpoint file at {self.resume_checkpoint_path}")
        elif self.trainer.state.fn in (TrainerFn.VALIDATING, TrainerFn.TESTING, TrainerFn.PREDICTING):
            rank_zero_info(f"Loaded model weights from checkpoint at {self.resume_checkpoint_path}")

    # TODO: remove resume_from_checkpoint_fit_path in v2.0
    if (
        self.trainer.state.fn == TrainerFn.FITTING
        and self.resume_checkpoint_path == self.resume_from_checkpoint_fit_path
    ):
        self.resume_from_checkpoint_fit_path = None
    self.resume_checkpoint_path = None
    self._loaded_checkpoint = {}

    # clear cache after restore
    torch.cuda.empty_cache()

    # wait for all to catch up
    self.trainer.strategy.barrier("CheckpointConnector.resume_end")
pytorch_lightning/trainer/connectors/checkpoint_connector.py
219
lightning
{ "docstring": "Signal the connector that all states have resumed and memory for the checkpoint object can be\n released.", "language": "en", "n_whitespaces": 23, "n_words": 17, "vocab_size": 16 }
76
Python
55
5693a94c320297cf007f3bfd13ce4d7deeb1954a
checkpoint_connector.py
241,668
18
126
resume_end
https://github.com/Lightning-AI/lightning.git
Extend the deprecation of `Trainer(resume_from_checkpoint)` (#11334)
245
0
69,643
15
1
3
def unfrack_path(pathsep=False, follow=True):
lib/ansible/cli/arguments/option_helpers.py
21
ansible
{ "docstring": "Turn an Option's data into a single path in Ansible locations", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
3
Python
3
b1ff0f4ebc7e964f8f67ffc344815a0d23577f45
option_helpers.py
268,472
3
16
unfrack_path
https://github.com/ansible/ansible.git
vault secrets file, keep context when symlink (#78734) * vault secrets file, keep context when symlink fixes #18319 Co-authored-by: Sloane Hertel <[email protected]>
6
0
79,515
6
12
20
def get_freq(self) -> str | None:
    if not self.is_monotonic or not self.index._is_unique:
        return None

    delta = self.deltas[0]
    ppd = periods_per_day(self._reso)
    if delta and _is_multiple(delta, ppd):
        return self._infer_daily_rule()

    # Business hourly, maybe. 17: one day / 65: one weekend
    if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]):
        return "BH"

    # Possibly intraday frequency.  Here we use the
    # original .asi8 values as the modified values
    # will not work around DST transitions.  See #8772
    if not self.is_unique_asi8:
        return None

    delta = self.deltas_asi8[0]
    pph = ppd // 24
    ppm = pph // 60
    pps = ppm // 60
    if _is_multiple(delta, pph):
        # Hours
        return _maybe_add_count("H", delta / pph)
    elif _is_multiple(delta, ppm):
        # Minutes
        return _maybe_add_count("T", delta / ppm)
    elif _is_multiple(delta, pps):
        # Seconds
        return _maybe_add_count("S", delta / pps)
    elif _is_multiple(delta, (pps // 1000)):
        # Milliseconds
        return _maybe_add_count("L", delta / (pps // 1000))
    elif _is_multiple(delta, (pps // 1_000_000)):
        # Microseconds
        return _maybe_add_count("U", delta / (pps // 1_000_000))
    else:
        # Nanoseconds
        return _maybe_add_count("N", delta)
pandas/tseries/frequencies.py
367
pandas
{ "docstring": "\n Find the appropriate frequency string to describe the inferred\n frequency of self.i8values\n\n Returns\n -------\n str or None\n ", "language": "en", "n_whitespaces": 60, "n_words": 17, "vocab_size": 15 }
162
Python
94
e9350a4affbb424aaecad279f638a0dd1584df68
frequencies.py
166,591
35
210
get_freq
https://github.com/pandas-dev/pandas.git
infer_freq handle non-nano (#47126) * infer_freq handle non-nano * remove unused import
487
0
39,834
13
2
5
def require_tensorflow(test_case):
    if not is_tensorflow_available():
        return unittest.skip("test requires TensorFlow")(test_case)
    else:
        return test_case
src/accelerate/test_utils/testing.py
49
accelerate
{ "docstring": "\n Decorator marking a test that requires TensorFlow installed. These tests are skipped when TensorFlow isn't\n installed\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 15 }
12
Python
11
5668270de74a09e5bff15891054f73ddbb1176ac
testing.py
337,338
5
26
require_tensorflow
https://github.com/huggingface/accelerate.git
Add logging capabilities (#293) Co-authored-by: Sylvain Gugger <[email protected]> - Added experiment tracking API, and support for Weights and Biases, TensorBoard, and CometML + Tests - Added `tensorflow` to a new dependency list to be used during tests - Added three new functions in `Accelerator` to interact with the API
35
0
121,034
11
4
12
def _errt(self):
    # Count (UI, OI) pairs for truncation points until we find the segment where (ui, oi) crosses the truncation line
    self.coords = self._get_truncation_coordinates()
    if (0.0, 0.0) in self.coords:
        # Truncation line goes through origo, so ERRT cannot be counted
        if (self.ui, self.oi) != (0.0, 0.0):
            return float("inf")
        else:
            return float("nan")
    if (self.ui, self.oi) == (0.0, 0.0):
        # (ui, oi) is origo; define errt as 0.0
        return 0.0
    # Count the intersection point
    # Note that (self.ui, self.oi) cannot be (0.0, 0.0) and self.coords has different coordinates
    # so we have actual line segments instead of a line segment and a point
    intersection = _count_intersection(
        ((0, 0), (self.ui, self.oi)), self.coords[-2:]
    )
    # Count OP (length of the line from origo to (ui, oi))
    op = sqrt(self.ui**2 + self.oi**2)
    # Count OT (length of the line from origo to truncation line that goes through (ui, oi))
    ot = sqrt(intersection[0] ** 2 + intersection[1] ** 2)
    # OP / OT tells how well the stemming algorithm works compared to just truncating words
    return op / ot
nltk/metrics/paice.py
230
nltk
{ "docstring": "Count Error-Rate Relative to Truncation (ERRT).\n\n :return: ERRT, length of the line from origo to (UI, OI) divided by\n the length of the line from origo to the point defined by the same\n line when extended until the truncation line.\n :rtype: float\n ", "language": "en", "n_whitespaces": 77, "n_words": 42, "vocab_size": 28 }
175
Python
100
0fac0c0f8e4618c2bdd3d2137d5fb8a80f581246
paice.py
42,464
15
157
_errt
https://github.com/nltk/nltk.git
Update black to 22.3.0 The most recent release of Click (8.1.0) was breaking Black. See psf/black#2964
383
0
7,553
13
2
8
def get_tf_version():
    global _TF_VERS  # pylint:disable=global-statement
    if _TF_VERS is None:
        import tensorflow as tf  # pylint:disable=import-outside-toplevel
        _TF_VERS = float(".".join(tf.__version__.split(".")[:2]))  # pylint:disable=no-member
    return _TF_VERS
lib/utils.py
75
faceswap
{ "docstring": " Obtain the major.minor version of currently installed Tensorflow.\n\n Returns\n -------\n float\n The currently installed tensorflow version\n ", "language": "en", "n_whitespaces": 36, "n_words": 16, "vocab_size": 13 }
23
Python
18
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
utils.py
100,370
6
40
get_tf_version
https://github.com/deepfakes/faceswap.git
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
52
0
19,859
16
2
15
def _compile_output(self) -> Union[List[str], List[Tuple[str, int]]]:
    action = self._job.replace("-", "_")
    processor = getattr(self, f"_get_{action}")
    logger.debug("Processor: %s", processor)
    return [item for item in processor()]  # pylint:disable=unnecessary-comprehension
tools/alignments/jobs.py
106
faceswap
{ "docstring": " Compile list of frames that meet criteria\n\n Returns\n -------\n list\n List of filenames or filenames and face indices for the selected criteria\n ", "language": "en", "n_whitespaces": 62, "n_words": 22, "vocab_size": 18 }
25
Python
24
e2a77e7c6e84e81f642cb22f528e25e3f2d2dbc1
jobs.py
101,714
12
63
_compile_output
https://github.com/deepfakes/faceswap.git
Alignments Tool - Typing, Documentation + Re-org
61
0
21,118
10
6
12
def find_submodule_and_param_name(model, long_key, start_prefix):
    if len(start_prefix) > 0 and long_key.startswith(start_prefix):
        long_key = ".".join(long_key.split(".")[1:])

    split_key = long_key.split(".")
    submodule = model
    while len(split_key) > 1:
        if hasattr(submodule, split_key[0]):
            submodule = getattr(submodule, split_key[0])
            del split_key[0]
        else:
            submodule = None
            break
    if submodule == model:
        submodule = None
    return submodule, split_key[0]
src/transformers/modeling_utils.py
178
transformers
{ "docstring": "\n A helper util to find the last sub-module and the param/buffer name. If `start_prefix` is supplied it'll be removed\n from the start of the key\n ", "language": "en", "n_whitespaces": 35, "n_words": 25, "vocab_size": 22 }
47
Python
33
5da33f872913255d64717efe745a053975bbc28e
modeling_utils.py
37,157
15
109
find_submodule_and_param_name
https://github.com/huggingface/transformers.git
[modeling utils] revamp `from_pretrained(..., low_cpu_mem_usage=True)` + tests (#16657) * add low_cpu_mem_usage tests * wip: revamping * wip * install /usr/bin/time * wip * cleanup * cleanup * cleanup * cleanup * cleanup * fix assert * put the wrapper back * cleanup; switch to bert-base-cased * Trigger CI * Trigger CI
140
0
6,748
14
1
4
def test_task_group_context_mix():
    from airflow.decorators import task
tests/utils/test_task_group.py
21
airflow
{ "docstring": "Test cases to check nested TaskGroup context manager with taskgroup decorator", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
6
Python
6
49e336ae0302b386a2f47269a6d13988382d975f
test_task_group.py
47,694
50
269
test_task_group_context_mix
https://github.com/apache/airflow.git
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
12
0
9,216
6
2
20
def add_noise_to_input(self, sample, sigma, generator=None):
    if self.s_min <= sigma <= self.s_max:
        gamma = min(self.s_churn / self.num_inference_steps, 2**0.5 - 1)
    else:
        gamma = 0

    # sample eps ~ N(0, S_noise^2 * I)
    eps = self.s_noise * torch.randn(sample.shape, generator=generator).to(sample.device)
    sigma_hat = sigma + gamma * sigma
    sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

    return sample_hat, sigma_hat
src/diffusers/schedulers/scheduling_karras_ve.py
159
diffusers
{ "docstring": "\n Explicit Langevin-like \"churn\" step of adding noise to the sample according to\n a factor gamma_i ≥ 0 to reach a higher noise level sigma_hat = sigma_i + gamma_i*sigma_i.\n ", "language": "en", "n_whitespaces": 50, "n_words": 28, "vocab_size": 24 }
58
Python
41
dd10da76a78e9566d12ddf1eb5aac90021b7e51d
scheduling_karras_ve.py
336,293
9
107
add_noise_to_input
https://github.com/huggingface/diffusers.git
Add an alternative Karras et al. stochastic scheduler for VE models (#160) * karras + VE, not flexible yet * Fix inputs incompatibility with the original unet * Roll back sigma scaling * Apply suggestions from code review * Old comment * Fix doc
136
0
120,864
13
5
19
def _getPythonForSconsExePath():
    python_exe = Options.getPythonPathForScons()
    if python_exe is not None:
        return python_exe

    scons_supported_pythons = ("3.5", "3.6", "3.7", "3.8", "3.9", "3.10")
    if not Utils.isWin32Windows():
        scons_supported_pythons += ("2.7", "2.6")

    # Our inline copy needs no other module, just the right version of Python is needed.
    python_for_scons = findInstalledPython(
        python_versions=scons_supported_pythons, module_name=None, module_version=None
    )

    if python_for_scons is None:
        if Utils.isWin32Windows():
            scons_python_requirement = "Python 3.5 or higher"
        else:
            scons_python_requirement = "Python 2.6, 2.7 or Python >= 3.5"

        Tracing.scons_logger.sysexit(
            % scons_python_requirement
        )

    return python_for_scons.getPythonExe()


@contextlib.contextmanager
nuitka/build/SconsInterface.py
192
@contextlib.contextmanager
Nuitka
{ "docstring": "Find a way to call any Python that works for Scons.\n\n Scons needs it as it doesn't support all Python versions.\n \\\nError, while Nuitka works with older Python, Scons does not, and therefore\nNuitka needs to find a %s executable, so please install\nit.\n\nYou may provide it using option \"--python-for-scons=path_to_python.exe\"\nin case it is not visible in registry, e.g. due to using uninstalled\nAnaconda Python.\n", "language": "en", "n_whitespaces": 66, "n_words": 67, "vocab_size": 54 }
79
Python
56
c4ce69f97f7fefbcf637e9e59b6df056ad03eb16
SconsInterface.py
178,460
28
102
_getPythonForSconsExePath
https://github.com/Nuitka/Nuitka.git
Scons: Refactor Python scan for major cleanup * This is in preparation of making it reusable for onefile compression which also has a simular need.
202
1
42,705
12
1
7
def test_func(self, qapp):
    pytest.importorskip("qutebrowser.qt.opengl")
    version.opengl_info()
tests/unit/utils/test_version.py
36
qutebrowser
{ "docstring": "Simply call version.opengl_info() and see if it doesn't crash.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
5
Python
5
d387b1a1084b9649009e5cffb9d71facc80bb41f
test_version.py
321,555
3
19
test_func
https://github.com/qutebrowser/qutebrowser.git
tests: Adjust most imports
26
0
117,800
8
1
11
def classifiers(self):
    url = self.repository+'?:action=list_classifiers'
    response = urllib.request.urlopen(url)
    log.info(self._read_pypi_response(response))
python3.10.4/Lib/distutils/command/register.py
60
XX-Net
{ "docstring": " Fetch the list of classifiers from the server.\n ", "language": "en", "n_whitespaces": 16, "n_words": 8, "vocab_size": 7 }
9
Python
8
8198943edd73a363c266633e1aa5b2a9e9c9f526
register.py
222,796
4
34
classifiers
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
37
0
56,745
9
1
2
def yanchor(self):
    return self["yanchor"]
packages/python/plotly/plotly/graph_objs/bar/marker/_colorbar.py
22
plotly.py
{ "docstring": "\n Sets this color bar's vertical position anchor This anchor\n binds the `y` position to the \"top\", \"middle\" or \"bottom\" of\n the color bar. Defaults to \"middle\" when `orientation` is \"v\"\n and \"bottom\" when `orientation` is \"h\".\n\n The 'yanchor' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['top', 'middle', 'bottom']\n\n Returns\n -------\n Any\n ", "language": "en", "n_whitespaces": 148, "n_words": 60, "vocab_size": 45 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_colorbar.py
228,764
2
11
yanchor
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
60,437
7
4
5
def _debugger_window_is_open():
    if _Debugger.debugger is None:
        return False
    debugger = _Debugger.debugger
    if debugger.popout_window or debugger.watcher_window:
        return True
    return False
PySimpleGUI.py
54
PySimpleGUI
{ "docstring": "\n Determines if one of the debugger window is currently open\n :return: returns True if the popout window or the main debug window is open\n :rtype: (bool)\n ", "language": "en", "n_whitespaces": 39, "n_words": 26, "vocab_size": 19 }
19
Python
14
37c3afc8ca0dc0057a23ab512ee8b879074dd119
PySimpleGUI.py
212,794
7
32
_debugger_window_is_open
https://github.com/PySimpleGUI/PySimpleGUI.git
ButtonMenu.Click aliased added. Debugger - automatically adds a timeout to read calls if a debug window is open. Still need to handle user-level multi-window support.
48
0
53,407
7
1
4
def detected_faces(self) -> List["DetectedFace"]:
    return self._detected_faces
plugins/extract/pipeline.py
28
faceswap
{ "docstring": "list: A list of :class:`~lib.align.DetectedFace` objects in the :attr:`image`. ", "language": "en", "n_whitespaces": 9, "n_words": 9, "vocab_size": 9 }
6
Python
6
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
pipeline.py
101,353
3
15
detected_faces
https://github.com/deepfakes/faceswap.git
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
20
0
20,768
6
4
18
def _check_input_folder(self) -> bool:
    if not os.path.exists(self._args.input_dir):
        logger.error("Input location %s not found.", self._args.input_dir)
        sys.exit(1)
    if (os.path.isfile(self._args.input_dir) and
            os.path.splitext(self._args.input_dir)[1].lower() in _video_extensions):
        logger.info("Input Video: %s", self._args.input_dir)
        retval = True
    else:
        logger.info("Input Directory: %s", self._args.input_dir)
        retval = False
    return retval
scripts/fsmedia.py
186
faceswap
{ "docstring": " Check whether the input is a folder or video.\n\n Returns\n -------\n bool\n ``True`` if the input is a video otherwise ``False``\n ", "language": "en", "n_whitespaces": 61, "n_words": 21, "vocab_size": 17 }
37
Python
28
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
fsmedia.py
101,393
19
113
_check_input_folder
https://github.com/deepfakes/faceswap.git
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
153
0
20,808
15
3
58
def _background_extract(self, output_folder, progress_queue):
    _io = dict(saver=ImagesSaver(get_folder(output_folder), as_bytes=True),
               loader=ImagesLoader(self._input_location, count=self._alignments.frames_count))

    for frame_idx, (filename, image) in enumerate(_io["loader"].load()):
        logger.trace("Outputting frame: %s: %s", frame_idx, filename)
        src_filename = os.path.basename(filename)
        frame_name = os.path.splitext(src_filename)[0]
        progress_queue.put(1)

        for face_idx, face in enumerate(self._frame_faces[frame_idx]):
            output = f"{frame_name}_{face_idx}.png"
            aligned = AlignedFace(face.landmarks_xy, image=image, centering="head", size=512)  # TODO user selectable size
            meta = dict(alignments=face.to_png_meta(),
                        source=dict(alignments_version=self._alignments.version,
                                    original_filename=output,
                                    face_index=face_idx,
                                    source_filename=src_filename,
                                    source_is_video=self._globals.is_video,
                                    source_frame_dims=image.shape[:2]))
            b_image = encode_image(aligned.face, ".png", metadata=meta)
            _io["saver"].save(output, b_image)
    _io["saver"].close()
tools/manual/detected_faces.py
366
faceswap
{ "docstring": " Perform the background extraction in a thread so GUI doesn't become unresponsive.\n\n Parameters\n ----------\n output_folder: str\n The location to save the output faces to\n progress_queue: :class:`queue.Queue`\n The queue to place incremental counts to for updating the GUI's progress bar\n ", "language": "en", "n_whitespaces": 97, "n_words": 39, "vocab_size": 33 }
65
Python
56
5e73437be47f2410439a3c6716de96354e6a0c94
detected_faces.py
101,259
24
232
_background_extract
https://github.com/deepfakes/faceswap.git
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
575
0
20,679
18
1
1
def test_default_kwargs():
tests/unit/test_isolation.py
12
pyinstaller
{ "docstring": "\n Verify that default keyword-only arguments are properly passed to the isolated function call.\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 13 }
2
Python
2
3ba0aaf983f5223000a713c9275ea66e21f78b11
test_isolation.py
262,759
8
67
test_default_kwargs
https://github.com/pyinstaller/pyinstaller.git
tests: add a test for calling isolated function with default (kw)args Add tests that show that current implementation does not transfer default arguments (function.__defaults__) nor default keyword-only arguments (function.__kwdefaults__) to the child process, resulting in a missing-positional-argument error unless all optional arguments are explicitly provided.
5
0
77,348
6
3
12
def _tune_legacy_checkpoint_score_attr(self) -> Optional[str]:
    if self.checkpoint_score_attribute is None:
        return self.checkpoint_score_attribute
    prefix = ""
    if self.checkpoint_score_order == MIN:
        prefix = "min-"
    return f"{prefix}{self.checkpoint_score_attribute}"


# Alias for backwards compatibility

deprecation_message = (
    "`CheckpointStrategy` is deprecated and will be removed in "
    "the future. Please use `ray.air.config.CheckpointStrategy` "
    "instead."
)


@Deprecated(message=deprecation_message)
@dataclass
python/ray/util/ml_utils/checkpoint_manager.py
111
@Deprecated(message=deprecation_message) @dataclass
ray
{ "docstring": "Same as ``checkpoint_score_attr`` in ``tune.run``.\n\n Only used for Legacy API compatibility.\n ", "language": "en", "n_whitespaces": 25, "n_words": 11, "vocab_size": 11 }
49
Python
41
dc7ed086a5038775e378b32cb31fb4a79f418dd9
checkpoint_manager.py
123,916
11
38
_tune_legacy_checkpoint_score_attr
https://github.com/ray-project/ray.git
[AIR] More checkpoint configurability, `Result` extension (#25943) This PR: * Allows the user to set `keep_checkpoints_num` and `checkpoint_score_attr` in `RunConfig` using the `CheckpointStrategy` dataclass * Adds two new fields to the `Result` object - `best_checkpoints` - a list of saved best checkpoints as determined by `CheckpointingConfig`.
110
1
27,474
9
1
13
def test_receive_data_before_server_connected(tctx):
    assert (
        Playbook(tcp.TCPLayer(tctx), hooks=False)
        << OpenConnection(tctx.server)
        >> DataReceived(tctx.client, b"hello!")
        >> reply(None, to=-2)
        << SendData(tctx.server, b"hello!")
    )
test/mitmproxy/proxy/layers/test_tcp.py
93
mitmproxy
{ "docstring": "\n assert that data received before a server connection is established\n will still be forwarded.\n ", "language": "en", "n_whitespaces": 24, "n_words": 14, "vocab_size": 14 }
18
Python
15
b3587b52b25077f68116b9852b041d33e7fc6601
test_tcp.py
251,916
8
63
test_receive_data_before_server_connected
https://github.com/mitmproxy/mitmproxy.git
make it black!
62
0
73,888
14
2
25
def demo_tp(rank, args):
    print(f"Running basic Megatron style TP example on rank {rank}.")
    setup(rank, args.world_size)
    # create a sharding plan based on the given world_size.
    module_sharding_plan = _get_toy_module_sharding_plan(
        args.world_size
    )

    # create model and move it to GPU with id rank
    model = ToyModel().cuda(rank)
    # Shard the module based on created plan.
    shard_module(model, module_sharding_plan)
    # Create a optimizer for the sharded module.
    optimizer = _get_toy_module_optim(model, 0.002)

    # Perform a num of iterations of forward/backward
    # and optimizations for the sharded module.
    for _ in range(args.iter_nums):
        inp = torch.rand(20, 10).cuda(rank)
        output = model(inp)
        output.sum().backward()
        optimizer.step()

    cleanup()
distributed/sharded_tensor/tensor_parallel.py
176
examples
{ "docstring": "\n Main body of the demo of a basic version of tensor parallel by using\n PyTorch native sharded tensor APIs.\n ", "language": "en", "n_whitespaces": 29, "n_words": 19, "vocab_size": 16 }
94
Python
67
9ba53df5a19131e6926027b2e73aaa77cec17272
tensor_parallel.py
82,841
15
103
demo_tp
https://github.com/pytorch/examples.git
Gh/fduwjj/2/base (#1007) * test ghstack [ghstack-poisoned] * Update base for Update on "[PT-D] Add an example for Megatron-LM style example" [ghstack-poisoned] * Update base for Update on "[PT-D] Add an example for Megatron-LM style example" [ghstack-poisoned] * Update base for Update on "[PT-D] Add an example for Megatron-LM style example" [ghstack-poisoned] * Update base for Update on "[PT-D] Add an example for Megatron-LM style example" [ghstack-poisoned] * [PT-D] Add an example for Megatron-LM style example (#1006) * [PT-D] Add an example for Megatron-LM style example [ghstack-poisoned] * Update on "[PT-D] Add an example for Megatron-LM style example" [ghstack-poisoned]
177
0
17,550
12
6
21
def makelink(self, tarinfo, targetpath):
    try:
        # For systems that support symbolic and hard links.
        if tarinfo.issym():
            os.symlink(tarinfo.linkname, targetpath)
        else:
            # See extract().
            if os.path.exists(tarinfo._link_target):
                os.link(tarinfo._link_target, targetpath)
            else:
                self._extract_member(self._find_link_target(tarinfo),
                                     targetpath)
    except symlink_exception:
        if tarinfo.issym():
            linkpath = os.path.join(os.path.dirname(tarinfo.name),
                                    tarinfo.linkname)
        else:
            linkpath = tarinfo.linkname
    else:
        try:
            self._extract_member(self._find_link_target(tarinfo),
                                 targetpath)
        except KeyError:
            raise ExtractError("unable to resolve link inside archive")
pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py
219
pipenv
{ "docstring": "Make a (symbolic) link called targetpath. If it cannot be created\n (platform limitation), we try to make a copy of the referenced file\n instead of a link.\n ", "language": "en", "n_whitespaces": 52, "n_words": 27, "vocab_size": 24 }
54
Python
39
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
tarfile.py
21,391
22
133
makelink
https://github.com/pypa/pipenv.git
Vendor in pip 22.1.2
432
0
3,804
17
1
3
def get_rules(self) -> RulesMap:
src/textual/css/styles.py
16
textual
{ "docstring": "Get the rules in a mapping.\n\n Returns:\n RulesMap: A TypedDict of the rules.\n ", "language": "en", "n_whitespaces": 38, "n_words": 13, "vocab_size": 12 }
4
Python
4
116f3735b68e8dd293dba4b3a183f98afbd0b167
styles.py
182,279
6
8
get_rules
https://github.com/Textualize/textual.git
docstrings
11
0
43,780
6
1
8
def test_fillna_frame(self):
    super().test_fillna_frame()


unhashable = pytest.mark.xfail(reason="Unhashable")
pandas/tests/extension/json/test_json.py
47
pandas
{ "docstring": "We treat dictionaries as a mapping in fillna, not a scalar.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
6
Python
6
24652cf178c12562585639cba39c46d62b95f107
test_json.py
165,750
2
13
test_fillna_frame
https://github.com/pandas-dev/pandas.git
TST: Convert skip -> xfail (#46427)
19
0
39,706
9
2
10
def push(self, exit):
    # We use an unbound method rather than a bound method to follow
    # the standard lookup behaviour for special methods.
    _cb_type = type(exit)

    try:
        exit_method = _cb_type.__exit__
    except AttributeError:
        # Not a context manager, so assume it's a callable.
        self._push_exit_callback(exit)
    else:
        self._push_cm_exit(exit, exit_method)
    return exit  # Allow use as a decorator.
python3.10.4/Lib/contextlib.py
75
XX-Net
{ "docstring": "Registers a callback with the standard __exit__ method signature.\n\n Can suppress exceptions the same way __exit__ method can.\n Also accepts any object with an __exit__ method (registering a call\n to the method instead of the object itself).\n ", "language": "en", "n_whitespaces": 65, "n_words": 37, "vocab_size": 26 }
55
Python
46
8198943edd73a363c266633e1aa5b2a9e9c9f526
contextlib.py
221,712
9
42
push
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
156
0
56,482
10
1
17
async def test_filter_non_existent_column(state_api_manager):
    data_source_client = state_api_manager.data_source_client
    id = b"1234"
    data_source_client.get_all_worker_info.return_value = GetAllWorkerInfoReply(
        worker_table_data=[
            generate_worker_data(id, pid=1),
            generate_worker_data(b"12345", pid=2),
        ],
        total=2,
    )
    result = await state_api_manager.list_workers(
        option=create_api_options(filters=[("exit_type", "=", "INTENDED_SYSTEM_EXIT")])
    )
    assert len(result.result) == 0
python/ray/tests/test_state_api.py
138
ray
{ "docstring": "Test when the non existent column is given, it handles that properly.\n\n Related: https://github.com/ray-project/ray/issues/26811\n ", "language": "en", "n_whitespaces": 20, "n_words": 14, "vocab_size": 14 }
32
Python
28
37f4692aa805eba230e2879c098320111788a64c
test_state_api.py
125,623
14
85
test_filter_non_existent_column
https://github.com/ray-project/ray.git
[State Observability] Fix "No result for get crashing the formatting" and "Filtering not handled properly when key missing in the datum" #26881 Fix two issues No result for get crashing the formatting Filtering not handled properly when key missing in the datum
106
0
27,927
16
11
24
def _url(self, hashed_name_func, name, force=False, hashed_files=None):
    if settings.DEBUG and not force:
        hashed_name, fragment = name, ""
    else:
        clean_name, fragment = urldefrag(name)
        if urlsplit(clean_name).path.endswith("/"):  # don't hash paths
            hashed_name = name
        else:
            args = (clean_name,)
            if hashed_files is not None:
                args += (hashed_files,)
            hashed_name = hashed_name_func(*args)

    final_url = super().url(hashed_name)

    # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
    # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
    query_fragment = "?#" in name  # [sic!]
    if fragment or query_fragment:
        urlparts = list(urlsplit(final_url))
        if fragment and not urlparts[4]:
            urlparts[4] = fragment
        if query_fragment and not urlparts[3]:
            urlparts[2] += "?"
        final_url = urlunsplit(urlparts)

    return unquote(final_url)
django/contrib/staticfiles/storage.py
261
django
{ "docstring": "\n Return the non-hashed URL in DEBUG mode.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
94
Python
60
9c19aff7c7561e3a82978a272ecdaad40dda5c00
storage.py
204,364
22
156
_url
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
356
0
50,711
15
1
6
def set_raw_scale(self, in_, scale):
    self.__check_input(in_)
    self.raw_scale[in_] = scale
code/deep/BJMMD/caffe/python/caffe/io.py
39
transferlearning
{ "docstring": "\n Set the scale of raw features s.t. the input blob = input * scale.\n While Python represents images in [0, 1], certain Caffe models\n like CaffeNet and AlexNet represent images in [0, 255] so the raw_scale\n of these models must be 255.\n\n Parameters\n ----------\n in_ : which input to assign this scale factor\n scale : scale coefficient\n ", "language": "en", "n_whitespaces": 121, "n_words": 57, "vocab_size": 44 }
8
Python
8
cc4d0564756ca067516f71718a3d135996525909
io.py
60,255
3
24
set_raw_scale
https://github.com/jindongwang/transferlearning.git
Balanced joint maximum mean discrepancy for deep transfer learning
29
0
12,047
8
4
12
def list_distinfo_files(self):
    base = os.path.dirname(self.path)
    for path, checksum, size in self._get_records():
        # XXX add separator or use real relpath algo
        if not os.path.isabs(path):
            path = os.path.join(base, path)
        if path.startswith(self.path):
            yield path
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py
108
transferlearning
{ "docstring": "\n Iterates over the ``RECORD`` entries and returns paths for each line if\n the path is pointing to a file located in the ``.dist-info`` directory\n or one of its subdirectories.\n\n :returns: iterator of paths\n ", "language": "en", "n_whitespaces": 69, "n_words": 33, "vocab_size": 29 }
31
Python
28
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
database.py
61,929
7
66
list_distinfo_files
https://github.com/jindongwang/transferlearning.git
upd; format
115
0
12,754
13
2
8
def validate(self):
    if not self._check_schedule_interval_matches_timetable():
        raise AirflowDagInconsistent(
            f"inconsistent schedule: timetable {self.timetable.summary!r} "
            f"does not match schedule_interval {self.schedule_interval!r}",
        )
    self.params.validate()
    self.timetable.validate()
airflow/models/dag.py
85
airflow
{ "docstring": "Validate the DAG has a coherent setup.\n\n This is called by the DAG bag before bagging the DAG.\n ", "language": "en", "n_whitespaces": 32, "n_words": 18, "vocab_size": 15 }
20
Python
19
a1a9a8f9a3adc63e783cf3fd699066f35e488d4f
dag.py
43,013
8
37
validate
https://github.com/apache/airflow.git
Check bag DAG schedule_interval match tiemtable (#23113) This guards against the DAG's timetable or schedule_interval from being changed after it's created. Validation is done by creating a timetable and check its summary matches schedule_interval. The logic is not bullet-proof, especially if a custom timetable does not provide a useful summary. But this is the best we can do.
100
0
7,791
14
1
6
def get_tokenizer(*args, **kwargs):
    return AlbertTokenizer.from_pretrained(pretrained_model_name_or_path='albert-base-v1', *args, **kwargs)
modules/text/language_model/albert-base-v1/module.py
43
PaddleHub
{ "docstring": "\n Gets the tokenizer that is customized for this module.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
7
Python
7
26e56d098d7cebdc910e84ce1d0d1a909c1988c3
module.py
48,752
2
25
get_tokenizer
https://github.com/PaddlePaddle/PaddleHub.git
add albert-base-v1
21
0
9,591
9
1
10
def exit_with_error(message, code=1, **kwargs):
    kwargs.setdefault("style", "red")
    app.console.print(message, **kwargs)
    raise typer.Exit(code)
src/prefect/cli/base.py
66
prefect
{ "docstring": "\n Utility to print a stylized error message and exit with a non-zero code\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 12 }
10
Python
10
c0cb1fee460c1bded9e3eb741ad7979402844bf8
base.py
55,137
4
39
exit_with_error
https://github.com/PrefectHQ/prefect.git
Update `set` command; allow CLI `console` object to be patched
22
0
11,226
8
12
67
async def contracts_command(ctx, ticker="", past_transaction_days="", raw=""):
    try:
        # Debug user input
        if cfg.DEBUG:
            logger.debug(
                "!stocks.gov.contracts %s %s %s", ticker, past_transaction_days, raw
            )

        if past_transaction_days == "":
            past_transaction_days = 10
        else:
            if not past_transaction_days.lstrip("-").isnumeric():
                raise Exception("Number has to be an integer")
            past_transaction_days = int(past_transaction_days)

        if raw in ["false", "False", "FALSE", ""]:
            raw = False
        if raw in ["true", "True", "TRUE"]:
            raw = True
        if raw not in [True, False]:
            raise Exception("raw argument has to be true or false")

        if ticker == "":
            raise Exception("A ticker is required")

        # Retrieve Data
        df_contracts = quiverquant_model.get_government_trading("contracts", ticker)

        if df_contracts.empty:
            raise Exception("No government contracts found")

        # Output Data
        df_contracts["Date"] = pd.to_datetime(df_contracts["Date"]).dt.date

        df_contracts = df_contracts[
            df_contracts["Date"].isin(
                df_contracts["Date"].unique()[:past_transaction_days]
            )
        ]

        df_contracts.drop_duplicates(inplace=True)
        fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)

        df_contracts.groupby("Date").sum().div(1000).plot(kind="bar", rot=0, ax=ax)
        ax.set_ylabel("Amount ($1k)")
        ax.set_title(f"Sum of latest government contracts to {ticker}")
        fig.tight_layout()

        plt.savefig("gov_contracts.png")
        uploaded_image = gst_imgur.upload_image("gov_contracts.png", title="something")
        image_link = uploaded_image.link
        if cfg.DEBUG:
            logger.debug("Image URL: %s", image_link)
        title = f"Stocks: [quiverquant.com] Contracts by {ticker}"
        if raw:
            description = df_contracts.to_string()
            embed = discord.Embed(
                title=title, description=description, colour=cfg.COLOR
            )
        else:
            embed = discord.Embed(title=title, colour=cfg.COLOR)
        embed.set_author(
            name=cfg.AUTHOR_NAME,
            icon_url=cfg.AUTHOR_ICON_URL,
        )
        embed.set_image(url=image_link)
        os.remove("gov_contracts.png")

        await ctx.send(embed=embed)

    except Exception as e:
        embed = discord.Embed(
            title=f"ERROR Stocks: [quiverquant.com] Contracts by {ticker}",
            colour=cfg.COLOR,
            description=e,
        )
        embed.set_author(
            name=cfg.AUTHOR_NAME,
            icon_url=cfg.AUTHOR_ICON_URL,
        )

        await ctx.send(embed=embed)
discordbot/stocks/government/contracts.py
766
OpenBBTerminal
{ "docstring": "Displays contracts associated with tickers [quiverquant.com]", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
200
Python
131
f40ba0d256a78ab2b8461f0df3a9a52ca7dc5704
contracts.py
281,178
66
444
contracts_command
https://github.com/OpenBB-finance/OpenBBTerminal.git
Bot logging fix (#1105) * Write bot logs to stdout instead of a file Heroku's logging uses the stdout and has problems with files * Send "you snooze you lose" only if debug flag is enabled * Replace print statements with logger entries in the economy menu * Add logging to bot menu command calls * Silence bandit warnings about the REPLACE_ME token * Organize imports and update logging in economy menu * Organize imports and update logging in dps menu * Organize imports and update logging in dd menu * Organize imports and update logging in gov menu * Organize imports and update logging in options menu * Organize imports and update logging in screener menu * Organize imports and update logging in ta menu * Revert automatic import sorting * Add logging to the options reaction helper
799
0
83,584
16
1
14
async def test_storage_is_updated_on_add(hass, hass_storage, utcnow):
    await setup_test_component(hass, create_lightbulb_service)

    entity_map: EntityMapStorage = hass.data[ENTITY_MAP]
    hkid = "00:00:00:00:00:00"

    # Is in memory store updated?
    assert hkid in entity_map.storage_data

    # Is saved out to store?
    await flush_store(entity_map.store)
    assert hkid in hass_storage[ENTITY_MAP]["data"]["pairings"]
tests/components/homekit_controller/test_storage.py
96
core
{ "docstring": "Test entity map storage is cleaned up on adding an accessory.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
37
Python
28
b9c8d65940ec47a82332b8b1a67301da018ccadf
test_storage.py
317,256
7
56
test_storage_is_updated_on_add
https://github.com/home-assistant/core.git
Restore accessory state into pairing using new HKC methods (#75276)
64
0
115,831
9
1
2
def disable_run_logger():
    with disable_logger("prefect.flow_run"), disable_logger("prefect.task_run"):
        yield
src/prefect/logging/loggers.py
39
prefect
{ "docstring": "\n Gets both `prefect.flow_run` and `prefect.task_run` and disables them\n within the context manager. Upon exiting the context manager, both loggers\n are returned to its original state.\n ", "language": "en", "n_whitespaces": 38, "n_words": 25, "vocab_size": 21 }
6
Python
6
895a5203623c205ede2ee0c31f99be72822d5351
loggers.py
59,074
3
17
disable_run_logger
https://github.com/PrefectHQ/prefect.git
Add docstring
19
0
11,863
10
1
10
def add_to_apply_calls(self, func, *args, length=None, width=None, **kwargs):
    return PandasOnDaskDataframePartition(
        self._data,
        call_queue=self.call_queue + [[func, args, kwargs]],
        length=length,
        width=width,
    )
modin/core/execution/dask/implementations/pandas_on_dask/partitioning/partition.py
76
modin
{ "docstring": "\n Add a function to the call queue.\n\n Parameters\n ----------\n func : callable\n Function to be added to the call queue.\n *args : iterable\n Additional positional arguments to be passed in `func`.\n length : distributed.Future or int, optional\n Length, or reference to length, of wrapped ``pandas.DataFrame``.\n width : distributed.Future or int, optional\n Width, or reference to width, of wrapped ``pandas.DataFrame``.\n **kwargs : dict\n Additional keyword arguments to be passed in `func`.\n\n Returns\n -------\n PandasOnDaskDataframePartition\n A new ``PandasOnDaskDataframePartition`` object.\n\n Notes\n -----\n The keyword arguments are sent as a dictionary.\n ", "language": "en", "n_whitespaces": 259, "n_words": 87, "vocab_size": 54 }
18
Python
18
39b36eb2a2e3bf3d612933e1c78545a8bb28cde4
partition.py
154,339
7
54
add_to_apply_calls
https://github.com/modin-project/modin.git
PERF-#4794: Compute caches in `_propagate_index_objs` (#4888) Co-authored-by: Mahesh Vashishtha <[email protected]> Signed-off-by: Myachev <[email protected]>
83
0
35,932
11
1
10
def use_numexpr_cb(key) -> None:
    from pandas.core.computation import expressions

    expressions.set_use_numexpr(cf.get_option(key))


use_numba_doc = 
pandas/core/config_init.py
48
pandas
{ "docstring": "\n: bool\n Use the numba engine option for select operations if it is installed,\n the default is False\n Valid values: False,True\n", "language": "en", "n_whitespaces": 29, "n_words": 21, "vocab_size": 19 }
11
Python
11
9612375ca28ade056f15d4338f1bfde5d045c9fc
config_init.py
167,699
3
26
use_numexpr_cb
https://github.com/pandas-dev/pandas.git
TYP: return values in core/*.py (#47587) * TYP: return values in core/*.py * fix test * to_html * to_html part 2 * DataFrame.query * more overloads * fix query? * increase stacklevel by one * fix rename_axis * and an overload for DataFrame.eval * address comments * fix typevar
16
0
40,082
9
1
11
def test_unavailable_models(self):
    state = migrations.state.ProjectState()
    # Unavailable contenttypes.ContentType
    with self.assertNumQueries(0):
        create_permissions(self.app_config, verbosity=0, apps=state.apps)
    # Unavailable auth.Permission
    state = migrations.state.ProjectState(real_apps={"contenttypes"})
    with self.assertNumQueries(0):
        create_permissions(self.app_config, verbosity=0, apps=state.apps)
tests/auth_tests/test_management.py
130
django
{ "docstring": "\n #24075 - Permissions shouldn't be created or deleted if the ContentType\n or Permission models aren't available.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 15 }
24
Python
15
9c19aff7c7561e3a82978a272ecdaad40dda5c00
test_management.py
201,336
7
77
test_unavailable_models
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
95
0
49,920
11
1
3
def info(self):
    return self.headers
python3.10.4/Lib/http/client.py
19
XX-Net
{ "docstring": "Returns an instance of the class mimetools.Message containing\n meta-information associated with the URL.\n\n When the method is HTTP, these headers are those returned by\n the server at the head of the retrieved HTML page (including\n Content-Length and Content-Type).\n\n When the method is FTP, a Content-Length header will be\n present if (as is now usual) the server passed back a file\n length in response to the FTP retrieval request. A\n Content-Type header will be present if the MIME type can be\n guessed.\n\n When the method is local-file, returned headers will include\n a Date representing the file's last-modified time, a\n Content-Length giving file size, and a Content-Type\n containing a guess at the file's type. See also the\n description of the mimetools module.\n\n ", "language": "en", "n_whitespaces": 225, "n_words": 120, "vocab_size": 74 }
4
Python
4
8198943edd73a363c266633e1aa5b2a9e9c9f526
client.py
217,717
2
10
info
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
18
0
54,898
6
3
23
def get_timestamped_export_dir(export_dir_base):
    attempts = 0
    while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS:
        timestamp = int(time.time())

        result_dir = tf.io.gfile.join(
            tf.compat.as_bytes(export_dir_base),
            tf.compat.as_bytes(str(timestamp)),
        )
        if not tf.compat.v1.gfile.Exists(result_dir):
            # Collisions are still possible (though extremely unlikely): this
            # directory is not actually created yet, but it will be almost
            # instantly on return from this function.
            return result_dir
        time.sleep(1)
        attempts += 1
        logging.warning(
            "Directory {} already exists; retrying (attempt {}/{})".format(
                tf.compat.as_str(result_dir),
                attempts,
                MAX_DIRECTORY_CREATION_ATTEMPTS,
            )
        )
    raise RuntimeError(
        "Failed to obtain a unique export directory name after "
        f"{MAX_DIRECTORY_CREATION_ATTEMPTS} attempts."
    )
keras/saving/utils_v1/export_utils.py
191
keras
{ "docstring": "Builds a path to a new subdirectory within the base directory.\n\n Each export is written into a new subdirectory named using the\n current time. This guarantees monotonically increasing version\n numbers even across multiple runs of the pipeline.\n The timestamp used is the number of seconds since epoch UTC.\n\n Args:\n export_dir_base: A string containing a directory to write the exported\n graph and checkpoints.\n Returns:\n The full path of the new subdirectory (which is not actually created yet).\n\n Raises:\n RuntimeError: if repeated attempts fail to obtain a unique timestamped\n directory name.\n ", "language": "en", "n_whitespaces": 145, "n_words": 89, "vocab_size": 67 }
83
Python
69
84afc5193d38057e2e2badf9c889ea87d80d8fbf
export_utils.py
276,300
23
112
get_timestamped_export_dir
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
301
0
81,622
14
1
7
def compare(self, a, b):
    a = _convert_other(a, raiseit=True)
    return a.compare(b, context=self)
python3.10.4/Lib/_pydecimal.py
48
XX-Net
{ "docstring": "Compares values numerically.\n\n If the signs of the operands differ, a value representing each operand\n ('-1' if the operand is less than zero, '0' if the operand is zero or\n negative zero, or '1' if the operand is greater than zero) is used in\n place of that operand for the comparison instead of the actual\n operand.\n\n The comparison is then effected by subtracting the second operand from\n the first and then returning a value according to the result of the\n subtraction: '-1' if the result is less than zero, '0' if the result is\n zero or negative zero, or '1' if the result is greater than zero.\n\n >>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))\n Decimal('-1')\n >>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))\n Decimal('0')\n >>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))\n Decimal('0')\n >>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))\n Decimal('1')\n >>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))\n Decimal('1')\n >>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))\n Decimal('-1')\n >>> ExtendedContext.compare(1, 2)\n Decimal('-1')\n >>> ExtendedContext.compare(Decimal(1), 2)\n Decimal('-1')\n >>> ExtendedContext.compare(1, Decimal(2))\n Decimal('-1')\n ", "language": "en", "n_whitespaces": 339, "n_words": 143, "vocab_size": 67 }
11
Python
11
8198943edd73a363c266633e1aa5b2a9e9c9f526
_pydecimal.py
219,736
3
31
compare
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
32
0
55,755
9
1
5
async def count_daily_user_type(self) -> Dict[str, int]:
synapse/storage/databases/main/registration.py
23
synapse
{ "docstring": "\n Counts 1) native non guest users\n 2) native guests users\n 3) bridged users\n who registered on the homeserver in the past 24 hours\n ", "language": "en", "n_whitespaces": 73, "n_words": 23, "vocab_size": 19 }
6
Python
6
1783156dbcf4164692e66275d1c29857c434995b
registration.py
248,015
11
27
count_daily_user_type
https://github.com/matrix-org/synapse.git
Add some type hints to datastore (#12423) * Add some type hints to datastore * newsfile * change `Collection` to `List` * refactor return type of `select_users_txn` * correct type hint in `stream.py` * Remove `Optional` in `select_users_txn` * remove not needed return type in `__init__` * Revert change in `get_stream_id_for_event_txn` * Remove import from `Literal`
13
0
72,046
6
3
23
def __iter__(self) -> Iterator[tuple[Hashable, NDFrameT]]:
    keys = self.keys
    if isinstance(keys, list) and len(keys) == 1:
        warnings.warn(
            (
                "In a future version of pandas, a length 1 "
                "tuple will be returned when iterating over a "
                "a groupby with a grouper equal to a list of "
                "length 1. Don't supply a list with a single grouper "
                "to avoid this warning."
            ),
            FutureWarning,
            stacklevel=find_stack_level(),
        )
    return self.grouper.get_iterator(self._selected_obj, axis=self.axis)


# To track operations that expand dimensions, like ohlc
OutputFrameOrSeries = TypeVar("OutputFrameOrSeries", bound=NDFrame)
pandas/core/groupby/groupby.py
140
pandas
{ "docstring": "\n Groupby iterator.\n\n Returns\n -------\n Generator yielding sequence of (name, subsetted object)\n for each group\n ", "language": "en", "n_whitespaces": 57, "n_words": 14, "vocab_size": 14 }
82
Python
68
14de3fd9ca4178bfce5dd681fa5d0925e057c04d
groupby.py
168,124
23
74
__iter__
https://github.com/pandas-dev/pandas.git
DEPR: returning tuple when grouping by a list containing single element (#47761) * DOC #45443 edited the documentation of where/mask functions * DOC #45443 edited the documentation of where/mask functions * Update generic.py * ENH: add suffixes argument to DataFrame.compare #44354 * Edited the tests * space fixing * Update shared_docs.py * Update series.py * Update series.py * invalid argument tests * issue reference * syntax editing * grammar fixing * edit doc * editting doc * Update 02_read_write.rst * Update 02_read_write.rst * Update v1.5.0.rst * Update v1.5.0.rst * np * 1.5.0 rst * created tests for invalid input * space * space * space * editing test * deprecated * syntax * editting existed examples * syntax * edit past tests * editting pivot * ex * editing internal use * pivot * warning expected * warning * ignore doc warning * doc * tests * ignore warning * test * plotting * test * doc * doc * white space * doc * doc * doc * doc * stacklevel * pivot * pivot * cookbook * flake8 * flake8 * what's new * syntax * itr * car names * test edit * fixing tests * fixing tests * flake8 * rst edit * __iter__ edit * flake8 * flake8 * space * test * merge * ignore the type * mypy * type * self.keys * tests * . * . * adding keys * order * attribute * ignores * Update hist.py * ignore * . * . * . * . * . * Update doc/source/whatsnew/v1.5.0.rst Co-authored-by: Richard Shadrach <[email protected]> Co-authored-by: Richard Shadrach <[email protected]>
285
0
40,213
12
1
17
def _transform_url(url, transform_netloc):
    # type: (str, Callable[[str], Tuple[Any, ...]]) -> Tuple[str, NetlocTuple]
    purl = urllib.parse.urlsplit(url)
    netloc_tuple = transform_netloc(purl.netloc)
    # stripped url
    url_pieces = (purl.scheme, netloc_tuple[0], purl.path, purl.query, purl.fragment)
    surl = urllib.parse.urlunsplit(url_pieces)
    return surl, cast("NetlocTuple", netloc_tuple)
.venv/lib/python3.8/site-packages/pip/_internal/utils/misc.py
109
transferlearning
{ "docstring": "Transform and replace netloc in a url.\n\n transform_netloc is a function taking the netloc and returning a\n tuple. The first element of this tuple is the new netloc. The\n entire tuple is returned.\n\n Returns a tuple containing the transformed url as item 0 and the\n original tuple returned by transform_netloc as item 1.\n ", "language": "en", "n_whitespaces": 71, "n_words": 53, "vocab_size": 35 }
35
Python
31
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
misc.py
61,234
6
69
_transform_url
https://github.com/jindongwang/transferlearning.git
upd; format
59
0
12,455
9
1
31
def test_update_notice_user_name_when_changed(self) -> None:
    server_notice_request_content = {
        "user_id": self.other_user,
        "content": {"msgtype": "m.text", "body": "test msg one"},
    }

    self.make_request(
        "POST",
        self.url,
        access_token=self.admin_user_tok,
        content=server_notice_request_content,
    )

    # simulate a change in server config after a server restart.
    new_display_name = "new display name"
    self.server_notices_manager._config.servernotices.server_notices_mxid_display_name = (
        new_display_name
    )
    self.server_notices_manager.get_or_create_notice_room_for_user.cache.invalidate_all()

    self.make_request(
        "POST",
        self.url,
        access_token=self.admin_user_tok,
        content=server_notice_request_content,
    )

    invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)
    notice_room_id = invited_rooms[0].room_id
    self.helper.join(
        room=notice_room_id, user=self.other_user, tok=self.other_user_token
    )

    notice_user_state_in_room = self.helper.get_state(
        notice_room_id,
        "m.room.member",
        self.other_user_token,
        state_key="@notices:test",
    )
    self.assertEqual(notice_user_state_in_room["displayname"], new_display_name)
tests/rest/admin/test_server_notice.py
282
synapse
{ "docstring": "\n Tests that existing server notices user name in room is updated after\n server notice config changes.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 15 }
74
Python
57
2e2d8cc2f9b9af5f8b48d75e22c474e08feca236
test_server_notice.py
247,957
38
175
test_update_notice_user_name_when_changed
https://github.com/matrix-org/synapse.git
Update the server notices user profile in room if changed. (#12115)
383
0
72,024
11
13
39
def assign_wrt_overlaps(self, overlaps, gt_labels=None):
    num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)

    # 1. assign -1 by default
    assigned_gt_inds = overlaps.new_full((num_bboxes, ), -1, dtype=torch.long)

    if num_gts == 0 or num_bboxes == 0:
        # No ground truth or boxes, return empty assignment
        max_overlaps = overlaps.new_zeros((num_bboxes, ))
        if num_gts == 0:
            # No truth, assign everything to background
            assigned_gt_inds[:] = 0
        if gt_labels is None:
            assigned_labels = None
        else:
            assigned_labels = overlaps.new_full((num_bboxes, ), -1, dtype=torch.long)
        return AssignResult(
            num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)

    # for each anchor, which gt best overlaps with it
    # for each anchor, the max iou of all gts
    max_overlaps, argmax_overlaps = overlaps.max(dim=0)
    # for each gt, which anchor best overlaps with it
    # for each gt, the max iou of all proposals
    gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)

    # 2. assign negative: below
    # the negative inds are set to be 0
    if isinstance(self.neg_iou_thr, float):
        assigned_gt_inds[(max_overlaps >= 0)
                         & (max_overlaps < self.neg_iou_thr)] = 0
    elif isinstance(self.neg_iou_thr, tuple):
        assert len(self.neg_iou_thr) == 2
        assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])
                         & (max_overlaps < self.neg_iou_thr[1])] = 0

    # 3. assign positive: above positive IoU threshold
    pos_inds = max_overlaps >= self.pos_iou_thr
    assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1

    if self.match_low_quality:
        # Low-quality matching will overwrite the assigned_gt_inds assigned
        # in Step 3. Thus, the assigned gt might not be the best one for
        # prediction.
        # For example, if bbox A has 0.9 and 0.8 iou with GT bbox 1 & 2,
        # bbox 1 will be assigned as the best target for bbox A in step 3.
        # However, if GT bbox 2's gt_argmax_overlaps = A, bbox A's
        # assigned_gt_inds will be overwritten to be bbox 2.
        # This might be the reason that it is not used in ROI Heads.
        for i in range(num_gts):
            if gt_max_overlaps[i] >= self.min_pos_iou:
                if self.gt_max_assign_all:
                    max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]
                    assigned_gt_inds[max_iou_inds] = i + 1
                else:
                    assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1

    if gt_labels is not None:
        assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
        pos_inds = torch.nonzero(
            assigned_gt_inds > 0, as_tuple=False).squeeze()
        if pos_inds.numel() > 0:
            assigned_labels[pos_inds] = gt_labels[
                assigned_gt_inds[pos_inds] - 1]
    else:
        assigned_labels = None

    return AssignResult(
        num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
mmdet/core/bbox/assigners/max_iou_assigner.py
593
mmdetection
{ "docstring": "Assign w.r.t. the overlaps of bboxes with gts.\n\n Args:\n overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,\n shape(k, n).\n gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ).\n\n Returns:\n :obj:`AssignResult`: The assign result.\n ", "language": "en", "n_whitespaces": 104, "n_words": 35, "vocab_size": 32 }
342
Python
168
9bf37f509ddf6aea1be3a4ad19036f96b9fc3902
max_iou_assigner.py
243,941
50
379
assign_wrt_overlaps
https://github.com/open-mmlab/mmdetection.git
fix typos in comment (#7124) bbox A's assigned_gt_inds will be overwritten to be bbox 2 instead of bbox B (In the previous content, bbox B was not mentioned).
1,286
0
70,154
17
7
7
def _rewrite_warnings(cls, record):
    if record.levelno == 30 and record.funcName == "warn" and record.module == "ag_logging":
        # TF 2.3 in Conda is imported with the wrong gast(0.4 when 0.3.3 should be used). This
        # causes warnings in autograph. They don't appear to impact performance so de-elevate
        # warning to debug
        record.levelno = 10
        record.levelname = "DEBUG"

    if record.levelno == 30 and (record.funcName == "_tfmw_add_deprecation_warning"
                                 or record.module in ("deprecation", "deprecation_wrapper")):
        # Keras Deprecations.
        record.levelno = 10
        record.levelname = "DEBUG"

    return record
lib/logger.py
134
faceswap
{ "docstring": " Change certain warning messages from WARNING to DEBUG to avoid passing non-important\n information to output.\n\n Parameters\n ----------\n record: :class:`logging.LogRecord`\n The log record to check for rewriting\n\n Returns\n -------\n :class:`logging.LogRecord`\n The log rewritten or untouched record\n\n ", "language": "en", "n_whitespaces": 114, "n_words": 35, "vocab_size": 28 }
79
Python
55
afec52309326304f4323029039e49bfcf928ef43
logger.py
100,733
9
74
_rewrite_warnings
https://github.com/deepfakes/faceswap.git
Bugfixes: - Stats graph - Handle NaNs in data - logger - de-elevate matplotlib font messages
231
0
20,188
11
12
21
def _find_alignments(self) -> str:
    fname = self._args.alignments_file
    frames = self._args.frames_dir
    if fname and os.path.isfile(fname) and os.path.splitext(fname)[-1].lower() == ".fsa":
        return fname
    if fname:
        logger.error("Not a valid alignments file: '%s'", fname)
        sys.exit(1)

    if not frames or not os.path.exists(frames):
        logger.error("Not a valid frames folder: '%s'. Can't scan for alignments.", frames)
        sys.exit(1)

    fname = "alignments.fsa"
    if os.path.isdir(frames) and os.path.exists(os.path.join(frames, fname)):
        return fname

    if os.path.isdir(frames) or os.path.splitext(frames)[-1] not in _video_extensions:
        logger.error("Can't find a valid alignments file in location: %s", frames)
        sys.exit(1)

    fname = f"{os.path.splitext(frames)[0]}_{fname}"
    if not os.path.exists(fname):
        logger.error("Can't find a valid alignments file for video: %s", frames)
        sys.exit(1)

    return fname
tools/alignments/alignments.py
360
faceswap
{ "docstring": " If an alignments folder is required and hasn't been provided, scan for a file based on\n the video folder.\n\n Exits if an alignments file cannot be located\n\n Returns\n -------\n str\n The full path to an alignments file\n ", "language": "en", "n_whitespaces": 91, "n_words": 37, "vocab_size": 31 }
95
Python
50
2d312a9db228c025d0bd2ea7a4f747a2c644b5d8
alignments.py
101,635
32
204
_find_alignments
https://github.com/deepfakes/faceswap.git
Minor updates and fixups - Mask Tool - Typing + BiSeNet mask update fix - Alignments Tool - Auto search for alignments file
289
0
21,043
13
21
46
def build(self, input_shape):
    if self._is_graph_network:
        super().build(input_shape)
        return

    if input_shape is None:
        raise ValueError(
            "Input shape must be defined when calling `build()` on "
            "a `Model` subclass."
        )
    valid_types = (tuple, list, tf.TensorShape, dict)
    if not isinstance(input_shape, valid_types):
        raise ValueError(
            "Specified input shape is not one of the valid types. "
            "Please specify a batch input shape of type tuple or "
            "list of input shapes. User provided "
            "input type: {}.".format(type(input_shape))
        )

    if input_shape and not self.inputs:
        # We create placeholders for the `None`s in the shape and build the model
        # in a Graph. Since tf.Variable is compatible with both eager execution
        # and graph building, the variables created after building the model in
        # a Graph are still valid when executing eagerly.
        if tf.executing_eagerly():
            graph = tf.__internal__.FuncGraph("build_graph")
        else:
            graph = backend.get_graph()
        with graph.as_default():
            if isinstance(input_shape, list) and all(
                d is None or isinstance(d, int) for d in input_shape
            ):
                input_shape = tuple(input_shape)
            if isinstance(input_shape, list):
                x = [
                    base_layer_utils.generate_placeholders_from_shape(shape)
                    for shape in input_shape
                ]
            elif isinstance(input_shape, dict):
                x = {
                    k: base_layer_utils.generate_placeholders_from_shape(
                        shape
                    )
                    for k, shape in input_shape.items()
                }
            else:
                x = base_layer_utils.generate_placeholders_from_shape(
                    input_shape
                )

            kwargs = {}
            call_signature = self._call_spec.full_argspec
            call_args = call_signature.args
            # Exclude `self`, `inputs`, and any argument with a default value.
            if len(call_args) > 2:
                if call_signature.defaults:
                    call_args = call_args[2 : -len(call_signature.defaults)]
                else:
                    call_args = call_args[2:]
                for arg in call_args:
                    if arg == "training":
                        # Case where `training` is a positional arg with no default.
                        kwargs["training"] = False
                    else:
                        # Has invalid call signature with unknown positional arguments.
                        raise ValueError(
                            "Currently, you cannot build your model if it has "
                            "positional or keyword arguments that are not "
                            "inputs to the model, but are required for its "
                            "`call()` method. Instead, in order to instantiate "
                            "and build your model, `call()` your model on real "
                            "tensor data with all expected call arguments. The argument "
                            "for `call()` can be a single list/tuple that contains "
                            "multiple inputs."
                        )
            elif len(call_args) < 2:
                # Signature without `inputs`.
                raise ValueError(
                    "You can only call `build()` on a model if its `call()` "
                    "method accepts an `inputs` argument."
                )
            try:
                self.call(x, **kwargs)
            except (tf.errors.InvalidArgumentError, TypeError) as e:
                raise ValueError(
                    "You cannot build your model by calling `build` "
                    "if your layers do not support float type inputs. "
                    "Instead, in order to instantiate and build your "
                    "model, call your model on real tensor data (of "
                    "the correct dtype).\n\nThe actual error from "
                    f"`call` is: {e}."
                )
    super().build(input_shape)
keras/engine/training.py
609
keras
{ "docstring": "Builds the model based on input shapes received.\n\n This is to be used for subclassed models, which do not know at instantiation\n time what their inputs look like.\n\n This method only exists for users who want to call `model.build()` in a\n standalone way (as a substitute for calling the model on real data to\n build it). It will never be called by the framework (and thus it will\n never throw unexpected errors in an unrelated workflow).\n\n Args:\n input_shape: Single tuple, `TensorShape` instance, or list/dict of shapes,\n where shapes are tuples, integers, or `TensorShape` instances.\n\n Raises:\n ValueError:\n 1. In case of invalid user-provided data (not of type tuple,\n list, `TensorShape`, or dict).\n 2. If the model requires call arguments that are agnostic\n to the input shapes (positional or keyword arg in call signature).\n 3. If not all layers were properly built.\n 4. If float type inputs are not supported within the layers.\n\n In each of these cases, the user should build their model by calling it\n on real tensor data.\n ", "language": "en", "n_whitespaces": 349, "n_words": 169, "vocab_size": 117 }
414
Python
227
84afc5193d38057e2e2badf9c889ea87d80d8fbf
training.py
271,605
82
345
build
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
2,068
0
80,825
20
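A minimal usage sketch for the standalone `build()` path documented in the record above; the subclassed model is hypothetical and only shows building from a shape without real tensor data:

import tensorflow as tf

class TwoLayer(tf.keras.Model):  # hypothetical subclassed model
    def __init__(self):
        super().__init__()
        self.d1 = tf.keras.layers.Dense(8, activation="relu")
        self.d2 = tf.keras.layers.Dense(1)

    def call(self, inputs):
        return self.d2(self.d1(inputs))

model = TwoLayer()
model.build((None, 4))  # creates the weights from the shape alone, no data needed
model.summary()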
1
7
def check(self) -> bool: modified = self._get_modified() changed = modified != self._modified self._modified = modified return changed
src/textual/file_monitor.py
50
textual
{ "docstring": "Check the monitored file. Return True if it was changed.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
17
Python
11
7df1c123e9fbc8641052a30ba74282f9d9ec1870
file_monitor.py
184,610
6
29
check
https://github.com/Textualize/textual.git
docstrings
52
0
44,712
8
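A self-contained sketch of the change-detection idea behind `check()`; `_get_modified` is private to Textual, so this stand-in polls a file's mtime and is not the Textual implementation:

import os

class MtimeMonitor:
    def __init__(self, path):
        self._path = path
        self._modified = os.path.getmtime(path)

    def check(self) -> bool:
        modified = os.path.getmtime(self._path)
        changed = modified != self._modified
        self._modified = modified
        return changed

monitor = MtimeMonitor(__file__)
monitor.check()  # False until the file's mtime changes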
1
3
def _new_training(self): self.should_training_stop = False
paddlenlp/trainer/trainer_callback.py
21
PaddleNLP
{ "docstring": "Internal method that resets the variable for a new training.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
5
Python
5
44a290e94d1becd1f09fddc3d873f9e19c9d6919
trainer_callback.py
323,160
2
11
_new_training
https://github.com/PaddlePaddle/PaddleNLP.git
[Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761) * add some datasets for finetune. * support fine tune for all tastks. * add trainer prototype. * init verison for paddlenlp trainer. * refine trainer. * update for some details. * support multi-cards training evaluation. * support load from ckpt. * support for export inference model. * first version of trainer. * seq cls support clue. * trainer support for token classification and question answersing tasks. * fix as reviews. Co-authored-by: Zeyu Chen <[email protected]>
19
0
118,391
7
1
4
def set_state(self, state): raise NotImplementedError()
mitmproxy/coretypes/serializable.py
22
mitmproxy
{ "docstring": "\n Set object state to the given state. Consumes the passed state.\n May return a `dataclasses.FrozenInstanceError` if the object is immutable.\n ", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 16 }
5
Python
5
07a40208a32cb2d48a1f2a24d2569894b5a378a0
serializable.py
253,285
2
12
set_state
https://github.com/mitmproxy/mitmproxy.git
`rm -rf stateobject`
19
0
74,053
7
1
5
def get_next_event_id(self, event, snuba_filter): raise NotImplementedError
src/sentry/eventstore/base.py
20
sentry
{ "docstring": "\n Gets the next event given a current event and some conditions/filters.\n Returns a tuple of (project_id, event_id)\n\n Arguments:\n event (Event): Event object\n snuba_filter (Filter): Filter\n ", "language": "en", "n_whitespaces": 68, "n_words": 25, "vocab_size": 22 }
6
Python
6
94c896a4a3663abbd31775957f1aa5448fde5491
base.py
98,544
2
12
get_next_event_id
https://github.com/getsentry/sentry.git
ref: clean up sentry flake8 plugin (#33847) * fix: Remove unused `# noqa` lint disable comments * ref: clean up sentry flake8 plugin - remove S005: pyupgrade handles this for us - remove `pycodestyle` handling: flake8 does this natively - clean up the ignore list and use extend-ignore
20
0
19,582
6
7
26
def center(self, frequency=1000): equal_energy_fr = self.__class__(name='equal_energy', frequency=self.frequency.copy(), raw=self.raw.copy()) equal_energy_fr.interpolate() interpolator = InterpolatedUnivariateSpline(np.log10(equal_energy_fr.frequency), equal_energy_fr.raw, k=1) if type(frequency) in [list, np.ndarray] and len(frequency) > 1: # Use the average of the gain values between the given frequencies as the difference to be subtracted diff = np.mean(equal_energy_fr.raw[np.logical_and( equal_energy_fr.frequency >= frequency[0], equal_energy_fr.frequency <= frequency[1] )]) else: if type(frequency) in [list, np.ndarray]: # List or array with only one element frequency = frequency[0] # Use the gain value at the given frequency as the difference to be subtracted diff = interpolator(np.log10(frequency)) self.raw -= diff if len(self.smoothed): self.smoothed -= diff if len(self.error): self.error += diff if len(self.error_smoothed): self.error_smoothed += diff # Everything but raw, smoothed, errors and target is affected by centering, reset them self.reset(raw=False, smoothed=False, error=False, error_smoothed=False, target=False) return -diff
research/neo_peq/legacy_frequency_response.py
353
AutoEq
{ "docstring": "Removed bias from frequency response.\n\n Args:\n frequency: Frequency which is set to 0 dB. If this is a list with two values then an average between the two\n frequencies is set to 0 dB.\n\n Returns:\n Gain shifted\n ", "language": "en", "n_whitespaces": 102, "n_words": 37, "vocab_size": 30 }
125
Python
87
9120cdffe618c6c2ff16fe6a311b6a1367efdbc8
legacy_frequency_response.py
162,744
22
225
center
https://github.com/jaakkopasanen/AutoEq.git
Added PEQ configs to CLI and function interfaces. Improved default value handling for PEQ parameters and added more predefined configs. Removed legacy PEQ optimization. Fixed readme write. Improved shelf filter initialization. Added plot method to PEQ. Notebook for comparing old and new optimizers. Bug fixes.
375
0
39,282
15
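A self-contained numpy sketch of the centering behaviour described in the docstring above (illustrative arrays, not the AutoEq API itself):

import numpy as np

freq = np.array([20.0, 1000.0, 20000.0])
gain = np.array([-3.0, 1.0, 2.0])

# Single frequency: subtract the gain interpolated at 1 kHz on a log-frequency axis.
diff = np.interp(np.log10(1000.0), np.log10(freq), gain)
centered = gain - diff            # -> [-4., 0., 1.]

# Frequency range: subtract the mean gain between 100 Hz and 10 kHz instead.
mask = (freq >= 100) & (freq <= 10000)
centered_range = gain - gain[mask].mean()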
3
6
def additional_resources_per_worker(self): return { k: v for k, v in self._resources_per_worker_not_none.items() if k not in ["CPU", "GPU"] }
python/ray/air/config.py
56
ray
{ "docstring": "Resources per worker, not including CPU or GPU resources.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
18
Python
16
b3878e26d765e28dd7c69abadbd856181037db97
config.py
124,636
6
33
additional_resources_per_worker
https://github.com/ray-project/ray.git
[AIR] Fix `ResourceChangingScheduler` not working with AIR (#26307) This PR ensures that the new trial resources set by `ResourceChangingScheduler` are respected by the train loop logic by modifying the scaling config to match. Previously, even though trials had their resources updated, the scaling config was not modified which lead to eg. new workers not being spawned in the `DataParallelTrainer` even though resources were available. In order to accomplish this, `ScalingConfigDataClass` is updated to allow equality comparisons with other `ScalingConfigDataClass`es (using the underlying PGF) and to create a `ScalingConfigDataClass` from a PGF. Please note that this is an internal only change intended to actually make `ResourceChangingScheduler` work. In the future, `ResourceChangingScheduler` should be updated to operate on `ScalingConfigDataClass`es instead of PGFs as it is now. That will require a deprecation cycle.
72
0
27,641
10
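A plain-dict illustration of the CPU/GPU filtering above (assumed input values, not the real ScalingConfig internals):

resources_per_worker = {"CPU": 2, "GPU": 1, "memory": 4e9, "custom_accelerator": 1}
additional = {k: v for k, v in resources_per_worker.items() if k not in ["CPU", "GPU"]}
# additional -> {"memory": 4000000000.0, "custom_accelerator": 1}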
3
12
def match(self, image): if self.lut is None: msg = "No operator loaded" raise Exception(msg) if image.mode != "L": msg = "Image mode must be L" raise ValueError(msg) return _imagingmorph.match(bytes(self.lut), image.im.id)
src/PIL/ImageMorph.py
96
Pillow
{ "docstring": "Get a list of coordinates matching the morphological operation on\n an image.\n\n Returns a list of tuples of (x,y) coordinates\n of all matching pixels. See :ref:`coordinate-system`.", "language": "en", "n_whitespaces": 46, "n_words": 26, "vocab_size": 19 }
30
Python
26
2ae55ccbdad9c842929fb238ea1eb81d1f999024
ImageMorph.py
243,769
8
56
match
https://github.com/python-pillow/Pillow.git
Improve exception traceback readability
102
0
70,119
10
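A hedged usage sketch for `MorphOp.match`; it assumes the standard Pillow `ImageMorph.MorphOp(op_name=...)` constructor and an 8-bit ("L" mode) binary image:

from PIL import Image, ImageMorph

img = Image.new("L", (16, 16), 0)
img.paste(255, (4, 4, 12, 12))      # white square on a black background

op = ImageMorph.MorphOp(op_name="edge")
coords = op.match(img)              # list of (x, y) pixels matching the edge pattern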
4
10
def get_files(d, pattern, sort=True): files = glob(osp.join(d, pattern)) files = [f for f in files if osp.isfile(f)] if sort: files = sorted(files) return files # the last two statements are a hypothetical completion; this record truncates after "if sort:", and per the docstring the real sort keys on a number embedded in each filename (e.g. tmp-001)
ludwig/utils/checkpoint_utils.py
69
ludwig
{ "docstring": "Return a list of files in a given directory.\n\n Args:\n d (str): The path to the directory.\n pattern (str): The wildcard to filter files with.\n sort (bool): Whether to sort the returned list. Assumes filenames contain a number value to sort by (tmp-001).\n ", "language": "en", "n_whitespaces": 64, "n_words": 43, "vocab_size": 31 }
19
Python
15
cbff12a584ac253b6953551fecd8a66afc320de7
checkpoint_utils.py
6,465
7
80
get_files
https://github.com/ludwig-ai/ludwig.git
Fixes FileExistsError thrown after training on windows completes (#1845) * Catch exception when os.rename throws when renaming checkpoint. * Filter out -tmp prefix (or any other) when sorting files in get_files. * Use os.replace instead of os.rename, this works on windows * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add comment to sort. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Daniel Treiman <[email protected]> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
31
0
1,000
10
1
4
def free_symbols(self, reference_frame): return self.to_matrix(reference_frame).free_symbols
sympy/physics/vector/vector.py
29
sympy
{ "docstring": "Returns the free symbols in the measure numbers of the vector\n expressed in the given reference frame.\n\n Parameters\n ==========\n reference_frame : ReferenceFrame\n The frame with respect to which the free symbols of the given\n vector is to be determined.\n\n Returns\n =======\n set of Symbol\n set of symbols present in the measure numbers of\n ``reference_frame``.\n\n See Also\n ========\n\n - :meth:`~sympy.core.basic.Basic.free_symbols`\n\n ", "language": "en", "n_whitespaces": 180, "n_words": 59, "vocab_size": 37 }
5
Python
5
c03c0eb2136e693b8431c19dd3294d832b4a394c
vector.py
197,247
2
17
free_symbols
https://github.com/sympy/sympy.git
Add .free_dynamicsymbols to physics vectors.
19
0
48,408
8
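A short usage sketch against the standard sympy.physics.vector API:

from sympy import symbols
from sympy.physics.vector import ReferenceFrame

a, b = symbols('a b')
N = ReferenceFrame('N')
v = a * N.x + b * N.y + 3 * N.z
v.free_symbols(N)   # -> {a, b}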
10
15
def should_strip_auth(self, old_url, new_url): old_parsed = urlparse(old_url) new_parsed = urlparse(new_url) if old_parsed.hostname != new_parsed.hostname: return True # Special case: allow http -> https redirect when using the standard # ports. This isn't specified by RFC 7235, but is kept to avoid # breaking backwards compatibility with older versions of requests # that allowed any redirects on the same host. if ( old_parsed.scheme == "http" and old_parsed.port in (80, None) and new_parsed.scheme == "https" and new_parsed.port in (443, None) ): return False # Handle default port usage corresponding to scheme. changed_port = old_parsed.port != new_parsed.port changed_scheme = old_parsed.scheme != new_parsed.scheme default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None) if ( not changed_scheme and old_parsed.port in default_port and new_parsed.port in default_port ): return False # Standard case: root URI must match return changed_port or changed_scheme
pipenv/patched/pip/_vendor/requests/sessions.py
206
pipenv
{ "docstring": "Decide whether Authorization header should be removed when redirecting", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
130
Python
87
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
sessions.py
22,123
22
128
should_strip_auth
https://github.com/pypa/pipenv.git
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
366
0
4,199
11
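A behaviour sketch using the public `requests` API, which this vendored copy mirrors (illustrative URLs):

import requests

s = requests.Session()
s.should_strip_auth("http://example.com/", "https://example.com/")        # False: http -> https on default ports
s.should_strip_auth("https://example.com/", "https://other.example/")     # True: host changed
s.should_strip_auth("https://example.com/", "https://example.com:8443/")  # True: port changed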
2
8
def units(self): max_position = self.u_height + decimal.Decimal(0.5) if self.desc_units: return drange(0.5, max_position, 0.5) return drange(max_position, 0.5, -0.5) # the return in the desc_units branch is presumed; this record omits it, which would silently discard that branch's result
netbox/dcim/models/racks.py
65
netbox
{ "docstring": "\n Return a list of unit numbers, top to bottom.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
16
Python
16
84f056171286d18c1c14a2fc9d28155a7dcf169a
racks.py
265,088
5
51
units
https://github.com/netbox-community/netbox.git
Initial work on half-height RUs
55
0
77,978
9
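A self-contained illustration of the half-unit numbering; `drange` below is a hypothetical stand-in for NetBox's utility of the same name:

import decimal

def drange(start, stop, step):  # minimal stand-in generator
    value, stop, step = (decimal.Decimal(str(v)) for v in (start, stop, step))
    while (step > 0 and value < stop) or (step < 0 and value > stop):
        yield value
        value += step

u_height = decimal.Decimal(2)
max_position = u_height + decimal.Decimal("0.5")
list(drange(max_position, 0.5, -0.5))   # [2.5, 2.0, 1.5, 1.0]  top-to-bottom for a normal rack
list(drange(0.5, max_position, 0.5))    # [0.5, 1.0, 1.5, 2.0]  top-to-bottom when desc_units is set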
2
41
def test_visualization_compare_classifiers_from_pred_csv_output_saved(csv_filename): input_features = [category_feature(vocab_size=10)] output_features = [category_feature(vocab_size=2, reduce_input="sum")] # Generate test data rel_path = generate_data(input_features, output_features, csv_filename) exp_dir_name = run_experiment_with_visualization(input_features, output_features, dataset=rel_path) vis_output_pattern_pdf = os.path.join(exp_dir_name, "*.pdf") vis_output_pattern_png = os.path.join(exp_dir_name, "*.png") output_feature_name = get_output_feature_name(exp_dir_name) prediction = os.path.join(exp_dir_name, PREDICTIONS_PARQUET_FILE_NAME) experiment_source_data_name = csv_filename.split(".")[0] ground_truth = experiment_source_data_name + ".csv" split_file = experiment_source_data_name + ".split.csv" ground_truth_metadata = experiment_source_data_name + ".meta.json" test_cmd_pdf = [ "python", "-m", "ludwig.visualize", "--visualization", "compare_classifiers_performance_from_pred", "--ground_truth_metadata", ground_truth_metadata, "--ground_truth", ground_truth, "--output_feature_name", output_feature_name, "--split_file", split_file, "--predictions", prediction, prediction, "--model_names", "Model1", "Model2", "-od", exp_dir_name, ] test_cmd_png = test_cmd_pdf.copy() + ["-ff", "png"] commands = [test_cmd_pdf, test_cmd_png] vis_patterns = [vis_output_pattern_pdf, vis_output_pattern_png] for command, viz_pattern in zip(commands, vis_patterns): result = subprocess.run(command) figure_cnt = glob.glob(viz_pattern) assert 0 == result.returncode assert 1 == len(figure_cnt)
tests/integration_tests/test_visualization.py
385
ludwig
{ "docstring": "Ensure pdf and png figures from the experiments can be saved.\n\n Predictions are loaded from csv file.\n :param csv_filename: csv fixture from tests.fixtures.filenames.csv_filename\n :return: None\n ", "language": "en", "n_whitespaces": 37, "n_words": 25, "vocab_size": 22 }
115
Python
86
4fb8f63181f5153b4f6778c6ef8dad61022c4f3f
test_visualization.py
5,869
44
234
test_visualization_compare_classifiers_from_pred_csv_output_saved
https://github.com/ludwig-ai/ludwig.git
Use tempfile to automatically garbage collect data and modeling artifacts in ludwig integration tests. (#1642) * Use tmpdir to automatically garbage collect data and modeling artifacts in ludwig integration tests.
350
0
864
11
6
36
def real_root(arg, n=None, evaluate=None): from sympy.functions.elementary.complexes import Abs, im, sign from sympy.functions.elementary.piecewise import Piecewise if n is not None: return Piecewise( (root(arg, n, evaluate=evaluate), Or(Eq(n, S.One), Eq(n, S.NegativeOne))), (Mul(sign(arg), root(Abs(arg), n, evaluate=evaluate), evaluate=evaluate), And(Eq(im(arg), S.Zero), Eq(Mod(n, 2), S.One))), (root(arg, n, evaluate=evaluate), True)) rv = sympify(arg) n1pow = Transform(lambda x: -(-x.base)**x.exp, lambda x: x.is_Pow and x.base.is_negative and x.exp.is_Rational and x.exp.p == 1 and x.exp.q % 2) return rv.xreplace(n1pow) ############################################################################### ############################# MINIMUM and MAXIMUM ############################# ###############################################################################
sympy/functions/elementary/miscellaneous.py
322
sympy
{ "docstring": "Return the real *n*'th-root of *arg* if possible.\n\n Parameters\n ==========\n\n n : int or None, optional\n If *n* is ``None``, then all instances of\n $(-n)^{1/\\text{odd}}$ will be changed to $-n^{1/\\text{odd}}$.\n This will only create a real root of a principal root.\n The presence of other factors may cause the result to not be\n real.\n\n evaluate : bool, optional\n The parameter determines if the expression should be evaluated.\n If ``None``, its value is taken from\n ``global_parameters.evaluate``.\n\n Examples\n ========\n\n >>> from sympy import root, real_root\n\n >>> real_root(-8, 3)\n -2\n >>> root(-8, 3)\n 2*(-1)**(1/3)\n >>> real_root(_)\n -2\n\n If one creates a non-principal root and applies real_root, the\n result will not be real (so use with caution):\n\n >>> root(-8, 3, 2)\n -2*(-1)**(2/3)\n >>> real_root(_)\n -2*(-1)**(2/3)\n\n See Also\n ========\n\n sympy.polys.rootoftools.rootof\n sympy.core.power.integer_nthroot\n root, sqrt\n ", "language": "en", "n_whitespaces": 259, "n_words": 128, "vocab_size": 88 }
75
Python
58
cda8dfe6f45dc5ed394c2f5cda706cd6c729f713
miscellaneous.py
195,861
61
221
real_root
https://github.com/sympy/sympy.git
Improved documentation formatting
248
0
47,448
16
2
12
def get_dashboard_url(): if ray_constants.RAY_OVERRIDE_DASHBOARD_URL in os.environ: return _remove_protocol_from_url( os.environ.get(ray_constants.RAY_OVERRIDE_DASHBOARD_URL) ) else: worker = global_worker worker.check_connected() return _global_node.webui_url
python/ray/_private/worker.py
72
ray
{ "docstring": "Get the URL to access the Ray dashboard.\n\n Note that the URL does not specify which node the dashboard is on.\n\n Returns:\n The URL of the dashboard as a string.\n ", "language": "en", "n_whitespaces": 46, "n_words": 30, "vocab_size": 23 }
17
Python
16
4692e8d8023e789120d3f22b41ffb136b50f70ea
worker.py
127,153
9
42
get_dashboard_url
https://github.com/ray-project/ray.git
[core] Don't override external dashboard URL in internal KV store (#27901) Fix 2.0.0 release blocker bug where Ray State API and Jobs not accessible if the override URL doesn't support adding additional subpaths. This PR keeps the localhost dashboard URL in the internal KV store and only overrides in values printed or returned to the user.
72
0
28,373
12
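A hedged usage sketch; the import mirrors the record's `ray._private.worker` module path, and the override variable name is assumed to match the `ray_constants` value referenced in the code:

import os
import ray
from ray._private.worker import get_dashboard_url

ray.init()
print(get_dashboard_url())   # e.g. "127.0.0.1:8265" for a local cluster

os.environ["RAY_OVERRIDE_DASHBOARD_URL"] = "https://dashboard.example.internal"  # assumed env var name
print(get_dashboard_url())   # the override with its protocol stripped: "dashboard.example.internal"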
1
26
def test_install_non_rpm_using_dnf_gen_error(): info_fake_error = "Last metadata expiration check: 1 day, 23:40:22 ago on Mon Dec 6 19:26:36 EST 2021.\nNo match for argument: info_fake\nError: Unable to find a match: info_fake\n" # string reconstructed from this record's docstring field; the original literal is missing from the extracted code dnf_call = MagicMock( return_value={"retcode": 1, "stdout": "", "stderr": info_fake_error} ) list_pkgs_mock = MagicMock(side_effect=[{"info": "6.6-2"}, {"info": "6.6-2"}]) with patch("pathlib.Path.is_file", return_value=True): with patch.dict( aixpkg.__salt__, {"cmd.run_all": dnf_call, "config.get": MagicMock(return_value=False)}, ), patch.object(aixpkg, "list_pkgs", list_pkgs_mock): expected = { "changes": {}, "errors": [info_fake_error], } with pytest.raises(CommandExecutionError) as exc_info: aixpkg.install("info_fake.rpm") assert exc_info.value.info == expected, exc_info.value.info assert dnf_call.call_count == 1 libpath_env = {"LIBPATH": "/opt/freeware/lib:/usr/lib"} dnf_call.assert_any_call( "/opt/freeware/bin/dnf install --allowerasing --assumeyes info_fake.rpm", env=libpath_env, ignore_retcode=True, python_shell=False, )
tests/pytests/unit/modules/test_aixpkg.py
303
salt
{ "docstring": "\n Test install of non rpm using dnf which should generate an error\n Last metadata expiration check: 1 day, 23:40:22 ago on Mon Dec 6 19:26:36 EST 2021.\nNo match for argument: info_fake\nError: Unable to find a match: info_fake\n", "language": "en", "n_whitespaces": 44, "n_words": 39, "vocab_size": 38 }
70
Python
60
f1c37893caf90738288e789c3233ab934630254f
test_aixpkg.py
215,096
29
172
test_install_non_rpm_using_dnf_gen_error
https://github.com/saltstack/salt.git
Working tests for install
326
0
53,813
16
3
13
def get_install_candidate(self, link_evaluator, link): # type: (LinkEvaluator, Link) -> Optional[InstallationCandidate] is_candidate, result = link_evaluator.evaluate_link(link) if not is_candidate: if result: self._log_skipped_link(link, reason=result) return None return InstallationCandidate( name=link_evaluator.project_name, link=link, version=result, )
.venv/lib/python3.8/site-packages/pip/_internal/index/package_finder.py
89
transferlearning
{ "docstring": "\n If the link is a candidate for install, convert it to an\n InstallationCandidate and return it. Otherwise, return None.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 18 }
29
Python
27
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
package_finder.py
60,741
11
57
get_install_candidate
https://github.com/jindongwang/transferlearning.git
upd; format
141
0
12,270
12