id
int64
20
338k
vocab_size
int64
2
671
ast_levels
int64
4
32
nloc
int64
1
451
n_ast_nodes
int64
12
5.6k
n_identifiers
int64
1
186
n_ast_errors
int64
0
10
n_words
int64
2
2.17k
n_whitespaces
int64
2
13.8k
fun_name
stringlengths
2
73
commit_message
stringlengths
51
15.3k
url
stringlengths
31
59
code
stringlengths
51
31k
ast_errors
stringlengths
0
1.46k
token_counts
int64
6
3.32k
file_name
stringlengths
5
56
language
stringclasses
1 value
path
stringlengths
7
134
commit_id
stringlengths
40
40
repo
stringlengths
3
28
complexity
int64
1
153
247,606
37
12
16
207
14
0
47
186
test_remove_alias
Add type hints to some tests/handlers files. (#12224)
https://github.com/matrix-org/synapse.git
def test_remove_alias(self) -> None: # Set this new alias as the canonical alias for this room self._set_canonical_alias( {"alias": self.test_alias, "alt_aliases": [self.test_alias]} ) data = self._get_canonical_alias() self.assertEqual(data["content"]["alias"], self.test_alias) self.assertEqual(data["content"]["alt_aliases"], [self.test_alias]) # Finally, delete the alias. self.get_success( self.handler.delete_association( create_requester(self.admin_user), self.room_alias ) ) data = self._get_canonical_alias() self.assertNotIn("alias", data["content"]) self.assertNotIn("alt_aliases", data["content"])
120
test_directory.py
Python
tests/handlers/test_directory.py
5dd949bee6158a8b651db9f2ae417a62c8184bfd
synapse
1
261,784
58
12
31
339
36
1
89
233
test_forest_regressor_oob
ENH Add option to pass custom scorer for RandomForest OOB calculation (#25177) Co-authored-by: simonandras <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def test_forest_regressor_oob(ForestRegressor, X, y, X_type, lower_bound_r2, oob_score): X = _convert_container(X, constructor_name=X_type) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.5, random_state=0, ) regressor = ForestRegressor( n_estimators=50, bootstrap=True, oob_score=oob_score, random_state=0, ) assert not hasattr(regressor, "oob_score_") assert not hasattr(regressor, "oob_prediction_") regressor.fit(X_train, y_train) if callable(oob_score): test_score = oob_score(y_test, regressor.predict(X_test)) else: test_score = regressor.score(X_test, y_test) assert regressor.oob_score_ >= lower_bound_r2 assert abs(test_score - regressor.oob_score_) <= 0.1 assert hasattr(regressor, "oob_score_") assert hasattr(regressor, "oob_prediction_") assert not hasattr(regressor, "oob_decision_function_") if y.ndim == 1: expected_shape = (X_train.shape[0],) else: expected_shape = (X_train.shape[0], y.ndim) assert regressor.oob_prediction_.shape == expected_shape @pytest.mark.parametrize("ForestEstimator", FOREST_CLASSIFIERS_REGRESSORS.values())
@pytest.mark.parametrize("ForestEstimator", FOREST_CLASSIFIERS_REGRESSORS.values())
210
test_forest.py
Python
sklearn/ensemble/tests/test_forest.py
0b202caf569b6ca0e008d456c99362f7b615b510
scikit-learn
3
194,585
38
16
29
293
22
0
77
332
add_dep_paths
Replace deprecated logging.warn with logging.warning (#7906)
https://github.com/kivy/kivy.git
def add_dep_paths(): paths = [] if old_deps is not None: for importer, modname, ispkg in pkgutil.iter_modules( old_deps.__path__): if not ispkg: continue try: mod = importer.find_module(modname).load_module(modname) except ImportError as e: logging.warning(f"deps: Error importing dependency: {e}") continue if hasattr(mod, 'dep_bins'): paths.extend(mod.dep_bins) sys.path.extend(paths) if kivy_deps is None: return paths = [] for importer, modname, ispkg in pkgutil.iter_modules(kivy_deps.__path__): if not ispkg: continue try: mod = importer.find_module(modname).load_module(modname) except ImportError as e: logging.warning(f"deps: Error importing dependency: {e}") continue if hasattr(mod, 'dep_bins'): paths.extend(mod.dep_bins) sys.path.extend(paths)
171
__init__.py
Python
kivy/tools/packaging/pyinstaller_hooks/__init__.py
e6c144b5423dada62fd13034c2d40bf48a2bc423
kivy
11
289,434
73
13
58
814
35
0
265
482
test_logbook_view_period_entity
Ensure recorder test fixture is setup before hass fixture (#80528) * Ensure recorder test fixture is setup before hass fixture * Adjust more tests
https://github.com/home-assistant/core.git
async def test_logbook_view_period_entity(recorder_mock, hass, hass_client, set_utc): await async_setup_component(hass, "logbook", {}) await async_recorder_block_till_done(hass) entity_id_test = "switch.test" hass.states.async_set(entity_id_test, STATE_OFF) hass.states.async_set(entity_id_test, STATE_ON) entity_id_second = "switch.second" hass.states.async_set(entity_id_second, STATE_OFF) hass.states.async_set(entity_id_second, STATE_ON) await async_wait_recording_done(hass) client = await hass_client() # Today time 00:00:00 start = dt_util.utcnow().date() start_date = datetime(start.year, start.month, start.day) # Test today entries without filters response = await client.get(f"/api/logbook/{start_date.isoformat()}") assert response.status == HTTPStatus.OK response_json = await response.json() assert len(response_json) == 2 assert response_json[0]["entity_id"] == entity_id_test assert response_json[1]["entity_id"] == entity_id_second # Test today entries with filter by period response = await client.get(f"/api/logbook/{start_date.isoformat()}?period=1") assert response.status == HTTPStatus.OK response_json = await response.json() assert len(response_json) == 2 assert response_json[0]["entity_id"] == entity_id_test assert response_json[1]["entity_id"] == entity_id_second # Test today entries with filter by entity_id response = await client.get( f"/api/logbook/{start_date.isoformat()}?entity=switch.test" ) assert response.status == HTTPStatus.OK response_json = await response.json() assert len(response_json) == 1 assert response_json[0]["entity_id"] == entity_id_test # Test entries for 3 days with filter by entity_id response = await client.get( f"/api/logbook/{start_date.isoformat()}?period=3&entity=switch.test" ) assert response.status == HTTPStatus.OK response_json = await response.json() assert len(response_json) == 1 assert response_json[0]["entity_id"] == entity_id_test # Tomorrow time 00:00:00 start = (dt_util.utcnow() + timedelta(days=1)).date() start_date = datetime(start.year, start.month, start.day) # Test tomorrow entries without filters response = await client.get(f"/api/logbook/{start_date.isoformat()}") assert response.status == HTTPStatus.OK response_json = await response.json() assert len(response_json) == 0 # Test tomorrow entries with filter by entity_id response = await client.get( f"/api/logbook/{start_date.isoformat()}?entity=switch.test" ) assert response.status == HTTPStatus.OK response_json = await response.json() assert len(response_json) == 0 # Test entries from tomorrow to 3 days ago with filter by entity_id response = await client.get( f"/api/logbook/{start_date.isoformat()}?period=3&entity=switch.test" ) assert response.status == HTTPStatus.OK response_json = await response.json() assert len(response_json) == 1 assert response_json[0]["entity_id"] == entity_id_test
448
test_init.py
Python
tests/components/logbook/test_init.py
31a787558fd312331b55e5c2c4b33341fc3601fc
core
1
70,612
16
12
7
73
10
0
17
74
get_task_type_filter_choices
add another helper method get_task_type_filter_choices
https://github.com/wagtail/wagtail.git
def get_task_type_filter_choices(self): task_type_choices = [ (model, model.get_verbose_name()) for model in self.task_models ] task_type_choices.sort(key=lambda task_type: task_type[1].lower()) return task_type_choices
45
workflows.py
Python
wagtail/admin/views/workflows.py
284380672da91b5f1d5ea35e01b5b86d7a534dbb
wagtail
2
312,840
11
9
4
51
8
0
11
32
poll_state
Refactor Sonos polling (#65722) * Refactor Sonos polling Explicitly rename fallback polling Catch soco exceptions centrally where possible Create SonosPollingEntity subclass Remove unnecessary soco_error fixture argument Remove unnecessary polling in update_volume() Adjust log levels and wording Set explicit timeout on library * Adjust logging to use raised exceptions * Simplify availabiliity checks when using built-in poller * Fix typing for return values
https://github.com/home-assistant/core.git
def poll_state(self) -> None: state = getattr(self.soco, self.feature_type) setattr(self.speaker, self.feature_type, state)
32
switch.py
Python
homeassistant/components/sonos/switch.py
a7fd477c641eda40bda767a8f395ce42e4abf9a6
core
1
197,182
61
19
20
261
21
0
86
407
diff
Do not re-express differentiated vectors back into the original frame The prior behavior of sympy.physics.vector.vector.Vector.diff() creates more complex equations that it should in some scenarios. Take this example: import sympy as sm import sympy.physics.mechanics as me a, b, c, d, e, f = sm.symbols('a, b, c, d, e, f') alpha, beta = sm.symbols('alpha, beta') A = me.ReferenceFrame('A') B = me.ReferenceFrame('B') C = me.ReferenceFrame('C') B.orient_axis(A, alpha, A.x) C.orient_axis(B, beta, B.y) v = a*A.x + b*A.y + c*B.x + d*B.y + e*C.x + f*C.y v.diff(alpha, A) Prior to this commit, the prior result would be: (d*sin(alpha)**2 + d*cos(alpha)**2)*B.z + (-(e*sin(alpha)*sin(beta) + f*cos(alpha))*sin(beta)*cos(alpha) + (e*sin(beta)*cos(alpha) - f*sin(alpha))*sin(alpha)*sin(beta))*C.x + ((e*sin(alpha)*sin(beta) + f*cos(alpha))*sin(alpha) + (e*sin(beta)*cos(alpha) - f*sin(alpha))*cos(alpha))*C.y + ((e*sin(alpha)*sin(beta) + f*cos(alpha))*cos(alpha)*cos(beta) - (e*sin(beta)*cos(alpha) - f*sin(alpha))*sin(alpha)*cos(beta))*C.z and the result in this commit is: (-d*sin(alpha) + e*sin(beta)*cos(alpha) - f*sin(alpha))*A.y + (d*cos(alpha) + e*sin(alpha)*sin(beta) + f*cos(alpha))*A.z These are mathematically the equivalent results. The prior implementation re-expresses each component back in the original reference frames the vector is expressed in. If the user wants this they can express the vector in whatever reference frame they want after the differentiation step to control the complexity and reference frame expression. This change will result in simpler expressions by default.
https://github.com/sympy/sympy.git
def diff(self, var, frame, var_in_dcm=True): from sympy.physics.vector.frame import _check_frame var = sympify(var) _check_frame(frame) inlist = [] for vector_component in self.args: measure_number = vector_component[0] component_frame = vector_component[1] if component_frame == frame: inlist += [(measure_number.diff(var), frame)] else: # If the direction cosine matrix relating the component frame # with the derivative frame does not contain the variable. if not var_in_dcm or (frame.dcm(component_frame).diff(var) == zeros(3, 3)): inlist += [(measure_number.diff(var), component_frame)] else: # else express in the frame reexp_vec_comp = Vector([vector_component]).express(frame) deriv = reexp_vec_comp.args[0][0].diff(var) inlist += Vector([(deriv, frame)]).args return Vector(inlist)
167
vector.py
Python
sympy/physics/vector/vector.py
0bcbb67c2144906be61ba35c9a545d49b4cabf9a
sympy
5
104,578
23
12
11
86
8
0
23
125
flatten
Fix flatten of complex feature types (#3723) * Flatten Translation and TranslationVariableLanguages * Add tests * Style * Flatten for decodable features * Fix flatten for non-dict types * Add test * Descriptive message in flatten for Audio feature * Small refactor * Add flatten to features * Update table_flatten * Revert changes in Dataset.flatten_/flatten * Apply Quentin's suggestions from code review Co-authored-by: Quentin Lhoest <[email protected]> * Improve table_flatten docstring * Fix tests * Add nested test * Minor fix * Remove comment Co-authored-by: Quentin Lhoest <[email protected]>
https://github.com/huggingface/datasets.git
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value return ( self if self.decode else { "bytes": Value("binary"), "path": Value("string"), } )
48
image.py
Python
src/datasets/features/image.py
3804442bb7cfcb9d52044d92688115cfdc69c2da
datasets
2
128,075
69
16
31
261
23
0
99
590
test_is_and_wis_math
[RLlib] Fix ope speed (#28834) * 1. Introduced new abstraction: OfflineEvaluator that is the parent of OPE and feature importance 2. introduced estimate_multi_step vs. estimate_single_step Signed-off-by: Kourosh Hakhamaneshi <[email protected]> * algorithm ope evaluation is now able to skip split_by_episode Signed-off-by: Kourosh Hakhamaneshi <[email protected]> * lint Signed-off-by: Kourosh Hakhamaneshi <[email protected]> * lint Signed-off-by: Kourosh Hakhamaneshi <[email protected]> * fixed some unittests Signed-off-by: Kourosh Hakhamaneshi <[email protected]> * wip Signed-off-by: Kourosh Hakhamaneshi <[email protected]> * wip Signed-off-by: Kourosh Hakhamaneshi <[email protected]> * fixed dm and dr variance issues Signed-off-by: Kourosh Hakhamaneshi <[email protected]> * lint Signed-off-by: Kourosh Hakhamaneshi <[email protected]> * cleaned up the inheritance Signed-off-by: Kourosh Hakhamaneshi <[email protected]> * lint Signed-off-by: Kourosh Hakhamaneshi <[email protected]> * lint Signed-off-by: Kourosh Hakhamaneshi <[email protected]> * fixed test Signed-off-by: Kourosh Hakhamaneshi <[email protected]> * nit Signed-off-by: Kourosh Hakhamaneshi <[email protected]> * fixed nits Signed-off-by: Kourosh Hakhamaneshi <[email protected]> * fixed the typos Signed-off-by: Kourosh Hakhamaneshi <[email protected]> Signed-off-by: Kourosh Hakhamaneshi <[email protected]>
https://github.com/ray-project/ray.git
def test_is_and_wis_math(self): ope_classes = [ ImportanceSampling, WeightedImportanceSampling, ] for class_module in ope_classes: for policy_tag in ["good", "bad"]: target_policy = self.policies[policy_tag] estimator = class_module(target_policy, gamma=0) s = time.time() estimate_1 = estimator.estimate( self.sample_batch, split_batch_by_episode=True, ) dt1 = time.time() - s s = time.time() estimate_2 = estimator.estimate( self.sample_batch, split_batch_by_episode=False ) dt2 = time.time() - s if policy_tag == "good": # check if the v_gain is larger than 1 self.assertGreater(estimate_1["v_gain"], 1) else: self.assertLess(estimate_1["v_gain"], 1) # check that the estimates are the same for bandit vs RL check(estimate_1, estimate_2) self.assertGreater( dt1, dt2, f"in bandits split_by_episode = False should improve " f"performance, dt_wo_split={dt2}, dt_with_split={dt1}", )
156
test_ope_math.py
Python
rllib/offline/estimators/tests/test_ope_math.py
e6c995d23e5d0b831557a4267a181a095bd68b58
ray
4
322,908
70
14
18
329
33
0
104
337
forward
Add NLP model interpretation (#1752) * upload NLP interpretation * fix problems and relocate project * remove abandoned picture * remove abandoned picture * fix dead link in README * fix dead link in README * fix code style problems * fix CR round 1 * remove .gitkeep files * fix code style * fix file encoding problem * fix code style * delete duplicated files due to directory rebuild * fix CR round 2 * fix code style * fix ernie tokenizer * fix code style * fix problem from CR round 1 * fix bugs * fix README * remove duplicated files * deal with diff of old and new tokenizer results * fix CR round 4 * fix code style * add missing dependence * fix broken import path * move some data file to cloud * MRC upper case to lower case Co-authored-by: Zeyu Chen <[email protected]> Co-authored-by: binlinquge <xxx> Co-authored-by: Guo Sheng <[email protected]>
https://github.com/PaddlePaddle/PaddleNLP.git
def forward(self, input, mask=None): forward_input, backward_input = paddle.chunk(input, chunks=2, axis=2) # elementwise-sum forward_x and backward_x # Shape: (batch_size, max_seq_len, hidden_size) h = paddle.add_n([forward_input, backward_input]) # Shape: (batch_size, hidden_size, 1) att_weight = self.att_weight.tile( repeat_times=(paddle.shape(h)[0], 1, 1)) # Shape: (batch_size, max_seq_len, 1) att_score = paddle.bmm(paddle.tanh(h), att_weight) if mask is not None: # mask, remove the effect of 'PAD' mask = paddle.cast(mask, dtype='float32') mask = mask.unsqueeze(axis=-1) inf_tensor = paddle.full( shape=mask.shape, dtype='float32', fill_value=-INF) att_score = paddle.multiply(att_score, mask) + paddle.multiply( inf_tensor, (1 - mask)) # Shape: (batch_size, max_seq_len, 1) att_weight = F.softmax(att_score, axis=1) # Shape: (batch_size, lstm_hidden_size) reps = paddle.bmm(h.transpose(perm=(0, 2, 1)), att_weight).squeeze(axis=-1) reps = paddle.tanh(reps) return reps, att_weight
211
model.py
Python
examples/model_interpretation/task/senti/rnn/model.py
93cae49c0c572b5c1ac972759140fbe924b0374d
PaddleNLP
2
132,292
6
8
5
25
4
0
6
19
SigOptSearch
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def SigOptSearch(*args, **kwargs): raise DeprecationWarning( )
14
__init__.py
Python
python/ray/tune/suggest/__init__.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
1
258,618
37
11
11
201
22
0
54
87
make_s_curve
ENH Replaced RandomState with Generator compatible calls (#22271)
https://github.com/scikit-learn/scikit-learn.git
def make_s_curve(n_samples=100, *, noise=0.0, random_state=None): generator = check_random_state(random_state) t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5) x = np.sin(t) y = 2.0 * generator.rand(1, n_samples) z = np.sign(t) * (np.cos(t) - 1) X = np.concatenate((x, y, z)) X += noise * generator.standard_normal(size=(3, n_samples)) X = X.T t = np.squeeze(t) return X, t
136
_samples_generator.py
Python
sklearn/datasets/_samples_generator.py
254ea8c453cd2100ade07644648f1f00392611a6
scikit-learn
1
263,990
94
11
56
240
28
0
148
261
django_dottedstring_imports
hookutils: django: port to the PyInstaller.isolated framework Port the `django_dottedstring_imports` helper to the new isolated framework in lieu of using old `exec_script` function with a separate script file.
https://github.com/pyinstaller/pyinstaller.git
def django_dottedstring_imports(django_root_dir): import sys import os import PyInstaller.utils.misc from PyInstaller.utils import hooks as hookutils # Extra search paths to add to sys.path: # - parent directory of the django_root_dir # - django_root_dir itself; often, Django users do not specify absolute imports in the settings module. search_paths = [ PyInstaller.utils.misc.get_path_to_toplevel_modules(django_root_dir), django_root_dir, ] sys.path += search_paths # Set the path to project's settings module default_settings_module = os.path.basename(django_root_dir) + '.settings' settings_module = os.environ.get('DJANGO_SETTINGS_MODULE', default_settings_module) os.environ['DJANGO_SETTINGS_MODULE'] = settings_module # Calling django.setup() avoids the exception AppRegistryNotReady() and also reads the user settings # from DJANGO_SETTINGS_MODULE. # https://stackoverflow.com/questions/24793351/django-appregistrynotready import django # noqa: E402 django.setup() # This allows to access all django settings even from the settings.py module. from django.conf import settings # noqa: E402 hiddenimports = list(settings.INSTALLED_APPS) # Do not fail script when settings does not have such attributes. if hasattr(settings, 'TEMPLATE_CONTEXT_PROCESSORS'): hiddenimports += list(settings.TEMPLATE_CONTEXT_PROCESSORS) if hasattr(settings, 'TEMPLATE_LOADERS'): hiddenimports += list(settings.TEMPLATE_LOADERS) hiddenimports += [settings.ROOT_URLCONF]
378
django.py
Python
PyInstaller/utils/hooks/django.py
3575538da7d92962e93c18129ead2be02fb097e3
pyinstaller
17
10,856
25
14
13
164
6
0
34
113
all_args
refactor: rename pod to deployment (#4230) * refactor: rename pod to deployment * style: fix overload and cli autocomplete * fix: undo daemon mistake * refactor: leftover cleanup * fix: more test fixes * fix: more fixes * fix: more fixes * fix: more fixes * fix: more tests * fix: fix more tests * refactor: fix more tests * refactor: more tests fixes * refactor: rename pea to pod * refactor: adjust docs * refactor: complete pea renaming * refactor: more fixes * fix: pea_type in k8s yamls * fix: adjust pod args name * refactor: rename peapods parser folder * fix: da init Co-authored-by: Jina Dev Bot <[email protected]>
https://github.com/jina-ai/jina.git
def all_args(self) -> List[Namespace]: all_args = ( ([self.pod_args['uses_before']] if self.pod_args['uses_before'] else []) + ([self.pod_args['uses_after']] if self.pod_args['uses_after'] else []) + ([self.pod_args['head']] if self.pod_args['head'] else []) ) for shard_id in self.pod_args['pods']: all_args += self.pod_args['pods'][shard_id] return all_args
100
__init__.py
Python
jina/orchestrate/deployments/__init__.py
13edc16d806fb5d77a6849551178ccc75937f25f
jina
5
288,641
34
14
17
211
26
1
46
156
test_form_only_stillimage_gif
Add visual image preview during generic camera config flow (#71269) * Add visual preview during setup of generic camera * Code review: standardize preview url * Fix slug test * Refactor to use HomeAssistantView * Code review: simplify * Update manifest * Don't illegally access protected member * Increase test coverage * Prevent browser caching of preview images. * Code review:move incrementor to ?t=X + simplify * Discard old flow preview data * Increase test coverage * Code review: rename variables for clarity * Add timeout for image previews * Fix preview timeout tests * Simplify: store cam image preview in config_flow * Call step method to transition between flow steps * Only store user_input in flow, not CameraObject * Fix problem where test wouldn't run in isolation. * Simplify test * Don't move directly to another step's form * Remove unused constant * Simplify test Co-authored-by: Dave T <[email protected]>
https://github.com/home-assistant/core.git
async def test_form_only_stillimage_gif(hass, fakeimg_gif, user_flow): data = TESTDATA.copy() data.pop(CONF_STREAM_SOURCE) with patch("homeassistant.components.generic.async_setup_entry", return_value=True): result1 = await hass.config_entries.flow.async_configure( user_flow["flow_id"], data, ) assert result1["type"] == data_entry_flow.RESULT_TYPE_FORM assert result1["step_id"] == "user_confirm_still" result2 = await hass.config_entries.flow.async_configure( result1["flow_id"], user_input={CONF_CONFIRMED_OK: True}, ) await hass.async_block_till_done() assert result2["type"] == data_entry_flow.FlowResultType.CREATE_ENTRY assert result2["options"][CONF_CONTENT_TYPE] == "image/gif" @respx.mock
@respx.mock
120
test_config_flow.py
Python
tests/components/generic/test_config_flow.py
6040c30b453a4dd3c20044fa46f03d0e77a07436
core
1
43,401
24
14
10
166
22
0
33
98
add_user_permissions_to_dag
Upgrade FAB to 4.1.1 (#24399) * Upgrade FAB to 4.1.1 The Flask Application Builder have been updated recently to support a number of newer dependencies. This PR is the attempt to migrate FAB to newer version. This includes: * update setup.py and setup.cfg upper and lower bounds to account for proper version of dependencies that FAB < 4.0.0 was blocking from upgrade * added typed Flask application retrieval with a custom application fields available for MyPy typing checks. * fix typing to account for typing hints added in multiple upgraded libraries optional values and content of request returned as Mapping * switch to PyJWT 2.* by using non-deprecated "required" claim as list rather than separate fields * add possibiliyt to install providers without constraints so that we could avoid errors on conflicting constraints when upgrade-to-newer-dependencies is used * add pre-commit to check that 2.4+ only get_airflow_app is not used in providers * avoid Bad Request in case the request sent to Flask 2.0 is not JSon content type * switch imports of internal classes to direct packages where classes are available rather than from "airflow.models" to satisfy MyPY * synchronize changes of FAB Security Manager 4.1.1 with our copy of the Security Manager. * add error handling for a few "None" cases detected by MyPY * corrected test cases that were broken by immutability of Flask 2 objects and better escaping done by Flask 2 * updated test cases to account for redirection to "path" rather than full URL by Flask2 Fixes: #22397 * fixup! Upgrade FAB to 4.1.1
https://github.com/apache/airflow.git
def add_user_permissions_to_dag(sender, template, context, **extra): if 'dag' in context: dag = context['dag'] can_create_dag_run = get_airflow_app().appbuilder.sm.has_access( permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN ) dag.can_edit = get_airflow_app().appbuilder.sm.can_edit_dag(dag.dag_id) dag.can_trigger = dag.can_edit and can_create_dag_run dag.can_delete = get_airflow_app().appbuilder.sm.can_delete_dag(dag.dag_id) context['dag'] = dag before_render_template.connect(add_user_permissions_to_dag)
95
views.py
Python
airflow/www/views.py
e2f19505bf3622935480e80bee55bf5b6d80097b
airflow
3
249,236
31
10
26
212
17
0
50
258
test_unknown_device
Use literals in place of `HTTPStatus` constants in tests (#13479) Replace - `HTTPStatus.NOT_FOUND` - `HTTPStatus.FORBIDDEN` - `HTTPStatus.UNAUTHORIZED` - `HTTPStatus.CONFLICT` - `HTTPStatus.CREATED` Signed-off-by: Dirk Klimpel <[email protected]>
https://github.com/matrix-org/synapse.git
def test_unknown_device(self) -> None: url = "/_synapse/admin/v2/users/%s/devices/unknown_device" % urllib.parse.quote( self.other_user ) channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) channel = self.make_request( "PUT", url, access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) channel = self.make_request( "DELETE", url, access_token=self.admin_user_tok, ) # Delete unknown device returns status 200 self.assertEqual(200, channel.code, msg=channel.json_body)
136
test_device.py
Python
tests/rest/admin/test_device.py
1595052b2681fb86c1c1b9a6028c1bc0d38a2e4b
synapse
1
101,635
50
13
32
360
21
0
95
289
_find_alignments
Minor updates and fixups - Mask Tool - Typing + BiSeNet mask update fix - Alignments Tool - Auto search for alignments file
https://github.com/deepfakes/faceswap.git
def _find_alignments(self) -> str: fname = self._args.alignments_file frames = self._args.frames_dir if fname and os.path.isfile(fname) and os.path.splitext(fname)[-1].lower() == ".fsa": return fname if fname: logger.error("Not a valid alignments file: '%s'", fname) sys.exit(1) if not frames or not os.path.exists(frames): logger.error("Not a valid frames folder: '%s'. Can't scan for alignments.", frames) sys.exit(1) fname = "alignments.fsa" if os.path.isdir(frames) and os.path.exists(os.path.join(frames, fname)): return fname if os.path.isdir(frames) or os.path.splitext(frames)[-1] not in _video_extensions: logger.error("Can't find a valid alignments file in location: %s", frames) sys.exit(1) fname = f"{os.path.splitext(frames)[0]}_{fname}" if not os.path.exists(fname): logger.error("Can't find a valid alignments file for video: %s", frames) sys.exit(1) return fname
204
alignments.py
Python
tools/alignments/alignments.py
2d312a9db228c025d0bd2ea7a4f747a2c644b5d8
faceswap
12
270,995
5
6
2
18
4
0
5
19
update_state
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def update_state(self, data): raise NotImplementedError
10
base_preprocessing_layer.py
Python
keras/engine/base_preprocessing_layer.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
104,400
7
8
2
41
5
0
7
21
to_pandas
Update docs to new frontend/UI (#3690) * WIP: update docs to new UI * make style * Rm unused * inject_arrow_table_documentation __annotations__ * hasattr(arrow_table_method, "__annotations__") * Update task_template.rst * Codeblock PT-TF-SPLIT * Convert loading scripts * Convert docs to mdx * Fix mdx * Add <Tip> * Convert mdx tables * Fix codeblock * Rm unneded hashlinks * Update index.mdx * Redo dev change * Rm circle ci `build_doc` & `deploy_doc` * Rm unneeded files * Update docs reamde * Standardize to `Example::` * mdx logging levels doc * Table properties inject_arrow_table_documentation * ``` to ```py mdx * Add Tips mdx * important,None -> <Tip warning={true}> * More misc * Center imgs * Update instllation page * `setup.py` docs section * Rm imgs since they are in hf.co * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * Update index mdx * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * just `Dataset` obj * Addedversion just italics * Update ReadInstruction doc example syntax * Change docstring for `prepare_for_task` * Chore * Remove `code` syntax from headings * Rm `code` syntax from headings * Hashlink backward compatability * S3FileSystem doc * S3FileSystem doc updates * index.mdx updates * Add darkmode gifs * Index logo img css classes * Index mdx dataset logo img size * Docs for DownloadMode class * Doc DownloadMode table * format docstrings * style * Add doc builder scripts (#3790) * add doc builder scripts * fix docker image * Docs new UI actions no self hosted (#3793) * No self hosted * replace doc injection by actual docstrings * Docstring formatted Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> Co-authored-by: Lysandre Debut <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> * Rm notebooks from docs actions since they dont exi * Update tsting branch * More docstring * Chore * bump up node version * bump up node * ``` -> ```py for audio_process.mdx * Update .github/workflows/build_documentation.yml Co-authored-by: Quentin Lhoest <[email protected]> * Uodate dev doc build * remove run on PR * fix action * Fix gh doc workflow * forgot this change when merging master * Update build doc Co-authored-by: Steven Liu <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Lysandre Debut <[email protected]>
https://github.com/huggingface/datasets.git
def to_pandas(self, *args, **kwargs): return self.table.to_pandas(*args, **kwargs)
25
table.py
Python
src/datasets/table.py
e35be138148333078284b942ccc9ed7b1d826f97
datasets
1
20,511
21
14
13
115
13
0
35
110
looks_like_xml
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def looks_like_xml(text): if xml_decl_re.match(text): return True key = hash(text) try: return _looks_like_xml_cache[key] except KeyError: m = doctype_lookup_re.search(text) if m is not None: return True rv = tag_re.search(text[:1000]) is not None _looks_like_xml_cache[key] = rv return rv
71
util.py
Python
pipenv/patched/notpip/_vendor/pygments/util.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
4
269,274
67
12
16
157
6
0
106
185
check_validation_split_arg
Export split_dataset utility. PiperOrigin-RevId: 447783753
https://github.com/keras-team/keras.git
def check_validation_split_arg(validation_split, subset, shuffle, seed): if validation_split and not 0 < validation_split < 1: raise ValueError( '`validation_split` must be between 0 and 1, ' f'received: {validation_split}') if (validation_split or subset) and not (validation_split and subset): raise ValueError( 'If `subset` is set, `validation_split` must be set, and inversely.') if subset not in ('training', 'validation', 'both', None): raise ValueError('`subset` must be either "training", ' f'"validation" or "both", received: {subset}') if validation_split and shuffle and seed is None: raise ValueError( 'If using `validation_split` and shuffling the data, you must provide ' 'a `seed` argument, to make sure that there is no overlap between the ' 'training and validation subset.')
85
dataset_utils.py
Python
keras/utils/dataset_utils.py
06f5ef7989db314ee210455b04fe6f71e8dc57a7
keras
11
186,101
5
6
5
16
1
0
5
8
test_focused_child_widget_with_movement_bindings_no_inherit
Add a test for widget bindings with binding inheritance turned off
https://github.com/Textualize/textual.git
async def test_focused_child_widget_with_movement_bindings_no_inherit() -> None:
53
test_binding_inheritance.py
Python
tests/test_binding_inheritance.py
a44c0f5d7f7809bc3a6ac349ab2b2e110a90b096
textual
2
250,551
68
16
26
283
27
0
91
397
register
Rename new async helper functions. async_trigger -> trigger_event invoke_addon -> invoke_addon_sync (API breakage) async_invoke_addon -> invoke_addon
https://github.com/mitmproxy/mitmproxy.git
def register(self, addon): api_changes = { # mitmproxy 6 -> mitmproxy 7 "clientconnect": "client_connected", "clientdisconnect": "client_disconnected", "serverconnect": "server_connect and server_connected", "serverdisconnect": "server_disconnected", } for a in traverse([addon]): for old, new in api_changes.items(): if hasattr(a, old): ctx.log.warn(f"The {old} event has been removed, use {new} instead. " f"For more details, see https://docs.mitmproxy.org/stable/addons-events/.") name = _get_name(a) if name in self.lookup: raise exceptions.AddonManagerError( "An addon called '%s' already exists." % name ) l = Loader(self.master) self.invoke_addon_sync(addon, LoadHook(l)) for a in traverse([addon]): name = _get_name(a) self.lookup[name] = a for a in traverse([addon]): self.master.commands.collect_commands(a) self.master.options.process_deferred() return addon
164
addonmanager.py
Python
mitmproxy/addonmanager.py
ee4999e8e4380f7b67faef92f04c361deffba412
mitmproxy
7
153,576
20
11
7
80
9
0
21
87
mode
DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333) Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Alexander Myskov <[email protected]>
https://github.com/modin-project/modin.git
def mode(self, axis=0, numeric_only=False, dropna=True): # noqa: PR01, RT01, D200 axis = self._get_axis_number(axis) return self.__constructor__( query_compiler=self._query_compiler.mode( axis=axis, numeric_only=numeric_only, dropna=dropna ) )
52
base.py
Python
modin/pandas/base.py
605efa618e7994681f57b11d04d417f353ef8d50
modin
1
80,802
62
11
14
163
16
0
83
240
get_attr
Fix up logger .warn() calls to use .warning() instead This is a usage that was deprecated in Python 3.0.
https://github.com/ansible/awx.git
def get_attr(self, attributes, conf_key, default_attribute): key = self.conf.get(conf_key, default_attribute) value = attributes[key] if key in attributes else None # In certain implementations (like https://pagure.io/ipsilon) this value is a string, not a list if isinstance(value, (list, tuple)): value = value[0] if conf_key in ('attr_first_name', 'attr_last_name', 'attr_username', 'attr_email') and value is None: logger.warning( "Could not map user detail '%s' from SAML attribute '%s'; " "update SOCIAL_AUTH_SAML_ENABLED_IDPS['%s']['%s'] with the correct SAML attribute.", conf_key[5:], key, self.name, conf_key, ) return str(value) if value is not None else value
105
backends.py
Python
awx/sso/backends.py
b852baaa39035b4836b1b5498dc702bf0f3bbd9b
awx
6
141,955
35
10
20
191
13
0
50
129
test_syncer_sync_exclude
[tune] Refactor Syncer / deprecate Sync client (#25655) This PR includes / depends on #25709 The two concepts of Syncer and SyncClient are confusing, as is the current API for passing custom sync functions. This PR refactors Tune's syncing behavior. The Sync client concept is hard deprecated. Instead, we offer a well defined Syncer API that can be extended to provide own syncing functionality. However, the default will be to use Ray AIRs file transfer utilities. New API: - Users can pass `syncer=CustomSyncer` which implements the `Syncer` API - Otherwise our off-the-shelf syncing is used - As before, syncing to cloud disables syncing to driver Changes: - Sync client is removed - Syncer interface introduced - _DefaultSyncer is a wrapper around the URI upload/download API from Ray AIR - SyncerCallback only uses remote tasks to synchronize data - Rsync syncing is fully depracated and removed - Docker and kubernetes-specific syncing is fully deprecated and removed - Testing is improved to use `file://` URIs instead of mock sync clients
https://github.com/ray-project/ray.git
def test_syncer_sync_exclude(temp_data_dirs): tmp_source, tmp_target = temp_data_dirs syncer = _DefaultSyncer() syncer.sync_up( local_dir=tmp_source, remote_dir="memory:///test/test_syncer_sync_exclude", exclude=["*_exclude*"], ) syncer.wait() syncer.sync_down( remote_dir="memory:///test/test_syncer_sync_exclude", local_dir=tmp_target ) syncer.wait() # Excluded files should not be found in target assert_file(True, tmp_target, "level0.txt") assert_file(False, tmp_target, "level0_exclude.txt") assert_file(True, tmp_target, "subdir/level1.txt") assert_file(False, tmp_target, "subdir/level1_exclude.txt") assert_file(True, tmp_target, "subdir/nested/level2.txt") assert_file(False, tmp_target, "subdir_nested_level2_exclude.txt") assert_file(False, tmp_target, "subdir_exclude/something/somewhere.txt")
113
test_syncer.py
Python
python/ray/tune/tests/test_syncer.py
6313ddc47cf9df4df8c8907997df559850a1b874
ray
1
64,502
19
12
11
103
9
0
30
18
set_holiday_list
test: employee leave balance report - fix expired leaves calculation when filters span across 2 different allocation periods
https://github.com/frappe/erpnext.git
def set_holiday_list(holiday_list, company_name): try: company = frappe.get_doc('Company', company_name) previous_holiday_list = company.default_holiday_list company.default_holiday_list = holiday_list company.save() yield finally: # restore holiday list setup company = frappe.get_doc('Company', company_name) company.default_holiday_list = previous_holiday_list company.save()
58
test_holiday_list.py
Python
erpnext/hr/doctype/holiday_list/test_holiday_list.py
c050ce49c2b3ad7b36640edf01099bb9cb002e9d
erpnext
2
107,139
38
10
7
73
8
0
46
140
_check_layout_engines_compat
ENH: implement and use base layout_engine for more flexible layout.
https://github.com/matplotlib/matplotlib.git
def _check_layout_engines_compat(self, old, new): if old is None or old.colorbar_gridspec == new.colorbar_gridspec: return True # colorbar layout different, so check if any colorbars are on the # figure... for ax in self.axes: if hasattr(ax, '_colorbar'): # colorbars list themselvs as a colorbar. return False return True
44
figure.py
Python
lib/matplotlib/figure.py
ec4dfbc3c83866f487ff0bc9c87b0d43a1c02b22
matplotlib
5
289,149
29
11
10
71
11
0
34
121
_async_update_data
Add reauth support to LaMetric (#80355) * Add reauth support to LaMetric * Adjust docblock
https://github.com/home-assistant/core.git
async def _async_update_data(self) -> Device: try: return await self.lametric.device() except LaMetricAuthenticationError as err: raise ConfigEntryAuthFailed from err except LaMetricError as ex: raise UpdateFailed( "Could not fetch device information from LaMetric device" ) from ex
40
coordinator.py
Python
homeassistant/components/lametric/coordinator.py
7bd9ce72f7c78413924cba9b8406b6492bb83494
core
3
303,744
7
6
3
34
6
0
8
22
custom_effects
Improve type hints in yeelight lights (#76018) Co-authored-by: Franck Nijhof <[email protected]>
https://github.com/home-assistant/core.git
def custom_effects(self) -> dict[str, dict[str, Any]]: return self._custom_effects
22
light.py
Python
homeassistant/components/yeelight/light.py
66b742f110025013e60ca8cac7aeb3247bac8f47
core
1
166,527
22
11
45
133
18
0
29
89
normalize
ENH: liboffsets support non-nano (#47076) * ENH: liboffsets support non-nano * fix non-mac buildS * mypy fixup * fix doctest
https://github.com/pandas-dev/pandas.git
def normalize(self) -> DatetimeArray: new_values = normalize_i8_timestamps(self.asi8, self.tz, reso=self._reso) dt64_values = new_values.view(self._ndarray.dtype) dta = type(self)._simple_new(dt64_values, dtype=dt64_values.dtype) dta = dta._with_freq("infer") if self.tz is not None: dta = dta.tz_localize(self.tz) return dta
83
datetimes.py
Python
pandas/core/arrays/datetimes.py
b3dee360925539d741a2c29f4e666c11a9d31087
pandas
2
81,681
49
13
12
136
14
0
58
174
cancel_dispatcher_process
Refactor canceling to work through messaging and signals, not database If canceled attempted before, still allow attempting another cancel in this case, attempt to send the sigterm signal again. Keep clicking, you might help! Replace other cancel_callbacks with sigterm watcher adapt special inventory mechanism for this too Get rid of the cancel_watcher method with exception in main thread Handle academic case of sigterm race condition Process cancelation as control signal Fully connect cancel method and run_dispatcher to control Never transition workflows directly to canceled, add logs
https://github.com/ansible/awx.git
def cancel_dispatcher_process(self): if not self.celery_task_id: return canceled = [] try: # Use control and reply mechanism to cancel and obtain confirmation timeout = 5 canceled = ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id]) except socket.timeout: logger.error(f'could not reach dispatcher on {self.controller_node} within {timeout}s') except Exception: logger.exception("error encountered when checking task status") return bool(self.celery_task_id in canceled) # True or False, whether confirmation was obtained
71
unified_jobs.py
Python
awx/main/models/unified_jobs.py
c59bbdecdbdd920c5d3d298d691129c6bbc94c5e
awx
4
287,680
150
17
57
521
43
0
290
1,177
state
Introduce UnitConverter protocol (#78888) * Introduce ConversionUtility * Use ConversionUtility in number * Use ConversionUtility in sensor * Use ConversionUtility in sensor recorder * Add normalise to ConversionUtility * Revert changes to recorder.py * Reduce size of PR * Adjust recorder statistics * Rename variable * Rename * Apply suggestion Co-authored-by: Erik Montnemery <[email protected]> * Apply suggestion Co-authored-by: Erik Montnemery <[email protected]> * Apply suggestion Co-authored-by: Erik Montnemery <[email protected]> Co-authored-by: Erik Montnemery <[email protected]>
https://github.com/home-assistant/core.git
def state(self) -> Any: native_unit_of_measurement = self.native_unit_of_measurement unit_of_measurement = self.unit_of_measurement value = self.native_value device_class = self.device_class # Received a datetime if value is not None and device_class == DEVICE_CLASS_TIMESTAMP: try: # We cast the value, to avoid using isinstance, but satisfy # typechecking. The errors are guarded in this try. value = cast(datetime, value) if value.tzinfo is None: raise ValueError( f"Invalid datetime: {self.entity_id} provides state '{value}', " "which is missing timezone information" ) if value.tzinfo != timezone.utc: value = value.astimezone(timezone.utc) return value.isoformat(timespec="seconds") except (AttributeError, OverflowError, TypeError) as err: raise ValueError( f"Invalid datetime: {self.entity_id} has timestamp device class " f"but provides state {value}:{type(value)} resulting in '{err}'" ) from err # Received a date value if value is not None and device_class == DEVICE_CLASS_DATE: try: # We cast the value, to avoid using isinstance, but satisfy # typechecking. The errors are guarded in this try. value = cast(date, value) return value.isoformat() except (AttributeError, TypeError) as err: raise ValueError( f"Invalid date: {self.entity_id} has date device class " f"but provides state {value}:{type(value)} resulting in '{err}'" ) from err if ( value is not None and native_unit_of_measurement != unit_of_measurement and device_class in UNIT_CONVERTERS ): assert unit_of_measurement assert native_unit_of_measurement value_s = str(value) prec = len(value_s) - value_s.index(".") - 1 if "." in value_s else 0 # Scale the precision when converting to a larger unit # For example 1.1 Wh should be rendered as 0.0011 kWh, not 0.0 kWh ratio_log = max( 0, log10( UNIT_RATIOS[device_class][native_unit_of_measurement] / UNIT_RATIOS[device_class][unit_of_measurement] ), ) prec = prec + floor(ratio_log) # Suppress ValueError (Could not convert sensor_value to float) with suppress(ValueError): value_f = float(value) # type: ignore[arg-type] value_f_new = UNIT_CONVERTERS[device_class].convert( value_f, native_unit_of_measurement, unit_of_measurement, ) # Round to the wanted precision value = round(value_f_new) if prec == 0 else round(value_f_new, prec) return value
284
__init__.py
Python
homeassistant/components/sensor/__init__.py
39315b7fe3b13be5c026a5e7d7180ec3715ab882
core
14
213,077
6
9
3
45
7
0
6
27
__delitem__
fix: Py27hash fix (#2182) * Add third party py27hash code * Add Py27UniStr and unit tests * Add py27hash_fix utils and tests * Add to_py27_compatible_template and tests * Apply py27hash fix to wherever it is needed * Apply py27hash fix, all tests pass except api_with_any_method_in_swagger * apply py27hash fix in openapi + run black * remove py27 testing * remove other py27 references * black fixes * fixes/typos * remove py27 from tox.ini * refactoring * third party notice * black * Fix py27hash fix to deal with null events * Fix Py27UniStr repr for unicode literals * black reformat * Update _template_has_api_resource to check data type more defensively * Apply py27Dict in _get_authorizers * Apply Py27Dict to authorizers and gateway responses which will go into swagger * Update to_py27_compatible_template to handle parameter_values; Add Py27LongInt class * Rename _convert_to_py27_dict to _convert_to_py27_type * Apply Py27UniStr to path param name * Handle HttpApi resource under to_py27_compatible_template * Fix InvalidDocumentException to not sort different exceptions * black reformat * Remove unnecessary test files Co-authored-by: Wing Fung Lau <[email protected]>
https://github.com/aws/serverless-application-model.git
def __delitem__(self, key): super(Py27Dict, self).__delitem__(key) self.keylist.remove(key)
27
py27hash_fix.py
Python
samtranslator/utils/py27hash_fix.py
a5db070f446b7cfebdaa6ad2e3dcf78f6105a272
serverless-application-model
1
147,479
5
8
2
29
4
0
5
19
from_object
[tune] Use new Checkpoint interface internally (#22801) Follow up from #22741, also use the new checkpoint interface internally. This PR is low friction and just replaces some internal bookkeeping methods. With the new Checkpoint interface, there is no need to revamp the save/restore APIs completely. Instead, we will focus on the bookkeeping part, which takes place in the Ray Tune's and Ray Train's checkpoint managers. These will be consolidated in a future PR.
https://github.com/ray-project/ray.git
def from_object(value=None): return _TuneCheckpoint(_TuneCheckpoint.MEMORY, value)
17
checkpoint_manager.py
Python
python/ray/tune/checkpoint_manager.py
1465eaa30634c189fe3ebc9db8609f47d19a78cc
ray
1
105,744
14
9
99
95
10
0
21
130
test_from_yaml_string
Dataset infos in yaml (#4926) * wip * fix Features yaml * splits to yaml * add _to_yaml_list * style * example: conll2000 * example: crime_and_punish * add pyyaml dependency * remove unused imports * remove validation tests * style * allow dataset_infos to be struct or list in YAML * fix test * style * update "datasets-cli test" + remove "version" * remove config definitions in conll2000 and crime_and_punish * remove versions for conll2000 and crime_and_punish * move conll2000 and cap dummy data * fix test * add tests * comments and tests * more test * don't mention the dataset_infos.json file in docs * nit in docs * docs * dataset_infos -> dataset_info * again * use id2label in class_label * update conll2000 * fix utf-8 yaml dump * --save_infos -> --save_info * Apply suggestions from code review Co-authored-by: Polina Kazakova <[email protected]> * style * fix reloading a single dataset_info * push info to README.md in push_to_hub * update test Co-authored-by: Polina Kazakova <[email protected]>
https://github.com/huggingface/datasets.git
def test_from_yaml_string(self): valid_yaml_string = _dedent( ) assert DatasetMetadata.from_yaml_string(valid_yaml_string) duplicate_yaml_keys = _dedent( ) with self.assertRaises(TypeError): DatasetMetadata.from_yaml_string(duplicate_yaml_keys) valid_yaml_with_optional_keys = _dedent( ) assert DatasetMetadata.from_yaml_string(valid_yaml_with_optional_keys)
51
test_metadata_util.py
Python
tests/test_metadata_util.py
67e65c90e9490810b89ee140da11fdd13c356c9c
datasets
1
276,835
68
15
29
277
10
0
112
374
slice_arrays
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def slice_arrays(arrays, start=None, stop=None): if arrays is None: return [None] if isinstance(start, list) and stop is not None: raise ValueError( "The stop argument has to be None if the value of start " f"is a list. Received start={start}, stop={stop}" ) elif isinstance(arrays, list): if hasattr(start, "__len__"): # hdf5 datasets only support list objects as indices if hasattr(start, "shape"): start = start.tolist() return [None if x is None else x[start] for x in arrays] return [ None if x is None else None if not hasattr(x, "__getitem__") else x[start:stop] for x in arrays ] else: if hasattr(start, "__len__"): if hasattr(start, "shape"): start = start.tolist() return arrays[start] if hasattr(start, "__getitem__"): return arrays[start:stop] return [None]
169
generic_utils.py
Python
keras/utils/generic_utils.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
15
314,372
7
10
6
37
6
0
7
21
__hash__
Speed up generation of template states (#73728) * Speed up generation of template states * tweak * cache * cache hash * weaken * Revert "weaken" This reverts commit 4856f500807c21aa1c9333d44fd53555bae7bb82. * lower cache size as it tends to be the same ones over and over * lower cache size as it tends to be the same ones over and over * lower cache size as it tends to be the same ones over and over * cover * Update homeassistant/helpers/template.py Co-authored-by: Paulus Schoutsen <[email protected]> * id reuse is possible * account for iterting all sensors Co-authored-by: Paulus Schoutsen <[email protected]>
https://github.com/home-assistant/core.git
def __hash__(self) -> int: return hash((id(self), self.last_updated))
22
core.py
Python
homeassistant/core.py
32e0d9f47c47c1caea81cfe56150531beeafb3f7
core
1
42,808
19
10
11
106
15
0
20
113
test_previous_pods_ignored_for_reattached
Use KubernetesHook to create api client in KubernetesPodOperator (#20578) Add support for k8s hook in KPO; use it always (even when no conn id); continue to consider the core k8s settings that KPO already takes into account but emit deprecation warning about them. KPO historically takes into account a few settings from core airflow cfg (e.g. verify ssl, tcp keepalive, context, config file, and in_cluster). So to use the hook to generate the client, somehow the hook has to take these settings into account. But we don't want the hook to consider these settings in general. So we read them in KPO and if necessary patch the hook and warn.
https://github.com/apache/airflow.git
def test_previous_pods_ignored_for_reattached(self): k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", name="test", task_id="task", ) self.run_pod(k) k.client.list_namespaced_pod.assert_called_once() _, kwargs = k.client.list_namespaced_pod.call_args assert 'already_checked!=True' in kwargs['label_selector']
60
test_kubernetes_pod.py
Python
tests/providers/cncf/kubernetes/operators/test_kubernetes_pod.py
60eb9e106f5915398eafd6aa339ec710c102dc09
airflow
1
272,260
72
11
18
409
27
0
105
178
make_training_model
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def make_training_model(): float_in = tf.keras.Input(shape=(1,), dtype="float32", name="float_col") # After preprocessing, both the string and int column are integer ready for # embedding. int_in = tf.keras.Input(shape=(1,), dtype="int64", name="int_col") string_in = tf.keras.Input(shape=(1,), dtype="int64", name="string_col") # Feed the lookup layers into an embedding. int_embedding = tf.keras.layers.Embedding(VOCAB_SIZE + 1, 8, input_length=1) int_out = int_embedding(int_in) int_out = tf.keras.layers.Flatten()(int_out) string_embedding = tf.keras.layers.Embedding( VOCAB_SIZE + 1, 8, input_length=1 ) string_out = string_embedding(string_in) string_out = tf.keras.layers.Flatten()(string_out) # Concatenate outputs. concatate = tf.keras.layers.Concatenate() # Feed our preprocessed inputs into a simple MLP. x = concatate((float_in, int_out, string_out)) x = tf.keras.layers.Dense(32, activation="relu")(x) x = tf.keras.layers.Dense(32, activation="relu")(x) outputs = tf.keras.layers.Dense(1, activation="softmax")(x) return tf.keras.Model(inputs=(float_in, int_in, string_in), outputs=outputs)
255
preprocessing_test_utils.py
Python
keras/integration_test/preprocessing_test_utils.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
294,289
18
9
11
99
15
0
30
116
icon
Use DataUpdateCoordinator in here_travel_time (#61398) * Add DataUpdateCoordinator * Use TypedDict for extra_state_attributes * Extend DataUpdateCoordinator * Use platform enum * Use attribution property * Use relative imports * Revert native_value logic * Directly return result in build_hass_attribution * Correctly declare traffic_mode as bool * Use self._attr_* * Fix mypy issues * Update homeassistant/components/here_travel_time/__init__.py Co-authored-by: Allen Porter <[email protected]> * Update homeassistant/components/here_travel_time/__init__.py Co-authored-by: Allen Porter <[email protected]> * Update homeassistant/components/here_travel_time/sensor.py Co-authored-by: Allen Porter <[email protected]> * blacken * from datetime import time * remove none check * Move dataclasses to models.py * Set destination to now if None * Add mypy error code Co-authored-by: Allen Porter <[email protected]>
https://github.com/home-assistant/core.git
def icon(self) -> str: if self.coordinator.config.travel_mode == TRAVEL_MODE_BICYCLE: return ICON_BICYCLE if self.coordinator.config.travel_mode == TRAVEL_MODE_PEDESTRIAN: return ICON_PEDESTRIAN if self.coordinator.config.travel_mode in TRAVEL_MODES_PUBLIC: return ICON_PUBLIC if self.coordinator.config.travel_mode == TRAVEL_MODE_TRUCK: return ICON_TRUCK return ICON_CAR
62
sensor.py
Python
homeassistant/components/here_travel_time/sensor.py
adbacdd5c222bcc4803d570072f1db6b2e1f5bbb
core
5
274,555
16
7
2
71
10
1
16
37
_ragged_tensor_mse
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _ragged_tensor_mse(y_true, y_pred): return _ragged_tensor_apply_loss(mean_squared_error, y_true, y_pred) @keras_export( "keras.metrics.mean_absolute_error", "keras.metrics.mae", "keras.metrics.MAE", "keras.losses.mean_absolute_error", "keras.losses.mae", "keras.losses.MAE", ) @tf.__internal__.dispatch.add_dispatch_support
@keras_export( "keras.metrics.mean_absolute_error", "keras.metrics.mae", "keras.metrics.MAE", "keras.losses.mean_absolute_error", "keras.losses.mae", "keras.losses.MAE", ) @tf.__internal__.dispatch.add_dispatch_support
17
losses.py
Python
keras/losses.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
280,311
80
18
33
273
21
1
110
410
save_model
Prepare public API surface for v3 saving. PiperOrigin-RevId: 484397600
https://github.com/keras-team/keras.git
def save_model(model, filepath, overwrite=True, save_format=None, **kwargs): save_format = get_save_format(filepath, save_format) if save_format not in ("keras", "tf", "h5", "keras_v3"): raise ValueError( "Unknown `save_format` argument. Expected one of " "'keras', 'tf', or 'h5'. " f"Received: save_format{save_format}" ) if save_format == "keras_v3" or ( saving_lib.saving_v3_enabled() and save_format == "keras" ): # If file exists and should not be overwritten. try: exists = os.path.exists(filepath) except TypeError: exists = False if exists and not overwrite: proceed = io_utils.ask_to_proceed_with_overwrite(filepath) if not proceed: return if kwargs: raise ValueError( "The following argument(s) are not supported " f"with the native Keras format: {list(kwargs.keys())}" ) saving_lib.save_model(model, filepath) else: # Legacy case return legacy_sm_saving_lib.save_model( model, filepath, overwrite=overwrite, save_format=save_format, **kwargs, ) @keras_export("keras.models.load_model")
@keras_export("keras.models.load_model")
144
saving_api.py
Python
keras/saving/saving_api.py
c9068087d9142bab573e0c300bf9874a957accff
keras
10
128,770
67
11
35
606
18
0
165
294
test_metrics_agent_proxy_record_and_export_basic
[Metrics] Clean up metrics from a dead workers. (#29115) This PR does several things. Clean up MetricsAgent API and add class-level unit tests. Add WorkerId to metrics report to figure out if the request is from workers. New logic to clean up metrics when the worker is dead. The logic is as follow; We use the metrics report as a heartbeat and clean up all related metrics when the report doesn't come on time. In order to clean up metrics, we should clean up view_data.tag_value_aggregation_data_map[tag_vals]. In order to properly clean this up, we should track worker ID -> corresponding tag vals that it owns. We can guarantee 1 unique tuple of tag vals cannot be owned by multiple workers because it always contains the worker id
https://github.com/ray-project/ray.git
def test_metrics_agent_proxy_record_and_export_basic(get_agent): namespace = "test" agent, agent_port = get_agent # Test the basic case. m = generate_protobuf_metric( "test", "desc", "", label_keys=["a", "b"], timeseries=[] ) m.timeseries.append(generate_timeseries(["a", "b"], [1, 2, 3])) agent.proxy_export_metrics([m]) name, samples = get_metric(f"{namespace}_test", agent_port) assert name == f"{namespace}_test" assert len(samples) == 1 assert samples[0].labels == {"a": "a", "b": "b"} assert samples[0].value == 3 # Test new metric has proxyed. m = generate_protobuf_metric( "test", "desc", "", label_keys=["a", "b"], timeseries=[] ) m.timeseries.append(generate_timeseries(["a", "b"], [4])) agent.proxy_export_metrics([m]) name, samples = get_metric(f"{namespace}_test", agent_port) assert name == f"{namespace}_test" assert len(samples) == 1 assert samples[0].labels == {"a": "a", "b": "b"} assert samples[0].value == 4 # Test new metric with different tag is reported. m = generate_protobuf_metric( "test", "desc", "", label_keys=["a", "b"], timeseries=[] ) m.timeseries.append(generate_timeseries(["a", "c"], [5])) agent.proxy_export_metrics([m]) name, samples = get_metric(f"{namespace}_test", agent_port) assert name == f"{namespace}_test" assert len(samples) == 2 assert samples[0].labels == {"a": "a", "b": "b"} assert samples[0].value == 4 # Newly added metric has different tags and values. assert samples[1].labels == {"a": "a", "b": "c"} assert samples[1].value == 5
341
test_metrics_agent_2.py
Python
python/ray/tests/test_metrics_agent_2.py
c48cba7b4b43e29843954a3d102763cf5b33472e
ray
1
8,366
30
13
17
108
15
0
34
80
load_training_stats_for_viz
Changes learning_curves to use "step" or "epoch" as x-axis label. (#2578) * Started building dataclasses for model training output. * Adds EvaluationFrequency to training stats, dataclasses for training results. * Adds x_label, x_step to learning_curves. * fix x axis when using checkpoints_per_epoch. * Fixes CLI test by making dataclass JSON-serializable and implementing __contains__. * Adds default value for EvaluationFrequency, maybe fixes test_learning_curves with only training metrics. * Fixes kfold CV. * Fixes viz tests, restoring original functionality of load_data_for_viz * Adds todos to deprecate.
https://github.com/ludwig-ai/ludwig.git
def load_training_stats_for_viz(load_type, model_file_statistics, dtype=int, ground_truth_split=2) -> TrainingStats: stats_per_model = load_data_for_viz( load_type, model_file_statistics, dtype=dtype, ground_truth_split=ground_truth_split ) try: stats_per_model = [TrainingStats.Schema().load(j) for j in stats_per_model] except Exception: logger.exception(f"Failed to load model statistics {model_file_statistics}!") raise return stats_per_model
67
visualize.py
Python
ludwig/visualize.py
4f40ffec8e81eb3f6385243498babe1409a675be
ludwig
3
258,777
41
16
21
216
15
0
77
340
_w_intercept_raw
ENH Loss module LogisticRegression (#21808) * ENH replace loss in linear logistic regression * MNT remove logistic regression's own loss functions * CLN remove comment * DOC add whatsnew * DOC more precise whatsnew * CLN restore improvements of #19571 * ENH improve fit time by separating mat-vec in multiclass * DOC update whatsnew * not only a bit ;-) * DOC note memory benefit for multiclass case * trigger CI * trigger CI * CLN rename variable to hess_prod * DOC address reviewer comments * CLN remove C/F for 1d arrays * CLN rename to gradient_per_sample * CLN rename alpha to l2_reg_strength * ENH respect F-contiguity * TST fix sag tests * CLN rename to LinearModelLoss * CLN improve comments according to review * CLN liblinear comment * TST add / move test to test_linear_loss.py * CLN comment placement * trigger CI * CLN add comment about contiguity of raw_prediction * DEBUG debian-32 * DEBUG test only linear_model module * Revert "DEBUG test only linear_model module" This reverts commit 9d6e6987ff4fbcd32fc9a07944b260688162e14b. * DEBUG test -k LogisticRegression * Revert "DEBUG test -k LogisticRegression" This reverts commit c20316704185da400857b0a3f32935ee1b56c8d9. * Revert "DEBUG debian-32" This reverts commit ef0b98f23251d1b2b0bd8801e456f258392a8d18. * DEBUG set n_jobs=1 * Revert "DEBUG set n_jobs=1" This reverts commit c7f6f72a8c1ee21299786130e097df248fc1a0fb. * CLN always use n_threads=1 * CLN address review * ENH avoid array copy * CLN simplify L2 norm * CLN rename w to weights * CLN rename to hessian_sum and hx_sum * CLN address review * CLN rename to init arg and attribute to base_loss * CLN apply review suggestion Co-authored-by: Alexandre Gramfort <[email protected]> * CLN base_loss instead of _loss attribute Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Alexandre Gramfort <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def _w_intercept_raw(self, coef, X): if not self.base_loss.is_multiclass: if self.fit_intercept: intercept = coef[-1] weights = coef[:-1] else: intercept = 0.0 weights = coef raw_prediction = X @ weights + intercept else: # reshape to (n_classes, n_dof) if coef.ndim == 1: weights = coef.reshape((self.base_loss.n_classes, -1), order="F") else: weights = coef if self.fit_intercept: intercept = weights[:, -1] weights = weights[:, :-1] else: intercept = 0.0 raw_prediction = X @ weights.T + intercept # ndarray, likely C-contiguous return weights, intercept, raw_prediction
136
_linear_loss.py
Python
sklearn/linear_model/_linear_loss.py
d8d5637cfe372dd353dfc9f79dbb63c3189a9ecc
scikit-learn
5
160,738
45
11
43
97
16
0
55
97
assert_almost_equal
API: Enforce float64 precision for `assert_almost_equal` This ensures that the precision is not downcast, which could make a small value zero (for float16 mostly). This lets tests pass that check whether `np.float16(0)` is almost equal to 0, which otherwise fail (because `float16(0.00000001)` will evaluate to 0 exactly.
https://github.com/numpy/numpy.git
def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True): __tracebackhide__ = True # Hide traceback for py.test from numpy.core import ndarray from numpy.lib import iscomplexobj, real, imag # Handle complex numbers: separate into real/imag to handle # nan/inf/negative zero correctly # XXX: catch ValueError for subclasses of ndarray where iscomplex fail try: usecomplex = iscomplexobj(actual) or iscomplexobj(desired) except ValueError: usecomplex = False
291
utils.py
Python
numpy/testing/_private/utils.py
80af7aceb1af9e35ed67abce66beab46ddd2bfa6
numpy
18
272,635
20
11
7
115
14
1
21
73
test_locallyconnected1d_invalid_output_shapes
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def test_locallyconnected1d_invalid_output_shapes(self): kwargs = {"filters": 2, "kernel_size": 10} with self.assertRaisesRegex( ValueError, r ): layer = keras.layers.LocallyConnected1D(**kwargs) layer.build((None, 5, 2)) @test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
50
locally_connected_test.py
Python
keras/layers/locally_connected/locally_connected_test.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
9,958
4
8
2
25
4
0
4
18
status
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
https://github.com/jina-ai/jina.git
def status(self): return self.proto.header.status
14
data.py
Python
jina/types/request/data.py
933415bfa1f9eb89f935037014dfed816eb9815d
jina
1
150,418
10
10
3
45
7
0
10
35
broadcast
initial concept for replicate, basic leader and follower logic
https://github.com/freqtrade/freqtrade.git
async def broadcast(self, data): for channel in self.channels.values(): await channel.send(data)
26
channel.py
Python
freqtrade/rpc/replicate/channel.py
9f6bba40af1a407f190a89f5c0c8b4e3f528ba46
freqtrade
2
247,129
7
11
8
39
7
0
8
29
test_preserves_stacktraces_on_preformed_failure
Improve exception handling for concurrent execution (#12109) * fix incorrect unwrapFirstError import this was being imported from the wrong place * Refactor `concurrently_execute` to use `yieldable_gather_results` * Improve exception handling in `yieldable_gather_results` Try to avoid swallowing so many stack traces. * mark unwrapFirstError deprecated * changelog
https://github.com/matrix-org/synapse.git
def test_preserves_stacktraces_on_preformed_failure(self): d1 = Deferred() f = Failure(_TestException("bah"))
46
test_async_helpers.py
Python
tests/util/test_async_helpers.py
9d11fee8f223787c04c6574b8a30967e2b73cc35
synapse
1
20,217
6
8
3
27
4
0
6
20
user_config_path
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def user_config_path(self) -> Path: return Path(self.user_config_dir)
15
api.py
Python
pipenv/patched/notpip/_vendor/platformdirs/api.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
1
293,952
48
11
38
461
18
0
134
300
test_is_hass_url
Improve error handling process_play_media_url (#68322)
https://github.com/home-assistant/core.git
async def test_is_hass_url(hass): assert hass.config.api is None assert hass.config.internal_url is None assert hass.config.external_url is None assert is_hass_url(hass, "http://example.com") is False assert is_hass_url(hass, "bad_url") is False assert is_hass_url(hass, "bad_url.com") is False assert is_hass_url(hass, "http:/bad_url.com") is False hass.config.api = Mock(use_ssl=False, port=8123, local_ip="192.168.123.123") assert is_hass_url(hass, "http://192.168.123.123:8123") is True assert is_hass_url(hass, "https://192.168.123.123:8123") is False assert is_hass_url(hass, "http://192.168.123.123") is False await async_process_ha_core_config( hass, {"internal_url": "http://example.local:8123"}, ) assert is_hass_url(hass, "http://example.local:8123") is True assert is_hass_url(hass, "https://example.local:8123") is False assert is_hass_url(hass, "http://example.local") is False await async_process_ha_core_config( hass, {"external_url": "https://example.com:443"}, ) assert is_hass_url(hass, "https://example.com:443") is True assert is_hass_url(hass, "https://example.com") is True assert is_hass_url(hass, "http://example.com:443") is False assert is_hass_url(hass, "http://example.com") is False with patch.object( hass.components.cloud, "async_remote_ui_url", return_value="https://example.nabu.casa", ): assert is_hass_url(hass, "https://example.nabu.casa") is False hass.config.components.add("cloud") assert is_hass_url(hass, "https://example.nabu.casa:443") is True assert is_hass_url(hass, "https://example.nabu.casa") is True assert is_hass_url(hass, "http://example.nabu.casa:443") is False assert is_hass_url(hass, "http://example.nabu.casa") is False
274
test_network.py
Python
tests/helpers/test_network.py
929df2bc29b377b746079afcbb9b5d94b4a51f11
core
1
113,067
27
11
7
88
10
0
30
91
first_cell_transformation_factory
Make models in search space hub work with one-shot (#4921)
https://github.com/microsoft/nni.git
def first_cell_transformation_factory(self) -> Optional[nn.Module]: if self.downsampling: return FactorizedReduce(self.estimated_out_channels_prev, self.estimated_out_channels) elif self.estimated_out_channels_prev is not self.estimated_out_channels: # Can't use != here, ValueChoice doesn't support return ReLUConvBN(self.estimated_out_channels_prev, self.estimated_out_channels, 1, 1, 0) return None
58
nasnet.py
Python
nni/retiarii/hub/pytorch/nasnet.py
2815fb1f22e36854463d3ede0a9da4aa7435739a
nni
3
48,720
2
6
9
13
2
0
2
9
test_empty_html_checkbox_allow_null_with_default
Fix BooleanField's allow_null behavior (#8614) * Fix BooleanField's allow_null behavior * Update rest_framework.fields - Use .get with default value for 'allow_null' kwarg in BooleanField's init
https://github.com/encode/django-rest-framework.git
def test_empty_html_checkbox_allow_null_with_default(self):
82
test_fields.py
Python
tests/test_fields.py
1fbe16a8d26ff5be64797cafb7004898f72ca52b
django-rest-framework
1
262,773
31
12
6
118
15
0
32
54
remove_signature_from_binary
macOS: Remove the timeouts for codesigning/signature stripping/lipo. (#6644)
https://github.com/pyinstaller/pyinstaller.git
def remove_signature_from_binary(filename): logger.debug("Removing signature from file %r", filename) cmd_args = ['codesign', '--remove', '--all-architectures', filename] p = subprocess.run(cmd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) if p.returncode: raise SystemError(f"codesign command ({cmd_args}) failed with error code {p.returncode}!\noutput: {p.stdout}")
60
osx.py
Python
PyInstaller/utils/osx.py
1cd3b73e2939052271a0bc26cf204eebee4dcd15
pyinstaller
2
270,678
12
11
4
49
6
0
14
54
_set_trainable_state
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _set_trainable_state(self, trainable_state): for layer in self._flatten_layers(): if layer in trainable_state: layer.trainable = trainable_state[layer]
30
base_layer.py
Python
keras/engine/base_layer.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
3
30,247
80
12
18
314
15
0
95
226
get_config
fixed arguments for frozen env fixed pylint errors fixed arguments black fixed argument parser for all scenarios black docs black
https://github.com/spotDL/spotify-downloader.git
def get_config() -> Dict[str, Any]: config_path = get_config_file() if not config_path.exists(): raise ConfigError( "Config file not found." "Please run `spotdl --generate-config` to create a config file." ) with open(config_path, "r", encoding="utf-8") as config_file: return json.load(config_file) DEFAULT_CONFIG = { "log_level": "INFO", "simple_tui": False, "cache_path": str(get_cache_path()), "audio_provider": "youtube-music", "lyrics_provider": "musixmatch", "ffmpeg": "ffmpeg", "variable_bitrate": None, "constant_bitrate": None, "ffmpeg_args": None, "format": "mp3", "save_file": None, "m3u": None, "output": "{artists} - {title}.{output-ext}", "overwrite": "skip", "client_id": "5f573c9620494bae87890c0f08a60293", "client_secret": "212476d9b0f3472eaa762d90b19b0ba8", "user_auth": False, "search_query": "{artists} - {title}", "filter_results": True, "threads": 4, "no_cache": False, "cookie_file": None, "headless": False, "restrict": False, "print_errors": False, "sponsor_block": False, }
52
config.py
Python
spotdl/utils/config.py
773398048b7990ab58e2998fe4d15355f7998774
spotify-downloader
2
268,488
19
12
9
92
12
0
25
96
_is_in_config_mode
Add `use_rsa_sha2_algorithms` option for paramiko (#78789) Fixes #76737 Fixes #77673 Co-authored-by: Matt Clay <[email protected]>
https://github.com/ansible/ansible.git
def _is_in_config_mode(self): cfg_mode = False cur_prompt = to_text( self.get_prompt(), errors="surrogate_then_replace" ).strip() cfg_prompt = getattr(self._terminal, "terminal_config_prompt", None) if cfg_prompt and cfg_prompt.match(cur_prompt): cfg_mode = True return cfg_mode
54
network_cli.py
Python
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/connection/network_cli.py
76b746655a36807fa9198064ca9fe7c6cc00083a
ansible
3
110,757
10
11
6
56
7
0
10
48
context
DOC: Simplify some grid tables These are not so complicated that they can't use simple tables, though in one case, I thought a definition list looked better.
https://github.com/matplotlib/matplotlib.git
def context(style, after_reset=False): with mpl.rc_context(): if after_reset: mpl.rcdefaults() use(style) yield
30
core.py
Python
lib/matplotlib/style/core.py
41e83e22009669b4507a84675d9841c4c172c745
matplotlib
2
259,847
14
12
4
73
10
0
15
27
test_compute_sample_weight_sparse
FIX Make compute_sample_weight work on sparse target (#23115) Co-authored-by: Guillaume Lemaitre <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def test_compute_sample_weight_sparse(): y = sparse.csc_matrix(np.asarray([0, 1, 1])).T sample_weight = compute_sample_weight("balanced", y) assert_allclose(sample_weight, [1.5, 0.75, 0.75])
52
test_class_weight.py
Python
sklearn/utils/tests/test_class_weight.py
00c13632b5022d7e445b38832106a2316ca124fd
scikit-learn
1
291,920
27
11
12
110
12
0
30
95
mock_create_stream
Update nest camera to pull still images from stream component (#66427) * Update nest to use stream thumbnail when it exists * Update nest camera to always pull still image from stream Update nest camera to always pull the still iamge from the stream component, removing the use of the separate ffmpeg call, and removing use of the nest event image. Image for events can now be pulled using the media source APIs, rather than relying on the camera snapshot. * Simplify a comment * Remove more unused variables * Simplify comments, image, and test code * Remove assertions for placeholder images
https://github.com/home-assistant/core.git
async def mock_create_stream(hass) -> Mock: assert await async_setup_component(hass, "stream", {}) with patch( "homeassistant.components.camera.create_stream", autospec=True ) as mock_stream: mock_stream.return_value.endpoint_url.return_value = ( "http://home.assistant/playlist.m3u8" ) mock_stream.return_value.async_get_image = AsyncMock() mock_stream.return_value.async_get_image.return_value = IMAGE_BYTES_FROM_STREAM yield mock_stream
62
test_camera_sdm.py
Python
tests/components/nest/test_camera_sdm.py
572fa7d0552c2726d919067db72abe1efdfb19c4
core
1
275,978
4
6
2
16
3
0
4
18
python_properties
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def python_properties(self): raise NotImplementedError
8
base_serialization.py
Python
keras/saving/saved_model/base_serialization.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
175,181
47
17
21
278
28
0
83
248
wait
bpo-20369: concurrent.futures.wait() now deduplicates futures given a… (GH-30168) * bpo-20369: concurrent.futures.wait() now deduplicates futures given as arg. * 📜🤖 Added by blurb_it. Co-authored-by: blurb-it[bot] <43283697+blurb-it[bot]@users.noreply.github.com>
https://github.com/python/cpython.git
def wait(fs, timeout=None, return_when=ALL_COMPLETED): fs = set(fs) with _AcquireFutures(fs): done = {f for f in fs if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]} not_done = fs - done if (return_when == FIRST_COMPLETED) and done: return DoneAndNotDoneFutures(done, not_done) elif (return_when == FIRST_EXCEPTION) and done: if any(f for f in done if not f.cancelled() and f.exception() is not None): return DoneAndNotDoneFutures(done, not_done) if len(done) == len(fs): return DoneAndNotDoneFutures(done, not_done) waiter = _create_and_install_waiters(fs, return_when) waiter.event.wait(timeout) for f in fs: with f._condition: f._waiters.remove(waiter) done.update(waiter.finished_futures) return DoneAndNotDoneFutures(done, fs - done)
177
_base.py
Python
Lib/concurrent/futures/_base.py
7d7817cf0f826e566d8370a0e974bbfed6611d91
cpython
13
338,353
110
17
33
308
31
0
156
680
join_uneven_inputs
Add `join_uneven_inputs` context manager to Accelerator (#820) * Add test for join context manager * Add join_uneven_inputs context manager * Format * add conditional import for join * Replace bare yield with nullcontext * Update accelerator to maintain references to dataloaders * add override option to join context manager * format * Add minimal docstring * updates based on initial feedback * remove launcher used for local testing from test script * fix quality issues * DEBUG: try resetting accelerator state to fix test * Revert "DEBUG: try resetting accelerator state to fix test" This reverts commit a13a56ea8e084cad72317cd451a176a2d3fa5dff. * Reset state after accelerator tests * Update src/accelerate/accelerator.py Co-authored-by: Zachary Mueller <[email protected]> * Warn if at least one iterable dataset seen * remove launcher used for local test running Co-authored-by: Zachary Mueller <[email protected]>
https://github.com/huggingface/accelerate.git
def join_uneven_inputs(self, joinables, even_batches=None): if is_torch_version("<", "1.10.0"): raise ValueError(f"Joining uneven inputs requires PyTorch >= 1.10.0, You have {torch.__version__}.") if self.distributed_type == DistributedType.MULTI_GPU: dl_even_batches_values = [] if even_batches is not None: iterable_dl_seen = False # override value in batch sampler for map-style datasets for dl_idx, dl in enumerate(self._dataloaders): if isinstance(dl, DataLoaderDispatcher): iterable_dl_seen = True continue dl_even_batches_values.append((dl_idx, dl.batch_sampler.even_batches)) dl.batch_sampler.even_batches = even_batches if iterable_dl_seen: warnings.warn( "Overridding even_batches is only supported for map-style datasets, yet some dataloaders given were iterable" ) else: even_batches = self.even_batches enable_join = False if even_batches else True try: with Join(joinables, enable=enable_join, throw_on_early_termination=False): yield finally: # reset any batch samplers that have been modified for dl_idx, even_batches_value in dl_even_batches_values: self._dataloaders[dl_idx].batch_sampler.even_batches = even_batches_value else: # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs if self.distributed_type != DistributedType.NO: warnings.warn( "Joining uneven inputs is only supported for multi-GPU training, as a result `join_uneven_inputs` will have no effect." ) with contextlib.nullcontext(joinables): yield
180
accelerator.py
Python
src/accelerate/accelerator.py
074d8d5a5a131501cc0a2a690cc14337395dcb09
accelerate
11
133,390
13
10
5
45
8
0
14
53
_load_state_id
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def _load_state_id(self, state_id): remote_calls = [ worker.load_state_stream.remote(state_id) for worker in self.remote_workers ] return remote_calls
28
worker_group.py
Python
python/ray/util/sgd/torch/worker_group.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
2
246,126
6
6
7
20
2
0
6
20
all
Create singletons for `StateFilter.{all,none}()` (#11836) No point recreating these for each call, since they are frozen
https://github.com/matrix-org/synapse.git
def all() -> "StateFilter": return _ALL_STATE_FILTER
9
state.py
Python
synapse/storage/state.py
57e4786e907c390502f4ec6fb915e24cf5124351
synapse
1
271,859
109
13
17
241
20
0
173
313
_append_composite_tensor
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _append_composite_tensor(target, to_append): if type(target) is not type(to_append): raise RuntimeError( "Unable to concatenate %s and %s" % (type(target), type(to_append)) ) # Perform type-specific concatenation. # TODO(b/125094323): This should be replaced by a simple call to # target.append() that should work on all of the below classes. # If we're seeing a CompositeTensor here, we know it's because we're in # Eager mode (or else we'd have evaluated the CT to a CT Value object # already). Therefore, it's safe to call concat() on it without evaluating # the result any further. If not - that is, if we're seeing a # SparseTensorValue or a RaggedTensorValue - we need to hand-update it # since we're outside of the graph anyways. if isinstance(target, tf.SparseTensor): # We need to invoke the sparse version of concatenate here - tf.concat # won't work. return tf.compat.v1.sparse_concat(sp_inputs=[target, to_append], axis=0) elif isinstance(target, tf.RaggedTensor): return tf.concat([target, to_append], axis=0) elif isinstance(target, tf.compat.v1.SparseTensorValue): return _append_sparse_tensor_value(target, to_append) elif isinstance(target, tf.compat.v1.ragged.RaggedTensorValue): return _append_ragged_tensor_value(target, to_append) else: raise RuntimeError( "Attempted to concatenate unsupported object %s." % type(target) )
149
training_utils_v1.py
Python
keras/engine/training_utils_v1.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
6
194,407
3
7
2
22
3
0
3
17
on_double_tap
Textinput on double tap improvement (#7636) * Improved on_double_tap in textinput - words are selected considering commas, brackets, etc. * Created separate method for word selection * Minor refactoring
https://github.com/kivy/kivy.git
def on_double_tap(self): self._select_word()
11
textinput.py
Python
kivy/uix/textinput.py
4320c7271cbedef1cf59cb4a381f6a75c4f6738c
kivy
1
267,983
49
12
12
141
17
0
63
163
get_docker_container_id
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
https://github.com/ansible/ansible.git
def get_docker_container_id() -> t.Optional[str]: path = '/proc/self/cpuset' container_id = None if os.path.exists(path): # File content varies based on the environment: # No Container: / # Docker: /docker/c86f3732b5ba3d28bb83b6e14af767ab96abbc52de31313dcb1176a62d91a507 # Azure Pipelines (Docker): /azpl_job/0f2edfed602dd6ec9f2e42c867f4d5ee640ebf4c058e6d3196d4393bb8fd0891 # Podman: /../../../../../.. contents = read_text_file(path) cgroup_path, cgroup_name = os.path.split(contents.strip()) if cgroup_path in ('/docker', '/azpl_job'): container_id = cgroup_name if container_id: display.info('Detected execution in Docker container: %s' % container_id, verbosity=1) return container_id
79
docker_util.py
Python
test/lib/ansible_test/_internal/docker_util.py
3eb0485dd92c88cc92152d3656d94492db44b183
ansible
4
293,626
8
8
3
40
6
1
8
13
code_expiration_delta
Update google calendar integration with a config flow (#68010) * Convert google calendar to config flow and async * Call correct exchange method * Fix async method and reduce unnecessary diffs * Wording improvements * Reduce unnecessary diffs * Run load/update config from executor * Update homeassistant/components/google/calendar.py Co-authored-by: Martin Hjelmare <[email protected]> * Remove unnecessary updating of unexpected multiple config entries. * Remove unnecessary unique_id checks * Improve readability with comments about device code expiration * Update homeassistant/components/google/calendar.py Co-authored-by: Martin Hjelmare <[email protected]> * Update homeassistant/components/google/calendar.py Co-authored-by: Martin Hjelmare <[email protected]> * Update homeassistant/components/google/api.py Co-authored-by: Martin Hjelmare <[email protected]> * Add comment for when code is none on timeout Co-authored-by: Martin Hjelmare <[email protected]>
https://github.com/home-assistant/core.git
async def code_expiration_delta() -> datetime.timedelta: return datetime.timedelta(minutes=3) @pytest.fixture
@pytest.fixture
18
test_config_flow.py
Python
tests/components/google/test_config_flow.py
7876ffe9e392b20da16f0d0c44c723f526f807e6
core
1
268,706
5
8
2
32
7
0
5
19
root_path
ansible-test - Improve container management. (#78550) See changelogs/fragments/ansible-test-container-management.yml for details.
https://github.com/ansible/ansible.git
def root_path(self): return pathlib.PurePosixPath(CGroupPath.ROOT, self.subsystem)
19
cgroup.py
Python
test/lib/ansible_test/_internal/cgroup.py
cda16cc5e9aa8703fb4e1ac0a0be6b631d9076cc
ansible
1
287,988
57
12
35
363
28
0
76
506
_data_to_save
Add serial_number to device registry entries (#77713)
https://github.com/home-assistant/core.git
def _data_to_save(self) -> dict[str, list[dict[str, Any]]]: data: dict[str, list[dict[str, Any]]] = {} data["devices"] = [ { "area_id": entry.area_id, "config_entries": list(entry.config_entries), "configuration_url": entry.configuration_url, "connections": list(entry.connections), "disabled_by": entry.disabled_by, "entry_type": entry.entry_type, "hw_version": entry.hw_version, "id": entry.id, "identifiers": list(entry.identifiers), "manufacturer": entry.manufacturer, "model": entry.model, "name_by_user": entry.name_by_user, "name": entry.name, "serial_number": entry.serial_number, "sw_version": entry.sw_version, "via_device_id": entry.via_device_id, } for entry in self.devices.values() ] data["deleted_devices"] = [ { "config_entries": list(entry.config_entries), "connections": list(entry.connections), "identifiers": list(entry.identifiers), "id": entry.id, "orphaned_timestamp": entry.orphaned_timestamp, } for entry in self.deleted_devices.values() ] return data
224
device_registry.py
Python
homeassistant/helpers/device_registry.py
cba3b6ad944408b9ffd906f4da5e5f5fd615b174
core
3
85,459
56
11
37
325
33
0
86
275
test_spanner_indexer_individual_insert
feat(indexer-spanner): Implementation of core api's (#37802) Implementation of all the api's of `RawCloudSpannerIndexer`. The `bulk_record` implementation uses DML instead of mutations. Did not implement the `bulk_record` implementation using mutations since this PR is already big. The test cases run when setup correctly with our cloud instance.
https://github.com/getsentry/sentry.git
def test_spanner_indexer_individual_insert(testing_indexer): codec = IdCodec() indexed_string = get_random_string(10) model1_id = get_id() expected_key_result = KeyResults() expected_key_result.add_key_result(KeyResult(org_id=55555, string=indexed_string, id=model1_id)) key_results1 = KeyResults() model1 = SpannerIndexerModel( id=codec.encode(model1_id), decoded_id=model1_id, string=indexed_string, organization_id=55555, date_added=datetime.now(), last_seen=datetime.now(), retention_days=55, ) testing_indexer._insert_collisions_handled(UseCaseKey.PERFORMANCE, [model1], key_results1) assert ( key_results1.get_mapped_key_strings_to_ints() == expected_key_result.get_mapped_key_strings_to_ints() ) # Insert the same record with a different id but the key result would # have the id of model1. key_results2 = KeyResults() model2_id = get_id() model2 = SpannerIndexerModel( id=codec.encode(model2_id), decoded_id=model2_id, string=indexed_string, organization_id=55555, date_added=datetime.now(), last_seen=datetime.now(), retention_days=55, ) testing_indexer._insert_collisions_handled(UseCaseKey.PERFORMANCE, [model2], key_results2) assert ( key_results2.get_mapped_key_strings_to_ints() == expected_key_result.get_mapped_key_strings_to_ints() )
209
test_cloudspanner.py
Python
tests/sentry/sentry_metrics/test_cloudspanner.py
21bf2ff99d3352c7cc8b7901fb3b4c264a71a8e8
sentry
1
154,582
18
11
7
90
13
0
21
46
replace_frame_in_exprs
FEAT-#4946: Replace OmniSci with HDK (#4947) Co-authored-by: Iaroslav Igoshev <[email protected]> Signed-off-by: Andrey Pavlenko <[email protected]>
https://github.com/modin-project/modin.git
def replace_frame_in_exprs(exprs, old_frame, new_frame): mapper = InputMapper() mapper.add_mapper(old_frame, FrameMapper(new_frame)) res = OrderedDict() for col in exprs.keys(): res[col] = exprs[col].translate_input(mapper) return res
56
df_algebra.py
Python
modin/experimental/core/execution/native/implementations/hdk_on_native/df_algebra.py
e5b1888cd932909e49194d58035da34b210b91c4
modin
2
161,965
14
9
5
66
8
0
15
54
get_equiv_set
Ensure untracked tuples won't be treated as a scalar by get_equiv_const
https://github.com/numba/numba.git
def get_equiv_set(self, obj): names = self._get_names(obj) if len(names) != 1: return None return super(ShapeEquivSet, self).get_equiv_set(names[0])
41
array_analysis.py
Python
numba/parfors/array_analysis.py
a3555a03132153a20e067e06c52cf19532a4fa74
numba
2
249,670
22
11
13
76
9
0
23
102
_check_event_push_backfill_thread_id
Update the thread_id right before use (in case the bg update hasn't finished) (#14222) This avoids running a forced-update of a null thread_id rows. An index is added (in the background) to hopefully make this easier in the future.
https://github.com/matrix-org/synapse.git
async def _check_event_push_backfill_thread_id(self) -> None: done = await self.db_pool.updates.has_completed_background_update( "event_push_backfill_thread_id" ) if done: self._event_push_backfill_thread_id_done = True else: # Reschedule to run. self._clock.call_later(15.0, self._check_event_push_backfill_thread_id)
45
event_push_actions.py
Python
synapse/storage/databases/main/event_push_actions.py
dbf18f514ea5d2539ba3148049eae5a6793f1d60
synapse
2
143,548
24
14
13
196
10
0
52
183
_get_episode_info
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def _get_episode_info(self): player_red_info, player_blue_info = {}, {} if len(self.red_pick) > 0: red_pick = sum(self.red_pick) player_red_info["pick_speed"] = red_pick / len(self.red_pick) if red_pick > 0: player_red_info["pick_own_color"] = sum(self.red_pick_own) / red_pick if len(self.blue_pick) > 0: blue_pick = sum(self.blue_pick) player_blue_info["pick_speed"] = blue_pick / len(self.blue_pick) if blue_pick > 0: player_blue_info["pick_own_color"] = sum(self.blue_pick_own) / blue_pick return player_red_info, player_blue_info
117
coin_game_non_vectorized_env.py
Python
rllib/examples/env/coin_game_non_vectorized_env.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
5
189,492
12
11
4
58
8
0
12
44
set_all_lines_alignments
Hide more private methods from the docs. (#2468) * hide privs from text_mobject.py * hide privs from tex_mobject.py * hide privs from code_mobject.py * hide privs from svg_mobject.py * remove SVGPath and utils from __init__.py * don't import string_to_numbers * hide privs from geometry.py * hide privs from matrix.py * hide privs from numbers.py * hide privs from three_dimensions.py * forgot underscore under set_stroke_width_from_length * there were more i missed * unhidea method that was used in docs * forgot other text2hash * remove svg_path from docs
https://github.com/ManimCommunity/manim.git
def _set_all_lines_alignments(self, alignment): for line_no in range(0, self.lines[0].__len__()): self._change_alignment_for_a_line(alignment, line_no) return self
37
text_mobject.py
Python
manim/mobject/svg/text_mobject.py
902e7eb4f0147b5882a613b67467e38a1d47f01e
manim
2
300,623
18
13
7
71
10
0
20
61
strptime
Fail template functions when no default specified (#71687)
https://github.com/home-assistant/core.git
def strptime(string, fmt, default=_SENTINEL): try: return datetime.strptime(string, fmt) except (ValueError, AttributeError, TypeError): if default is _SENTINEL: raise_no_default("strptime", string) return default
45
template.py
Python
homeassistant/helpers/template.py
4885331509eeffe50f42d76b234996467b06170f
core
3
297,838
5
6
60
24
4
0
5
12
_async_operate
String formatting and max line length - Part 2 (#84393)
https://github.com/home-assistant/core.git
async def _async_operate(self, time=None, force=False):
286
humidifier.py
Python
homeassistant/components/generic_hygrostat/humidifier.py
cb13418babd21a1e9584978b0c523f1b1e4e1cb0
core
22
224,942
6
9
8
32
4
0
6
12
get_build_date
Add a lot more type annotations, fix new type warnings (#2970) (including some behavior changes, assumed to be no-op) This is based on auto-generated annotations from "monkeytype".
https://github.com/mkdocs/mkdocs.git
def get_build_date() -> str: return get_build_datetime().strftime('%Y-%m-%d')
16
__init__.py
Python
mkdocs/utils/__init__.py
df3739d51903ab56771ac071a05b5aa9cdf9e129
mkdocs
1
19,435
31
12
10
162
14
1
46
106
test_outdated_setuptools_with_pep517_cython_import_in_setuppy
missed these tests becasue they run only on earlier python versions.
https://github.com/pypa/pipenv.git
def test_outdated_setuptools_with_pep517_cython_import_in_setuppy(PipenvInstance): with PipenvInstance(chdir=True) as p: c = p.pipenv('run pip install "setuptools<=40.2"') assert c.returncode == 0 c = p.pipenv("run python -c 'import setuptools; print(setuptools.__version__)'") assert c.returncode == 0 assert c.stdout.splitlines()[1] == "40.2.0" c = p.pipenv("install cython-import-package") assert c.returncode == 0 assert "vistir" in p.lockfile["default"] @pytest.mark.index @pytest.mark.install
@pytest.mark.index @pytest.mark.install
80
test_lock.py
Python
tests/integration/test_lock.py
5a151615aa47901f7c44e5b543fe2e2b0f6e9d24
pipenv
1
89,966
28
16
13
145
17
0
35
162
test_error_issues_content
feat(github): Support generic issue types (#42472) Add support to GitHub issues for generic issue types. This uses the issue title from the evidence, and renders the evidence display in a table view similar to how it's done for [performance issues](https://github.com/getsentry/sentry/pull/41041). Closes #42049 Relies on https://github.com/getsentry/sentry/pull/42470 being merged first for the assertion about the title to pass. <img width="1011" alt="Screen Shot 2022-12-19 at 3 34 12 PM" src="https://user-images.githubusercontent.com/29959063/208548490-159e9c0d-9b56-482b-b974-e8500141c6d1.png">
https://github.com/getsentry/sentry.git
def test_error_issues_content(self): event = self.store_event( data={ "event_id": "a" * 32, "message": "oh no", "timestamp": iso_format(before_now(minutes=1)), }, project_id=self.project.id, ) description = GitHubIssueBasic().get_group_description(event.group, event) assert "oh no" in description title = GitHubIssueBasic().get_group_title(event.group, event) assert title == event.title
86
test_issues.py
Python
tests/sentry/integrations/github/test_issues.py
5d006832f3fa640a6927bcbf38b41bbbcf3388fa
sentry
1
190,025
56
12
32
225
20
1
83
254
test_gif_format_output
Migrate more `os.path` to `pathlib` in tests (#2991) * Migrate more `os.path` to `pathlib` in tests * Convert test fixtures to pathlib * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix mypy errors in tests * migrate another pathlib instance Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Benjamin Hackl <[email protected]>
https://github.com/ManimCommunity/manim.git
def test_gif_format_output(tmp_path, manim_cfg_file, simple_scenes_path): scene_name = "SquareToCircle" command = [ sys.executable, "-m", "manim", "--renderer", "opengl", "-ql", "--media_dir", str(tmp_path), "--format", "gif", str(simple_scenes_path), scene_name, ] out, err, exit_code = capture(command) assert exit_code == 0, err unexpected_mp4_path = ( tmp_path / "videos" / "simple_scenes" / "480p15" / "SquareToCircle.mp4" ) assert not unexpected_mp4_path.exists(), "unexpected mp4 file found at " + str( unexpected_mp4_path, ) expected_gif_path = ( tmp_path / "videos" / "simple_scenes" / "480p15" / add_version_before_extension("SquareToCircle.gif") ) assert expected_gif_path.exists(), "gif file not found at " + str(expected_gif_path) @pytest.mark.slow
@pytest.mark.slow
122
test_cli_flags_opengl.py
Python
tests/test_scene_rendering/opengl/test_cli_flags_opengl.py
206db54af53a87985c0d243d75304ea620dad520
manim
1
19,677
112
21
53
596
34
0
199
978
base_paths
Issue 4993 Add standard pre commit hooks and apply linting. (#4994) * Add .pre-commit-config.yaml to the project and exclude tests (for now). This does not include the MyPy linting that pip does but does include everything else.
https://github.com/pypa/pipenv.git
def base_paths(self): # type: () -> Dict[str, str] prefix = make_posix(self.prefix.as_posix()) paths = {} if self._base_paths: paths = self._base_paths.copy() else: try: paths = self.get_paths() except Exception: install_scheme = "nt" if (os.name == "nt") else "posix_prefix" paths = get_paths( install_scheme, vars={ "base": prefix, "platbase": prefix, }, ) current_version = get_python_version() try: for k in list(paths.keys()): if not os.path.exists(paths[k]): paths[k] = self._replace_parent_version( paths[k], current_version ) except OSError: # Sometimes virtualenvs are made using virtualenv interpreters and there is no # include directory, which will cause this approach to fail. This failsafe # will make sure we fall back to the shell execution to find the real include path paths = self.get_include_path() paths.update(self.get_lib_paths()) paths["scripts"] = self.script_basedir if not paths: install_scheme = "nt" if (os.name == "nt") else "posix_prefix" paths = get_paths( install_scheme, vars={ "base": prefix, "platbase": prefix, }, ) if not os.path.exists(paths["purelib"]) and not os.path.exists(paths["platlib"]): lib_paths = self.get_lib_paths() paths.update(lib_paths) paths["PATH"] = paths["scripts"] + os.pathsep + os.defpath if "prefix" not in paths: paths["prefix"] = prefix purelib = paths["purelib"] = make_posix(paths["purelib"]) platlib = paths["platlib"] = make_posix(paths["platlib"]) if purelib == platlib: lib_dirs = purelib else: lib_dirs = purelib + os.pathsep + platlib paths["libdir"] = purelib paths["PYTHONPATH"] = os.pathsep.join(["", ".", lib_dirs]) paths["libdirs"] = lib_dirs return paths
346
environment.py
Python
pipenv/environment.py
9a3b3ce70621af6f9adaa9eeac9cf83fa149319c
pipenv
13
269,041
7
6
7
28
5
0
7
9
call_with_layout
Expose keras/dtensor package to public PiperOrigin-RevId: 430366845
https://github.com/keras-team/keras.git
def call_with_layout(fn, layout, *args, **kwargs): if layout:
44
utils.py
Python
keras/dtensor/utils.py
a179ed22f002e2f4a43ae4770348a9b8e1d5a051
keras
2
127,106
47
14
33
145
15
0
53
473
_repr_html_
[AIR] Add rich notebook repr for DataParallelTrainer (#26335)
https://github.com/ray-project/ray.git
def _repr_html_(self) -> str: try: from tabulate import tabulate except ImportError: return ( "Tabulate isn't installed. Run " "`pip install tabulate` for rich notebook output." ) return Template("scrollableTable.html.j2").render( table=tabulate( { "Setting": [ "Upload directory", "Sync on checkpoint", "Sync period", ], "Value": [ self.upload_dir, self.sync_on_checkpoint, self.sync_period, ], }, tablefmt="html", showindex=False, headers="keys", ), max_height="none", )
84
syncer.py
Python
python/ray/tune/syncer.py
4d19c0222bdcf268a1e2a7ef8dbccd0941c87b97
ray
2
297,157
22
11
6
90
18
0
23
41
test_update_failure
Blebox add thermoBox to climate (#81090) Co-authored-by: Martin Hjelmare <[email protected]>
https://github.com/home-assistant/core.git
async def test_update_failure(saunabox, hass, caplog): caplog.set_level(logging.ERROR) feature_mock, entity_id = saunabox feature_mock.async_update = AsyncMock(side_effect=blebox_uniapi.error.ClientError) await async_setup_entity(hass, entity_id) assert f"Updating '{feature_mock.full_name}' failed: " in caplog.text
51
test_climate.py
Python
tests/components/blebox/test_climate.py
923fa473e171fcdf396556ea200612e378f9b0a5
core
1
258,617
37
17
12
222
14
0
63
103
make_friedman3
ENH Replaced RandomState with Generator compatible calls (#22271)
https://github.com/scikit-learn/scikit-learn.git
def make_friedman3(n_samples=100, *, noise=0.0, random_state=None): generator = check_random_state(random_state) X = generator.rand(n_samples, 4) X[:, 0] *= 100 X[:, 1] *= 520 * np.pi X[:, 1] += 40 * np.pi X[:, 3] *= 10 X[:, 3] += 1 y = np.arctan( (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0] ) + noise * generator.standard_normal(size=(n_samples)) return X, y
151
_samples_generator.py
Python
sklearn/datasets/_samples_generator.py
254ea8c453cd2100ade07644648f1f00392611a6
scikit-learn
1
216,049
55
14
51
240
4
1
75
927
gpg_pillar_encrypted
Convert gpg pytest constants to fixtures - Normalize module-global data into pytest fixtures. It is not the least opaque option but we work within the constraints of pytest. - Also rescope gpg pillar fixtures to module. If they are needed in a larger context, they should be defined in an appropriately prominent file.
https://github.com/saltstack/salt.git
def gpg_pillar_encrypted(): return { "secrets": { "vault": { "foo": ( "-----BEGIN PGP MESSAGE-----\n" "\n" "hQEMAw2B674HRhwSAQgAhTrN8NizwUv/VunVrqa4/X8t6EUulrnhKcSeb8sZS4th\n" "W1Qz3K2NjL4lkUHCQHKZVx/VoZY7zsddBIFvvoGGfj8+2wjkEDwFmFjGE4DEsS74\n" "ZLRFIFJC1iB/O0AiQ+oU745skQkU6OEKxqavmKMrKo3rvJ8ZCXDC470+i2/Hqrp7\n" "+KWGmaDOO422JaSKRm5D9bQZr9oX7KqnrPG9I1+UbJyQSJdsdtquPWmeIpamEVHb\n" "VMDNQRjSezZ1yKC4kCWm3YQbBF76qTHzG1VlLF5qOzuGI9VkyvlMaLfMibriqY73\n" "zBbPzf6Bkp2+Y9qyzuveYMmwS4sEOuZL/PetqisWe9JGAWD/O+slQ2KRu9hNww06\n" "KMDPJRdyj5bRuBVE4hHkkP23KrYr7SuhW2vpe7O/MvWEJ9uDNegpMLhTWruGngJh\n" "iFndxegN9w==\n" "=bAuo\n" "-----END PGP MESSAGE-----\n" ), "bar": "this was unencrypted already", "baz": ( "-----BEGIN PGP MESSAGE-----\n" "\n" "hQEMAw2B674HRhwSAQf+Ne+IfsP2IcPDrUWct8sTJrga47jQvlPCmO+7zJjOVcqz\n" "gLjUKvMajrbI/jorBWxyAbF+5E7WdG9WHHVnuoywsyTB9rbmzuPqYCJCe+ZVyqWf\n" "9qgJ+oUjcvYIFmH3h7H68ldqbxaAUkAOQbTRHdr253wwaTIC91ZeX0SCj64HfTg7\n" "Izwk383CRWonEktXJpientApQFSUWNeLUWagEr/YPNFA3vzpPF5/Ia9X8/z/6oO2\n" "q+D5W5mVsns3i2HHbg2A8Y+pm4TWnH6mTSh/gdxPqssi9qIrzGQ6H1tEoFFOEq1V\n" "kJBe0izlfudqMq62XswzuRB4CYT5Iqw1c97T+1RqENJCASG0Wz8AGhinTdlU5iQl\n" "JkLKqBxcBz4L70LYWyHhYwYROJWjHgKAywX5T67ftq0wi8APuZl9olnOkwSK+wrY\n" "1OZi\n" "=7epf\n" "-----END PGP MESSAGE-----\n" ), "qux": [ "foo", "bar", "-----BEGIN PGP MESSAGE-----\n" "\n" "hQEMAw2B674HRhwSAQgAg1YCmokrweoOI1c9HO0BLamWBaFPTMblOaTo0WJLZoTS\n" "ksbQ3OJAMkrkn3BnnM/djJc5C7vNs86ZfSJ+pvE8Sp1Rhtuxh25EKMqGOn/SBedI\n" "gR6N5vGUNiIpG5Tf3DuYAMNFDUqw8uY0MyDJI+ZW3o3xrMUABzTH0ew+Piz85FDA\n" "YrVgwZfqyL+9OQuu6T66jOIdwQNRX2NPFZqvon8liZUPus5VzD8E5cAL9OPxQ3sF\n" "f7/zE91YIXUTimrv3L7eCgU1dSxKhhfvA2bEUi+AskMWFXFuETYVrIhFJAKnkFmE\n" "uZx+O9R9hADW3hM5hWHKH9/CRtb0/cC84I9oCWIQPdI+AaPtICxtsD2N8Q98hhhd\n" "4M7I0sLZhV+4ZJqzpUsOnSpaGyfh1Zy/1d3ijJi99/l+uVHuvmMllsNmgR+ZTj0=\n" "=LrCQ\n" "-----END PGP MESSAGE-----\n", ], }, }, } @pytest.fixture(scope="module")
@pytest.fixture(scope="module")
77
test_gpg.py
Python
tests/pytests/functional/pillar/test_gpg.py
90192621745895b9e352ee8c885eb6b742200b96
salt
1
20,731
8
8
16
29
3
0
9
30
capture
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def capture(self) -> Capture: capture = Capture(self) return capture
16
console.py
Python
pipenv/patched/notpip/_vendor/rich/console.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
1
176,304
6
9
2
35
5
0
6
12
draw_random
Update `draw_<layout>` docstrings with usage examples (#5264) * Update descriptions, add Notes and SeeAlso sections. Notify users that the draw_ fns are shorthand for using the layouts explicitly. * Fix gh-5215. * Rm unused warnings. * Rm >>> to prevent doctest from running. * Fix planar layout example with planar graph. * Fix links to layout functions (cross-module. * Fix misnamed layout in example Co-authored-by: Mridul Seth <[email protected]> Co-authored-by: Mridul Seth <[email protected]>
https://github.com/networkx/networkx.git
def draw_random(G, **kwargs): draw(G, random_layout(G), **kwargs)
21
nx_pylab.py
Python
networkx/drawing/nx_pylab.py
69db45cdb6f56a3e337cdc2cc54386270ab18308
networkx
1
310,593
7
8
3
29
3
0
7
21
async_enable_recording
Migrate amcrest integration to new async API (#56294)
https://github.com/home-assistant/core.git
async def async_enable_recording(self) -> None: await self._async_enable_recording(True)
15
camera.py
Python
homeassistant/components/amcrest/camera.py
7781e308cd7b28c67b6cf339f9b115c7190456fe
core
1
10,208
3
6
5
15
3
0
3
6
test_image_crafter_index
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
https://github.com/jina-ai/jina.git
def test_image_crafter_index(encoder_doc_array, tmpdir):
49
test_executors.py
Python
tests/unit/helloworld/multimodal/test_executors.py
933415bfa1f9eb89f935037014dfed816eb9815d
jina
1
212,153
108
13
47
556
42
0
211
806
parameters
Add Init signatures to Bokeh models (#12035) * Add signatures to Bokeh Model initializers * use explicit type for override default * move InstanceDefault to bokeh.core.properties * enable assertions
https://github.com/bokeh/bokeh.git
def parameters(cls): arg_params = [] no_more_defaults = False for arg in reversed(cls._args): descriptor = cls.lookup(arg) default = descriptor.class_default(cls, no_eval=True) if default is None: no_more_defaults = True # simplify field(x) defaults to just present the column name if isinstance(default, dict) and set(default) == {"field"}: default = default["field"] # make sure built-ins don't hold on to references to actual Models if cls.__module__.startswith("bokeh.models"): assert not isinstance(default, Model) param = Parameter( name=arg, kind=Parameter.POSITIONAL_OR_KEYWORD, # For positional arg properties, default=None means no default. default=Parameter.empty if no_more_defaults else default ) if default: del default typ = type_link(descriptor.property) arg_params.insert(0, (param, typ, descriptor.__doc__)) # these are not really useful, and should also really be private, just skip them omissions = {'js_event_callbacks', 'js_property_callbacks', 'subscribed_events'} kwarg_params = [] kws = set(cls.properties()) - set(cls._args) - omissions for kw in kws: descriptor = cls.lookup(kw) default = descriptor.class_default(cls, no_eval=True) # simplify field(x) defaults to just present the column name if isinstance(default, dict) and set(default) == {"field"}: default = default["field"] # make sure built-ins don't hold on to references to actual Models if cls.__module__.startswith("bokeh.models"): assert not isinstance(default, Model) param = Parameter( name=kw, kind=Parameter.KEYWORD_ONLY, default=default ) del default typ = type_link(descriptor.property) kwarg_params.append((param, typ, descriptor.__doc__)) for kw, (typ, doc) in cls._extra_kws.items(): param = Parameter( name=kw, kind=Parameter.KEYWORD_ONLY, ) kwarg_params.append((param, typ, doc)) kwarg_params.sort(key=lambda x: x[0].name) return arg_params + kwarg_params
349
model.py
Python
bokeh/model/model.py
1b3e6acd6eebd352106cc5ecf5e12dbf90e0607c
bokeh
13
300,065
22
9
7
87
15
0
23
69
turn_on
Update Zigpy attribute cache for switch devices that do not report state (#71417) * fix devices that do not report state * whoops
https://github.com/home-assistant/core.git
async def turn_on(self) -> bool: result = await self.on() if isinstance(result, Exception) or result[1] is not Status.SUCCESS: return False self.cluster.update_attribute(self.ON_OFF, t.Bool.true) return True
54
general.py
Python
homeassistant/components/zha/core/channels/general.py
173f14379b98b108d4d8c05fecde57e59b2dc7a9
core
3
259,882
35
10
4
70
7
0
44
66
test_load_arff_from_gzip_file_error_parser
ENH improve ARFF parser using pandas (#21938) Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Adrin Jalali <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def test_load_arff_from_gzip_file_error_parser(): # None of the input parameters are required to be accurate since the check # of the parser will be carried out first. err_msg = "Unknown parser: 'xxx'. Should be 'liac-arff' or 'pandas'" with pytest.raises(ValueError, match=err_msg): load_arff_from_gzip_file("xxx", "xxx", "xxx", "xxx", "xxx", "xxx")
34
test_arff_parser.py
Python
sklearn/datasets/tests/test_arff_parser.py
a47d569e670fd4102af37c3165c9b1ddf6fd3005
scikit-learn
1
195,437
24
15
13
111
12
0
26
116
extract_operations
ROSCOE suite of metrics (#4839) * ROSCOE suite of metrics * updating tests * lint * fixing protobuf version to stop cleaninstall failures * updating requirements * convert to absolute path * moving tests because of the dependency issues * adding new dependencies in tests * add test dependencies * fixing deps * updating task list * checklist deps can't be installed on circleci * actually fix protobuf version * protobuf range * protobuf conflict with google-api-core * return tests * convert imports to absolute path * trying checklist again * trying to avoid checklist failures * checklist to teacher tests * add user option to avoid installation failure * jupiter as well * typo * moving into virtual env setup * user param not allowed in virtual env * move spacy to circleCI because it's big * replace local model with HF * fixes based on comments * remove unused nli scores, fix tests * Added path to BART model Co-authored-by: Spencer Poff <[email protected]>
https://github.com/facebookresearch/ParlAI.git
def extract_operations(self) -> List[str]: if not self.step: return [] try: operations = re.findall(r'[-+*^/]', self.step) except TypeError as e: print(f"TYPE: {type(self.step)}") print(f"STEP: {self.step}") raise e return operations
54
step_by_step.py
Python
parlai/tasks/reasoning/reason_types/step_by_step.py
0f129e9c38b6b10d80982ecc412785db62842938
ParlAI
3