Each record pairs one Python function with code metrics and the commit it was taken from. Columns, types, and value ranges (string columns report length ranges):

| column | dtype | range |
| --- | --- | --- |
| id | int64 | 20 to 338k |
| vocab_size | int64 | 2 to 671 |
| ast_levels | int64 | 4 to 32 |
| nloc | int64 | 1 to 451 |
| n_ast_nodes | int64 | 12 to 5.6k |
| n_identifiers | int64 | 1 to 186 |
| n_ast_errors | int64 | 0 to 10 |
| n_words | int64 | 2 to 2.17k |
| n_whitespaces | int64 | 2 to 13.8k |
| fun_name | string | length 2 to 73 |
| commit_message | string | length 51 to 15.3k |
| url | string | length 31 to 59 |
| code | string | length 51 to 31k |
| ast_errors | string | length 0 to 1.46k |
| token_counts | int64 | 6 to 3.32k |
| file_name | string | length 5 to 56 |
| language | string | 1 value (Python) |
| path | string | length 7 to 134 |
| commit_id | string | length 40 to 40 |
| repo | string | length 3 to 28 |
| complexity | int64 | 1 to 153 |
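A minimal sketch of how records with this schema might be filtered, assuming one split has been exported locally; the file name `train.parquet` and the `nloc`/`complexity` thresholds are illustrative assumptions, not part of this dump:

```python
import pandas as pd

# Hypothetical local export of one split; the file name is an assumption.
df = pd.read_parquet("train.parquet")

clean = df[
    (df["n_ast_errors"] == 0)   # functions that parsed without AST errors
    & (df["nloc"] <= 30)        # short functions (illustrative threshold)
    & (df["complexity"] <= 5)   # low cyclomatic complexity (illustrative threshold)
]

print(f"kept {len(clean)} of {len(df)} records")
print(clean[["repo", "fun_name", "nloc", "complexity", "token_counts"]].head())
```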
id: 131,209 | vocab_size: 4 | ast_levels: 5 | nloc: 60 | n_ast_nodes: 22 | n_identifiers: 5 | n_ast_errors: 2 | n_words: 4 | n_whitespaces: 7
fun_name: test_whether_worker_leaked_when_task_finished_with_errors
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def test_whether_worker_leaked_when_task_finished_with_errors(ray_start_regular): driver_template =
driver_template = """ import ray import os import ray import numpy as np import time ray.init(address="{address}", namespace="test")@ray.remote
token_counts: 139 | file_name: test_advanced_2.py | language: Python | path: python/ray/tests/test_advanced_2.py | commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | repo: ray | complexity: 2

id: 271,883 | vocab_size: 4 | ast_levels: 6 | nloc: 40 | n_ast_nodes: 20 | n_identifiers: 4 | n_ast_errors: 0 | n_words: 4 | n_whitespaces: 7
fun_name: check_array_lengths
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def check_array_lengths(inputs, targets, weights=None):
token_counts: 223 | file_name: training_utils_v1.py | language: Python | path: keras/engine/training_utils_v1.py | commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf | repo: keras | complexity: 13

id: 270,982 | vocab_size: 54 | ast_levels: 12 | nloc: 18 | n_ast_nodes: 138 | n_identifiers: 19 | n_ast_errors: 0 | n_words: 68 | n_whitespaces: 238
fun_name: add_update
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def add_update(self, updates): call_context = base_layer_utils.call_context() if ( tf.distribute.has_strategy() and tf.distribute.in_cross_replica_context() and # When saving the model, the distribution strategy context should be # ignored, following the default path for adding updates. not call_context.saving ): # Updates don't need to be run in a cross-replica context. return updates = generic_utils.to_list(updates) if call_context.in_call: relevant_inputs = call_context.inputs else: inbound_nodes = getattr(self, "_inbound_nodes", []) relevant_inputs = [node.input_tensors for node in inbound_nodes]
token_counts: 104 | file_name: base_layer_v1.py | language: Python | path: keras/engine/base_layer_v1.py | commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf | repo: keras | complexity: 7

id: 336,889 | vocab_size: 18 | ast_levels: 14 | nloc: 23 | n_ast_nodes: 95 | n_identifiers: 14 | n_ast_errors: 0 | n_words: 18 | n_whitespaces: 68
fun_name: test_textual_inversion
Add smoke tests for the training examples (#585) * Add smoke tests for the training examples * upd * use a dummy dataset * mark as slow * cleanup * Update test cases * naming
https://github.com/huggingface/diffusers.git
def test_textual_inversion(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "learned_embeds.bin")))
token_counts: 52 | file_name: test_examples.py | language: Python | path: examples/test_examples.py | commit_id: 4f1c989ffbbfb14e1244dcdae975ab3b2f6aa966 | repo: diffusers | complexity: 1

id: 137,776 | vocab_size: 97 | ast_levels: 17 | nloc: 23 | n_ast_nodes: 349 | n_identifiers: 23 | n_ast_errors: 0 | n_words: 173 | n_whitespaces: 570
fun_name: _get_step_results
[RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). (#28369)
https://github.com/ray-project/ray.git
def _get_step_results(self): obs = {} rewards = {} infos = {} for behavior_name in self.unity_env.behavior_specs: decision_steps, terminal_steps = self.unity_env.get_steps(behavior_name) # Important: Only update those sub-envs that are currently # available within _env_state. # Loop through all envs ("agents") and fill in, whatever # information we have. for agent_id, idx in decision_steps.agent_id_to_index.items(): key = behavior_name + "_{}".format(agent_id) os = tuple(o[idx] for o in decision_steps.obs) os = os[0] if len(os) == 1 else os obs[key] = os rewards[key] = ( decision_steps.reward[idx] + decision_steps.group_reward[idx] ) for agent_id, idx in terminal_steps.agent_id_to_index.items(): key = behavior_name + "_{}".format(agent_id) # Only overwrite rewards (last reward in episode), b/c obs # here is the last obs (which doesn't matter anyways). # Unless key does not exist in obs. if key not in obs: os = tuple(o[idx] for o in terminal_steps.obs) obs[key] = os = os[0] if len(os) == 1 else os rewards[key] = ( terminal_steps.reward[idx] + terminal_steps.group_reward[idx] ) # Only use dones if all agents are done, then we should do a reset. return obs, rewards, {"__all__": False}, {"__all__": False}, infos
token_counts: 220 | file_name: unity3d_env.py | language: Python | path: rllib/env/wrappers/unity3d_env.py | commit_id: 8e680c483ce326cefc62e44f68ab1a6948b1c3d2 | repo: ray | complexity: 9

id: 47,756 | vocab_size: 14 | ast_levels: 9 | nloc: 5 | n_ast_nodes: 62 | n_identifiers: 11 | n_ast_errors: 0 | n_words: 14 | n_whitespaces: 46
fun_name: iter_mapped_dependencies
Ensure TaskMap only checks "relevant" dependencies (#23053) When looking for "mapped dependants" of a task, we only want a task if it not only is a direct downstream of the task, but also it actually "uses" the task's pushed XCom for task mapping. So we need to peek into the mapped downstream task's expansion kwargs, and only count it as a mapped dependant if the upstream is referenced there.
https://github.com/apache/airflow.git
def iter_mapped_dependencies(self) -> Iterator["Operator"]: from airflow.models.xcom_arg import XComArg for ref in XComArg.iter_xcom_args(self._get_expansion_kwargs()): yield ref.operator
token_counts: 37 | file_name: mappedoperator.py | language: Python | path: airflow/models/mappedoperator.py | commit_id: 197cff3194e855b9207c3c0da8ae093a0d5dda55 | repo: airflow | complexity: 2

id: 288,048 | vocab_size: 15 | ast_levels: 11 | nloc: 5 | n_ast_nodes: 84 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 24 | n_whitespaces: 47
fun_name: test_convert_invalid_unit
Remove argument validation in Unit Converter (#79107) * Remove argument validation in Unit Converter * Use HomeAssistantError * Adjust tests * Improve coverage
https://github.com/home-assistant/core.git
def test_convert_invalid_unit(): with pytest.raises(HomeAssistantError, match="is not a recognized .* unit"): distance_util.convert(5, INVALID_SYMBOL, VALID_SYMBOL) with pytest.raises(HomeAssistantError, match="is not a recognized .* unit"): distance_util.convert(5, VALID_SYMBOL, INVALID_SYMBOL)
token_counts: 49 | file_name: test_distance.py | language: Python | path: tests/util/test_distance.py | commit_id: c96c5bed7da446b094e45ef8969dd0ecee6ec85d | repo: core | complexity: 1

id: 269,302 | vocab_size: 6 | ast_levels: 7 | nloc: 2 | n_ast_nodes: 47 | n_identifiers: 7 | n_ast_errors: 1 | n_words: 6 | n_whitespaces: 10
fun_name: tanh
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def tanh(x): return tf.tanh(x) @keras_export("keras.activations.sigmoid") @tf.__internal__.dispatch.add_dispatch_support
@keras_export("keras.activations.sigmoid") @tf.__internal__.dispatch.add_dispatch_support
token_counts: 13 | file_name: activations.py | language: Python | path: keras/activations.py | commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf | repo: keras | complexity: 1

id: 132,413 | vocab_size: 30 | ast_levels: 11 | nloc: 9 | n_ast_nodes: 106 | n_identifiers: 15 | n_ast_errors: 0 | n_words: 32 | n_whitespaces: 130
fun_name: testOnCheckpointUnavailableAttribute
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def testOnCheckpointUnavailableAttribute(self): checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num=1) no_attr_checkpoint = Checkpoint(Checkpoint.PERSISTENT, 0, {}) with patch.object(logger, "error") as log_error_mock: checkpoint_manager.on_checkpoint(no_attr_checkpoint) log_error_mock.assert_called_once() # The newest checkpoint should still be set despite this error. self.assertEqual( checkpoint_manager.newest_persistent_checkpoint, no_attr_checkpoint )
token_counts: 62 | file_name: test_checkpoint_manager.py | language: Python | path: python/ray/tune/tests/test_checkpoint_manager.py | commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | repo: ray | complexity: 1

id: 19,892 | vocab_size: 37 | ast_levels: 10 | nloc: 12 | n_ast_nodes: 75 | n_identifiers: 10 | n_ast_errors: 0 | n_words: 45 | n_whitespaces: 52
fun_name: _should_use_sysconfig
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def _should_use_sysconfig() -> bool: return bool(getattr(sysconfig, "_PIP_USE_SYSCONFIG", _USE_SYSCONFIG_DEFAULT)) _USE_SYSCONFIG = _should_use_sysconfig() # Be noisy about incompatibilities if this platforms "should" be using # sysconfig, but is explicitly opting out and using distutils instead. if _USE_SYSCONFIG_DEFAULT and not _USE_SYSCONFIG: _MISMATCH_LEVEL = logging.WARNING else: _MISMATCH_LEVEL = logging.DEBUG
token_counts: 19 | file_name: __init__.py | language: Python | path: pipenv/patched/notpip/_internal/locations/__init__.py | commit_id: f3166e673fe8d40277b804d35d77dcdb760fc3b3 | repo: pipenv | complexity: 1

id: 303,229 | vocab_size: 18 | ast_levels: 8 | nloc: 7 | n_ast_nodes: 83 | n_identifiers: 13 | n_ast_errors: 0 | n_words: 24 | n_whitespaces: 45
fun_name: test_device_name
MotionBlinds use device_name helper (#72438) * use device_name helper * fix typo * fix import * fix isort * add gateway_test * implement gateway test * correct test blind mac
https://github.com/home-assistant/core.git
async def test_device_name(hass): blind = Mock() blind.blind_type = BlindType.RollerBlind.name blind.mac = TEST_BLIND_MAC assert device_name(blind) == "RollerBlind 0001" blind.device_type = DEVICE_TYPES_WIFI[0] assert device_name(blind) == "RollerBlind"
token_counts: 47 | file_name: test_gateway.py | language: Python | path: tests/components/motion_blinds/test_gateway.py | commit_id: 6cadd4f6657ee5f12b5a6d28f61455a3c94cefa0 | repo: core | complexity: 1

id: 53,343 | vocab_size: 18 | ast_levels: 10 | nloc: 5 | n_ast_nodes: 55 | n_identifiers: 8 | n_ast_errors: 0 | n_words: 20 | n_whitespaces: 66
fun_name: k8s_hosted_orion
Kubernetes flow runner (PrefectHQ/orion#780) Add a Kubernetes flow runner
https://github.com/PrefectHQ/prefect.git
def k8s_hosted_orion(self): kubernetes = pytest.importorskip("kubernetes") # TODO: pytest flag to configure this URL k8s_api_url = "http://localhost:4205/api" with temporary_settings(PREFECT_ORION_HOST=k8s_api_url): yield k8s_api_url
token_counts: 27 | file_name: test_flow_runners.py | language: Python | path: tests/test_flow_runners.py | commit_id: be671cbecee46c621dc08ed47bb520f795b34a42 | repo: prefect | complexity: 1

id: 276,195 | vocab_size: 15 | ast_levels: 10 | nloc: 6 | n_ast_nodes: 47 | n_identifiers: 6 | n_ast_errors: 0 | n_words: 17 | n_whitespaces: 71
fun_name: checkpointable_objects
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def checkpointable_objects(self): return { key: value for key, value in self._object_dict.items() if value is not None }
token_counts: 29 | file_name: serialized_attributes.py | language: Python | path: keras/saving/saved_model/serialized_attributes.py | commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf | repo: keras | complexity: 3

id: 180,902 | vocab_size: 71 | ast_levels: 16 | nloc: 12 | n_ast_nodes: 430 | n_identifiers: 43 | n_ast_errors: 0 | n_words: 98 | n_whitespaces: 244
fun_name: get_blocks_party_spaces
Adding a Playground Tab to the Website (#1860) * added playground with 12 demos * change name to recipes, restyle navbar * add explanatory text to page * fix demo mapping * categorize demos, clean up design * styling * cateogry naming and emojis * refactor and add text demos * add view code button * remove opening slash in embed * styling * add image demos * adding plot demos * remove see code button * removed submodules * changes * add audio models * remove fun section * remove tests in image semgentation demo repo * requested changes * add outbreak_forecast * fix broken demos * remove images and models, add new demos * remove readmes, change to run.py, add description as comment * move to /demos folder, clean up dict * add upload_to_spaces script * fix script, clean repos, and add to docker file * fix python versioning issue * env variable * fix * env fixes * spaces instead of tabs * revert to original networking.py * fix rate limiting in asr and autocomplete * change name to demos * clean up navbar * move url and description, remove code comments * add tabs to demos * remove margins and footer from embedded demo * font consistency Co-authored-by: Abubakar Abid <[email protected]>
https://github.com/gradio-app/gradio.git
def get_blocks_party_spaces(): r = requests.get(path) d = r.json() spaces = [SpaceInfo(**x) for x in d] blocks_spaces = {} for i in range(0,len(spaces)): if spaces[i].id.split('/')[0] == 'Gradio-Blocks' and hasattr(spaces[i], 'likes') and spaces[i].id != 'Gradio-Blocks/Leaderboard' and spaces[i].id != 'Gradio-Blocks/README': blocks_spaces[spaces[i].id]=spaces[i].likes df = pd.DataFrame( [{"Spaces_Name": Spaces, "likes": likes} for Spaces,likes in blocks_spaces.items()]) df = df.sort_values(by=['likes'],ascending=False) return df block = gr.Blocks() with block: gr.Markdown() with gr.Tabs(): with gr.TabItem("Blocks Party Leaderboard"): with gr.Row(): data = gr.outputs.Dataframe(type="pandas") with gr.Row(): data_run = gr.Button("Refresh") data_run.click(get_blocks_party_spaces, inputs=None, outputs=data) # running the function on page load in addition to when the button is clicked block.load(get_blocks_party_spaces, inputs=None, outputs=data) block.launch()
token_counts: 157 | file_name: run.py | language: Python | path: demo/leaderboard/run.py | commit_id: 597337dcb8762cca6e718b59a4ab6f5e333645fd | repo: gradio | complexity: 8

id: 36,203 | vocab_size: 9 | ast_levels: 12 | nloc: 3 | n_ast_nodes: 67 | n_identifiers: 11 | n_ast_errors: 0 | n_words: 10 | n_whitespaces: 31
fun_name: _adapt_logits_for_beam_search
Replace all deprecated `jax.ops` operations with jnp's `at` (#16078) * Replace all deprecated `jax.ops` operations with jnp's `at` * np to jnp scores * suggested changes
https://github.com/huggingface/transformers.git
def _adapt_logits_for_beam_search(self, logits): logits = logits.at[jax.ops.index[:, :, self.config.pad_token_id]].set(float("-inf")) return logits
token_counts: 41 | file_name: modeling_flax_marian.py | language: Python | path: src/transformers/models/marian/modeling_flax_marian.py | commit_id: ee27b3d7df397a44dc88324e5aa639a20bf67e53 | repo: transformers | complexity: 1

id: 160,813 | vocab_size: 76 | ast_levels: 13 | nloc: 18 | n_ast_nodes: 193 | n_identifiers: 19 | n_ast_errors: 0 | n_words: 114 | n_whitespaces: 234
fun_name: _get_machar
BUG: Fix discovered MachAr (still used within valgrind) This fixes the missing attributes. I tested the warning and fix on valgrind itself. These attributes were added in gh-18536 but the fallback path was not checked there. Replaces gh-21813, although something more like it may make sense if it allows us to just delete MachAr completely.
https://github.com/numpy/numpy.git
def _get_machar(ftype): params = _MACHAR_PARAMS.get(ftype) if params is None: raise ValueError(repr(ftype)) # Detect known / suspected types key = ftype('-0.1').newbyteorder('<').tobytes() ma_like = None if ftype == ntypes.longdouble: # Could be 80 bit == 10 byte extended precision, where last bytes can # be random garbage. # Comparing first 10 bytes to pattern first to avoid branching on the # random garbage. ma_like = _KNOWN_TYPES.get(key[:10]) if ma_like is None: ma_like = _KNOWN_TYPES.get(key) if ma_like is not None: return ma_like # Fall back to parameter discovery warnings.warn( f'Signature {key} for {ftype} does not match any known type: ' 'falling back to type probe function.\n' 'This warnings indicates broken support for the dtype!', UserWarning, stacklevel=2) return _discovered_machar(ftype)
token_counts: 105 | file_name: getlimits.py | language: Python | path: numpy/core/getlimits.py | commit_id: 4756c54e13154bcbc7ae2612874f03a0ccbaa9af | repo: numpy | complexity: 5

id: 130,354 | vocab_size: 25 | ast_levels: 13 | nloc: 12 | n_ast_nodes: 135 | n_identifiers: 14 | n_ast_errors: 0 | n_words: 38 | n_whitespaces: 138
fun_name: describe_security_groups
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def describe_security_groups(self, vpc_id=None, tags=None): request = DescribeSecurityGroupsRequest() if vpc_id is not None: request.set_VpcId(vpc_id) if tags is not None: request.set_Tags(tags) response = self._send_request(request) if response is not None: security_groups = response.get("SecurityGroups").get("SecurityGroup") return security_groups logging.error("describe security group failed.") return None
token_counts: 80 | file_name: utils.py | language: Python | path: python/ray/autoscaler/_private/aliyun/utils.py | commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | repo: ray | complexity: 4

id: 260,083 | vocab_size: 91 | ast_levels: 11 | nloc: 28 | n_ast_nodes: 310 | n_identifiers: 31 | n_ast_errors: 0 | n_words: 118 | n_whitespaces: 264
fun_name: test_calibrated_classifier_cv_zeros_sample_weights_equivalence
API Rename base_estimator in CalibratedClassifierCV (#22054) Co-authored-by: Kevin Roice <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def test_calibrated_classifier_cv_zeros_sample_weights_equivalence(method, ensemble): X, y = load_iris(return_X_y=True) # Scale the data to avoid any convergence issue X = StandardScaler().fit_transform(X) # Only use 2 classes and select samples such that 2-fold cross-validation # split will lead to an equivalence with a `sample_weight` of 0 X = np.vstack((X[:40], X[50:90])) y = np.hstack((y[:40], y[50:90])) sample_weight = np.zeros_like(y) sample_weight[::2] = 1 estimator = LogisticRegression() calibrated_clf_without_weights = CalibratedClassifierCV( estimator, method=method, ensemble=ensemble, cv=2, ) calibrated_clf_with_weights = clone(calibrated_clf_without_weights) calibrated_clf_with_weights.fit(X, y, sample_weight=sample_weight) calibrated_clf_without_weights.fit(X[::2], y[::2]) # Check that the underlying fitted estimators have the same coefficients for est_with_weights, est_without_weights in zip( calibrated_clf_with_weights.calibrated_classifiers_, calibrated_clf_without_weights.calibrated_classifiers_, ): assert_allclose( est_with_weights.estimator.coef_, est_without_weights.estimator.coef_, ) # Check that the predictions are the same y_pred_with_weights = calibrated_clf_with_weights.predict_proba(X) y_pred_without_weights = calibrated_clf_without_weights.predict_proba(X) assert_allclose(y_pred_with_weights, y_pred_without_weights) # TODO(1.4): Remove
token_counts: 197 | file_name: test_calibration.py | language: Python | path: sklearn/tests/test_calibration.py | commit_id: effdd6e215c67f2ae8ed1e378ea1661e936059a4 | repo: scikit-learn | complexity: 2

id: 109,028 | vocab_size: 39 | ast_levels: 10 | nloc: 11 | n_ast_nodes: 194 | n_identifiers: 23 | n_ast_errors: 0 | n_words: 50 | n_whitespaces: 133
fun_name: sharez
Let Axes.clear iterate over Axises. ... so that the implementation also works for 3D. This means also adding a sharez() method, which is copy-pasted from sharex()/sharey() (note that it is resolves a slight inconsistency in the old Axes3D.clear, which would previously not copy the z-autoscaleness status of a z-shared axes). Using Axis._set_lim in the loop also means that we need to pass auto explicitly to it anyways (it is a required parameter), so we can get rid of the set_autoscalex/y/z calls. Also, note that Axis.clear() explicitly resets the scale to "linear", so the extra `_set_scale("linear")` calls are unneeded. Finally, remove the silencing of TypeErrors when setting limits in clear(), which was added in 917de33 to handle initialization order problems in Axes3D but which seem not needed anymore (and are rather unsightly, as it's not immediately clear what we're really catching).
https://github.com/matplotlib/matplotlib.git
def sharez(self, other): _api.check_isinstance(maxes._base._AxesBase, other=other) if self._sharez is not None and other is not self._sharez: raise ValueError("z-axis is already shared") self._shared_axes["z"].join(self, other) self._sharez = other self.zaxis.major = other.zaxis.major # Ticker instances holding self.zaxis.minor = other.zaxis.minor # locator and formatter. z0, z1 = other.get_zlim() self.set_zlim(z0, z1, emit=False, auto=other.get_autoscalez_on()) self.zaxis._scale = other.zaxis._scale
token_counts: 122 | file_name: axes3d.py | language: Python | path: lib/mpl_toolkits/mplot3d/axes3d.py | commit_id: 716a35f05ea714acc0e562e37e3404150435da79 | repo: matplotlib | complexity: 3

id: 34,874 | vocab_size: 60 | ast_levels: 12 | nloc: 22 | n_ast_nodes: 225 | n_identifiers: 21 | n_ast_errors: 1 | n_words: 83 | n_whitespaces: 276
fun_name: forward
Add ConvNeXT (#15277) * First draft * Add conversion script * Improve conversion script * Improve docs and implement tests * Define model output class * Fix tests * Fix more tests * Add model to README * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * Apply more suggestions from code review * Apply suggestions from code review * Rename dims to hidden_sizes * Fix equivalence test * Rename gamma to gamma_parameter * Clean up conversion script * Add ConvNextFeatureExtractor * Add corresponding tests * Implement feature extractor correctly * Make implementation cleaner * Add ConvNextStem class * Improve design * Update design to also include encoder * Fix gamma parameter * Use sample docstrings * Finish conversion, add center cropping * Replace nielsr by facebook, make feature extractor tests smaller * Fix integration test Co-authored-by: Sylvain Gugger <[email protected]>
https://github.com/huggingface/transformers.git
def forward(self, pixel_values=None, output_hidden_states=None, return_dict=None): output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.embeddings(pixel_values) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] # global average pooling, (N, C, H, W) -> (N, C) pooled_output = self.layernorm(last_hidden_state.mean([-2, -1])) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return ConvNextModelOutput( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, ) @add_start_docstrings( , CONVNEXT_START_DOCSTRING, )
@add_start_docstrings( """ ConvNext Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. """, CONVNEXT_START_DOCSTRING, )
token_counts: 141 | file_name: modeling_convnext.py | language: Python | path: src/transformers/models/convnext/modeling_convnext.py | commit_id: 84eec9e6ba55c5aceee2a92fd820fcca4b67c510 | repo: transformers | complexity: 5

id: 19,222 | vocab_size: 20 | ast_levels: 11 | nloc: 15 | n_ast_nodes: 108 | n_identifiers: 12 | n_ast_errors: 0 | n_words: 20 | n_whitespaces: 129
fun_name: rev_list
add diff style check test (#617) * add diff style check test * add diff style check test * add diff style check test * add diff style check test * add license * add license
https://github.com/AtsushiSakai/PythonRobotics.git
def rev_list(branch, num_commits): res = subprocess.run( [ 'git', 'rev-list', '--max-count', f'{num_commits}', '--first-parent', branch ], stdout=subprocess.PIPE, encoding='utf-8', ) res.check_returncode() return res.stdout.rstrip('\n').split('\n')
token_counts: 59 | file_name: test_diff_codestyle.py | language: Python | path: tests/test_diff_codestyle.py | commit_id: 0dfa274be3eaddb270b2bcee197f7d34acbc1363 | repo: PythonRobotics | complexity: 1

id: 259,492 | vocab_size: 90 | ast_levels: 10 | nloc: 27 | n_ast_nodes: 460 | n_identifiers: 41 | n_ast_errors: 1 | n_words: 153 | n_whitespaces: 427
fun_name: test_decision_boundary_display
FEA Add DecisionBoundaryDisplay (#16061) Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Loïc Estève <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def test_decision_boundary_display(pyplot, fitted_clf, response_method, plot_method): fig, ax = pyplot.subplots() eps = 2.0 disp = DecisionBoundaryDisplay.from_estimator( fitted_clf, X, grid_resolution=5, response_method=response_method, plot_method=plot_method, eps=eps, ax=ax, ) assert isinstance(disp.surface_, pyplot.matplotlib.contour.QuadContourSet) assert disp.ax_ == ax assert disp.figure_ == fig x0, x1 = X[:, 0], X[:, 1] x0_min, x0_max = x0.min() - eps, x0.max() + eps x1_min, x1_max = x1.min() - eps, x1.max() + eps assert disp.xx0.min() == pytest.approx(x0_min) assert disp.xx0.max() == pytest.approx(x0_max) assert disp.xx1.min() == pytest.approx(x1_min) assert disp.xx1.max() == pytest.approx(x1_max) fig2, ax2 = pyplot.subplots() # change plotting method for second plot disp.plot(plot_method="pcolormesh", ax=ax2, shading="auto") assert isinstance(disp.surface_, pyplot.matplotlib.collections.QuadMesh) assert disp.ax_ == ax2 assert disp.figure_ == fig2 @pytest.mark.parametrize( "response_method, msg", [ ( "predict_proba", "MyClassifier has none of the following attributes: predict_proba", ), ( "decision_function", "MyClassifier has none of the following attributes: decision_function", ), ( "auto", "MyClassifier has none of the following attributes: decision_function, " "predict_proba, predict", ), ( "bad_method", "MyClassifier has none of the following attributes: bad_method", ), ], )
@pytest.mark.parametrize( "response_method, msg", [ ( "predict_proba", "MyClassifier has none of the following attributes: predict_proba", ), ( "decision_function", "MyClassifier has none of the following attributes: decision_function", ), ( "auto", "MyClassifier has none of the following attributes: decision_function, " "predict_proba, predict", ), ( "bad_method", "MyClassifier has none of the following attributes: bad_method", ), ], )
token_counts: 251 | file_name: test_boundary_decision_display.py | language: Python | path: sklearn/inspection/_plot/tests/test_boundary_decision_display.py | commit_id: d400723a2112f15c5d5b4d40dfac2ed8a19cca5c | repo: scikit-learn | complexity: 1

id: 127,466 | vocab_size: 72 | ast_levels: 11 | nloc: 35 | n_ast_nodes: 352 | n_identifiers: 12 | n_ast_errors: 0 | n_words: 96 | n_whitespaces: 380
fun_name: to_dict
[RLlib; AIR; Tune] Replace all remaining `tune.run()` with `tune.Tuner(...).fit()`. (#28325)
https://github.com/ray-project/ray.git
def to_dict(self) -> AlgorithmConfigDict: config = copy.deepcopy(vars(self)) config.pop("algo_class") # Worst naming convention ever: NEVER EVER use reserved key-words... if "lambda_" in config: assert hasattr(self, "lambda_") config["lambda"] = getattr(self, "lambda_") config.pop("lambda_") if "input_" in config: assert hasattr(self, "input_") config["input"] = getattr(self, "input_") config.pop("input_") # Setup legacy multiagent sub-dict: config["multiagent"] = {} for k in [ "policies", "policy_map_capacity", "policy_map_cache", "policy_mapping_fn", "policies_to_train", "observation_fn", "replay_mode", "count_steps_by", ]: config["multiagent"][k] = config.pop(k) # Switch out deprecated vs new config keys. config["callbacks"] = config.pop("callbacks_class", DefaultCallbacks) config["create_env_on_driver"] = config.pop("create_env_on_local_worker", 1) config["custom_eval_function"] = config.pop("custom_evaluation_function", None) config["framework"] = config.pop("framework_str", None) config["num_cpus_for_driver"] = config.pop("num_cpus_for_local_worker", 1) return config
token_counts: 193 | file_name: algorithm_config.py | language: Python | path: rllib/algorithms/algorithm_config.py | commit_id: 42864d711d1eb2013a83670efc284ad22a62b929 | repo: ray | complexity: 4

id: 320,113 | vocab_size: 16 | ast_levels: 9 | nloc: 10 | n_ast_nodes: 81 | n_identifiers: 13 | n_ast_errors: 0 | n_words: 18 | n_whitespaces: 100
fun_name: test_scan_file_for_separating_barcodes_fax_decode
Adds specific handling for CCITT Group 4, which pikepdf decodes, but not correctly
https://github.com/paperless-ngx/paperless-ngx.git
def test_scan_file_for_separating_barcodes_fax_decode(self): test_file = os.path.join( self.BARCODE_SAMPLE_DIR, "barcode-fax-image.pdf", ) pdf_file, separator_page_numbers = barcodes.scan_file_for_separating_barcodes( test_file, ) self.assertEqual(pdf_file, test_file) self.assertListEqual(separator_page_numbers, [1])
token_counts: 50 | file_name: test_barcodes.py | language: Python | path: src/documents/tests/test_barcodes.py | commit_id: 4cc2976614b3a350ef5483456bc028534075e194 | repo: paperless-ngx | complexity: 1

id: 133,379 | vocab_size: 56 | ast_levels: 16 | nloc: 23 | n_ast_nodes: 143 | n_identifiers: 6 | n_ast_errors: 0 | n_words: 86 | n_whitespaces: 327
fun_name: choose_amp_backend
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def choose_amp_backend(use_fp16, native_amp=None, apex_amp=None): if use_fp16 not in (True, False, "apex"): raise ValueError("use_fp16 must be a bool or 'apex'.") if not use_fp16: return use_fp16 if use_fp16 == "apex": if not apex_amp: raise ImportError( "Please install apex from " "https://www.github.com/nvidia/apex to use fp16 training " "with apex." ) else: if not native_amp: use_fp16 = "apex" if not apex_amp: raise ImportError( "Neither native PyTorch amp nor apex are available." "Please either upgrade to PyTorch>=1.6 or install apex " "from https://www.github.com/nvidia/apex to use fp16" " training." ) return use_fp16
token_counts: 76 | file_name: utils.py | language: Python | path: python/ray/util/sgd/torch/utils.py | commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | repo: ray | complexity: 7

id: 275,065 | vocab_size: 32 | ast_levels: 11 | nloc: 8 | n_ast_nodes: 91 | n_identifiers: 11 | n_ast_errors: 0 | n_words: 36 | n_whitespaces: 83
fun_name: create_sgd
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def create_sgd(base_optimizer_cls, *args, **kwargs): if base_optimizer_cls == optimizer_v2.OptimizerV2: return gradient_descent.SGD(*args, **kwargs) else: assert ( base_optimizer_cls == optimizer_experimental.Optimizer ), f"Got invalid base_optimizer_cls: {base_optimizer_cls}" return sgd_experimental.SGD(*args, **kwargs) # TODO(b/215568552): Remove this as the delegation is handled by metaclass.
token_counts: 54 | file_name: loss_scale_optimizer_test.py | language: Python | path: keras/mixed_precision/loss_scale_optimizer_test.py | commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf | repo: keras | complexity: 2

id: 45,472 | vocab_size: 109 | ast_levels: 16 | nloc: 81 | n_ast_nodes: 1,113 | n_identifiers: 40 | n_ast_errors: 0 | n_words: 196 | n_whitespaces: 859
fun_name: downgrade
Autogenerate migration reference doc (#21601) * document airflow version in each alembic migration module and use this to autogen the doc * update each migration module to have the same description used in migration ref (so it can be used in autogen)
https://github.com/apache/airflow.git
def downgrade(): dialect_name = op.get_bind().dialect.name dt_type = TIMESTAMP string_id_col_type = StringID() op.add_column('task_instance', sa.Column('execution_date', dt_type, nullable=True)) op.add_column('task_reschedule', sa.Column('execution_date', dt_type, nullable=True)) update_query = _multi_table_update(dialect_name, task_instance, task_instance.c.execution_date) op.execute(update_query) update_query = _multi_table_update(dialect_name, task_reschedule, task_reschedule.c.execution_date) op.execute(update_query) with op.batch_alter_table('task_reschedule', schema=None) as batch_op: batch_op.alter_column('execution_date', existing_type=dt_type, existing_nullable=True, nullable=False) # Can't drop PK index while there is a FK referencing it batch_op.drop_constraint('task_reschedule_ti_fkey', type_='foreignkey') batch_op.drop_constraint('task_reschedule_dr_fkey', type_='foreignkey') batch_op.drop_index('idx_task_reschedule_dag_task_run') with op.batch_alter_table('task_instance', schema=None) as batch_op: batch_op.drop_constraint('task_instance_pkey', type_='primary') batch_op.alter_column('execution_date', existing_type=dt_type, existing_nullable=True, nullable=False) batch_op.alter_column( 'dag_id', existing_type=string_id_col_type, existing_nullable=True, nullable=True ) batch_op.create_primary_key('task_instance_pkey', ['dag_id', 'task_id', 'execution_date']) batch_op.drop_constraint('task_instance_dag_run_fkey', type_='foreignkey') batch_op.drop_index('ti_dag_run') batch_op.drop_index('ti_state_lkp') batch_op.create_index('ti_state_lkp', ['dag_id', 'task_id', 'execution_date', 'state']) batch_op.create_index('ti_dag_date', ['dag_id', 'execution_date'], unique=False) batch_op.drop_column('run_id') with op.batch_alter_table('task_reschedule', schema=None) as batch_op: batch_op.drop_column('run_id') batch_op.create_index( 'idx_task_reschedule_dag_task_date', ['dag_id', 'task_id', 'execution_date'], unique=False, ) # Can only create FK once there is an index on these columns batch_op.create_foreign_key( 'task_reschedule_dag_task_date_fkey', 'task_instance', ['dag_id', 'task_id', 'execution_date'], ['dag_id', 'task_id', 'execution_date'], ondelete='CASCADE', ) if dialect_name == "mssql": with op.batch_alter_table('dag_run', schema=None) as batch_op: batch_op.drop_constraint('dag_run_dag_id_execution_date_key', 'unique') batch_op.drop_constraint('dag_run_dag_id_run_id_key', 'unique') batch_op.drop_index('dag_id_state') batch_op.drop_index('idx_dag_run_running_dags') batch_op.drop_index('idx_dag_run_queued_dags') batch_op.alter_column('dag_id', existing_type=string_id_col_type, nullable=True) batch_op.alter_column('execution_date', existing_type=dt_type, nullable=True) batch_op.alter_column('run_id', existing_type=string_id_col_type, nullable=True) batch_op.create_index('dag_id_state', ['dag_id', 'state'], unique=False) batch_op.create_index('idx_dag_run_dag_id', ['dag_id']) batch_op.create_index( 'idx_dag_run_running_dags', ["state", "dag_id"], mssql_where=sa.text("state='running'"), ) batch_op.create_index( 'idx_dag_run_queued_dags', ["state", "dag_id"], mssql_where=sa.text("state='queued'"), ) op.execute( ) op.execute( ) else: with op.batch_alter_table('dag_run', schema=None) as batch_op: batch_op.drop_index('dag_id_state') batch_op.alter_column('run_id', existing_type=sa.VARCHAR(length=250), nullable=True) batch_op.alter_column('execution_date', existing_type=dt_type, nullable=True) batch_op.alter_column('dag_id', existing_type=sa.VARCHAR(length=250), nullable=True) 
batch_op.create_index('dag_id_state', ['dag_id', 'state'], unique=False)
token_counts: 633 | file_name: 7b2661a43ba3_taskinstance_keyed_to_dagrun.py | language: Python | path: airflow/migrations/versions/7b2661a43ba3_taskinstance_keyed_to_dagrun.py | commit_id: 69f6f9e01b6df76c3c8fa266d460324163957887 | repo: airflow | complexity: 2

id: 153,838 | vocab_size: 51 | ast_levels: 15 | nloc: 50 | n_ast_nodes: 176 | n_identifiers: 27 | n_ast_errors: 0 | n_words: 58 | n_whitespaces: 134
fun_name: _read
REFACTOR-#4510: Align experimental and regular IO modules initializations (#4511) Signed-off-by: alexander3774 <[email protected]>
https://github.com/modin-project/modin.git
def _read(**kwargs) -> DataFrame: Engine.subscribe(_update_engine) from modin.core.execution.dispatching.factories.dispatcher import FactoryDispatcher try: pd_obj = FactoryDispatcher.read_csv_glob(**kwargs) except AttributeError: raise AttributeError("read_csv_glob() is only implemented for pandas on Ray.") # This happens when `read_csv` returns a TextFileReader object for iterating through if isinstance(pd_obj, pandas.io.parsers.TextFileReader): reader = pd_obj.read pd_obj.read = lambda *args, **kwargs: DataFrame( query_compiler=reader(*args, **kwargs) ) return pd_obj return DataFrame(query_compiler=pd_obj) read_csv_glob = _make_parser_func(sep=",")
token_counts: 100 | file_name: io.py | language: Python | path: modin/experimental/pandas/io.py | commit_id: dcee13d57ebf9a006460deedb734c15791acae7a | repo: modin | complexity: 3

id: 263,468 | vocab_size: 35 | ast_levels: 12 | nloc: 15 | n_ast_nodes: 227 | n_identifiers: 19 | n_ast_errors: 1 | n_words: 53 | n_whitespaces: 162
fun_name: run
Bootloader: Building: Unpack waf's lib archive. Doing so makes it easier to modify. This is a temporary measure until the next waf version is released (although I'm tempted to keep it since it's much more IDE completion friendly).
https://github.com/pyinstaller/pyinstaller.git
def run(self): bld = self.generator.bld get = self.env.get_flat cmd1 = "%s %s --prefix=%s --header > %s" % ( get('GLIB_GENMARSHAL'), self.inputs[0].srcpath(), get('GLIB_GENMARSHAL_PREFIX'), self.outputs[0].abspath() ) ret = bld.exec_command(cmd1) if ret: return ret c = % self.outputs[0].name self.outputs[1].write(c) cmd2 = "%s %s --prefix=%s --body >> %s" % ( get('GLIB_GENMARSHAL'), self.inputs[0].srcpath(), get('GLIB_GENMARSHAL_PREFIX'), self.outputs[1].abspath() ) return bld.exec_command(cmd2) @taskgen_method
@taskgen_method
token_counts: 136 | file_name: glib2.py | language: Python | path: bootloader/waflib/Tools/glib2.py | commit_id: 64ccb7aea824fbec57f7ed1bbe483ec486183c13 | repo: pyinstaller | complexity: 2

id: 85,468 | vocab_size: 37 | ast_levels: 11 | nloc: 14 | n_ast_nodes: 150 | n_identifiers: 19 | n_ast_errors: 0 | n_words: 43 | n_whitespaces: 157
fun_name: get_next_event_id
feat(perf issues): Return prev/next for transaction events (#38274) * feat(perf issues): Return prev/next for transaction events
https://github.com/getsentry/sentry.git
def get_next_event_id(self, event, filter): assert filter, "You must provide a filter" if not event: return None filter = deepcopy(filter) filter.conditions = filter.conditions or [] filter.conditions.extend(get_after_event_condition(event)) filter.start = event.datetime dataset = ( snuba.Dataset.Transactions if event.get_event_type() == "transaction" else snuba.Dataset.Discover ) return self.__get_event_id_from_filter(filter=filter, orderby=ASC_ORDERING, dataset=dataset)
token_counts: 94 | file_name: backend.py | language: Python | path: src/sentry/eventstore/snuba/backend.py | commit_id: 6d7681529f68a87e41d4c11a4aa1e6732cb15ade | repo: sentry | complexity: 4

id: 129,004 | vocab_size: 7 | ast_levels: 8 | nloc: 4 | n_ast_nodes: 34 | n_identifiers: 5 | n_ast_errors: 0 | n_words: 8 | n_whitespaces: 40
fun_name: address
[GCS][Bootstrap n/n] Do not start Redis in GCS bootstrapping mode (#21232) After this change in GCS bootstrapping mode, Redis no longer starts and `address` is treated as the GCS address of the Ray cluster. Co-authored-by: Yi Cheng <[email protected]> Co-authored-by: Yi Cheng <[email protected]>
https://github.com/ray-project/ray.git
def address(self): if use_gcs_for_bootstrap(): return self._gcs_address return self._redis_address
token_counts: 19 | file_name: node.py | language: Python | path: python/ray/node.py | commit_id: 70db5c5592d94b611fee0a334414f1f4f5cc151a | repo: ray | complexity: 2

id: 133,007 | vocab_size: 11 | ast_levels: 10 | nloc: 14 | n_ast_nodes: 46 | n_identifiers: 8 | n_ast_errors: 0 | n_words: 11 | n_whitespaces: 25
fun_name: reduce
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def reduce(self, tensors, reduce_options=ReduceOptions()): root_rank = len(tensors) * reduce_options.root_rank + reduce_options.root_tensor
token_counts: 40 | file_name: nccl_collective_group.py | language: Python | path: python/ray/util/collective/collective_group/nccl_collective_group.py | commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | repo: ray | complexity: 1

id: 108,519 | vocab_size: 15 | ast_levels: 8 | nloc: 2 | n_ast_nodes: 22 | n_identifiers: 2 | n_ast_errors: 0 | n_words: 15 | n_whitespaces: 21
fun_name: spring
Cleanup documentation generation for pyplot - remove the awkward `pyplot.plotting()` function, which only served as a namespace to take up the docs for pyplot and output them via `.. autofunction` - Instead generate the same information using `.. autosummary::`. We have to list the desired methods here explicitly. I've added a test that these are the same as previously auto-generated in the `plotting()` docstring. If we change anything in pyplot, we'll be notified through the test failure that we have to adapt the autosummary list. - Removed the docstring generation logic `_setup_pyplot_info_docstrings()`. Apart from generating the `plotting()` docstring, this added docstrings to the pyplot colormap setters. Instead, we now add these docstrings directly via boilerplate.py Co-authored-by: Elliott Sales de Andrade <[email protected]>
https://github.com/matplotlib/matplotlib.git
def spring(): set_cmap('spring') # Autogenerated by boilerplate.py. Do not edit as changes will be lost.
token_counts: 9 | file_name: pyplot.py | language: Python | path: lib/matplotlib/pyplot.py | commit_id: 032316bc6c7798fca6c82de24167c975f237687f | repo: matplotlib | complexity: 1

id: 130,986 | vocab_size: 103 | ast_levels: 12 | nloc: 51 | n_ast_nodes: 937 | n_identifiers: 37 | n_ast_errors: 0 | n_words: 224 | n_whitespaces: 401
fun_name: test_deploy_with_partial_constructor_failure
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def test_deploy_with_partial_constructor_failure(mock_deployment_state): deployment_state, timer, goal_manager = mock_deployment_state b_info_1, b_version_1 = deployment_info(num_replicas=2) create_goal, updating = deployment_state.deploy(b_info_1) goal_obj = goal_manager.get_goal(create_goal) deleted = _constructor_failure_loop_two_replica(deployment_state, 2) assert not deleted deployment_state.update() check_counts(deployment_state, total=2, by_state=[(ReplicaState.STARTING, 2)]) assert deployment_state._replica_constructor_retry_counter == 4 # Let one replica reach RUNNING state while the other still fails replica_1 = deployment_state._replicas.get()[0] replica_2 = deployment_state._replicas.get()[1] replica_1._actor.set_ready() replica_2._actor.set_failed_to_start() deployment_state.update() check_counts(deployment_state, total=2, by_state=[(ReplicaState.RUNNING, 1)]) check_counts(deployment_state, total=2, by_state=[(ReplicaState.STOPPING, 1)]) # Ensure failed to start replica is removed deployment_state.update() check_counts(deployment_state, total=2, by_state=[(ReplicaState.RUNNING, 1)]) check_counts(deployment_state, total=2, by_state=[(ReplicaState.STOPPING, 1)]) replica_2._actor.set_done_stopping() deployment_state.update() check_counts(deployment_state, total=1, by_state=[(ReplicaState.RUNNING, 1)]) check_counts(deployment_state, total=1, by_state=[(ReplicaState.STARTING, 0)]) # New update cycle should spawn new replica after previous one is removed deployment_state.update() check_counts(deployment_state, total=2, by_state=[(ReplicaState.RUNNING, 1)]) check_counts(deployment_state, total=2, by_state=[(ReplicaState.STARTING, 1)]) # Set the starting one to fail again and trigger retry limit starting_replica = deployment_state._replicas.get(states=[ReplicaState.STARTING])[0] starting_replica._actor.set_failed_to_start() deployment_state.update() # Ensure our goal returned with construtor start counter reset assert deployment_state._replica_constructor_retry_counter == -1 # Deploy() goal should NOT be considered complete yet assert not goal_manager.check_complete(create_goal) check_counts(deployment_state, total=2, by_state=[(ReplicaState.RUNNING, 1)]) check_counts(deployment_state, total=2, by_state=[(ReplicaState.STOPPING, 1)]) deployment_state.update() check_counts(deployment_state, total=2, by_state=[(ReplicaState.RUNNING, 1)]) check_counts(deployment_state, total=2, by_state=[(ReplicaState.STOPPING, 1)]) starting_replica = deployment_state._replicas.get(states=[ReplicaState.STOPPING])[0] starting_replica._actor.set_done_stopping() deployment_state.update() check_counts(deployment_state, total=1, by_state=[(ReplicaState.RUNNING, 1)]) check_counts(deployment_state, total=1, by_state=[(ReplicaState.STARTING, 0)]) deployment_state.update() check_counts(deployment_state, total=2, by_state=[(ReplicaState.RUNNING, 1)]) check_counts(deployment_state, total=2, by_state=[(ReplicaState.STARTING, 1)]) starting_replica = deployment_state._replicas.get(states=[ReplicaState.STARTING])[0] starting_replica._actor.set_ready() deployment_state.update() check_counts(deployment_state, total=2, by_state=[(ReplicaState.RUNNING, 2)]) # Deploy() goal should be considered complete assert goal_manager.check_complete(create_goal) # No except set on the AsyncGoal object assert goal_obj.exception is None
token_counts: 619 | file_name: test_deployment_state.py | language: Python | path: python/ray/serve/tests/test_deployment_state.py | commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | repo: ray | complexity: 1

id: 84,291 | vocab_size: 75 | ast_levels: 12 | nloc: 22 | n_ast_nodes: 247 | n_identifiers: 31 | n_ast_errors: 0 | n_words: 94 | n_whitespaces: 275
fun_name: test_public_only_export_files_private_uploads_not_included
test_import_export: Test exclusion of private uploads from realm export.
https://github.com/zulip/zulip.git
def test_public_only_export_files_private_uploads_not_included(self) -> None: user_profile = self.example_user("hamlet") realm = user_profile.realm # We create an attachment tied to a personal message. That means it shouldn't be # included in a public export, as it's private data. personal_message_id = self.send_personal_message(user_profile, self.example_user("othello")) url = upload_message_file( "dummy.txt", len(b"zulip!"), "text/plain", b"zulip!", user_profile ) attachment_path_id = url.replace("/user_uploads/", "") attachment = claim_attachment( user_profile=user_profile, path_id=attachment_path_id, message=Message.objects.get(id=personal_message_id), is_message_realm_public=True, ) self.export_realm(realm, public_only=True) # The attachment row shouldn't have been exported: self.assertEqual((read_json("attachment.json")["zerver_attachment"]), []) # Aside of the attachment row, we also need to verify that the file itself # isn't included. fn = export_fn(f"uploads/{attachment.path_id}") self.assertFalse(os.path.exists(fn))
token_counts: 142 | file_name: test_import_export.py | language: Python | path: zerver/tests/test_import_export.py | commit_id: e6264cd85b0617bae2fa37c1672eaf3260576e4a | repo: zulip | complexity: 1

id: 260,303 | vocab_size: 15 | ast_levels: 11 | nloc: 4 | n_ast_nodes: 50 | n_identifiers: 5 | n_ast_errors: 0 | n_words: 19 | n_whitespaces: 51
fun_name: _check_infrequent_enabled
MAINT validate parameter in OneHotEncoder and OrdinalEncoder (#23579) Co-authored-by: jeremie du boisberranger <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def _check_infrequent_enabled(self): self._infrequent_enabled = ( self.max_categories is not None and self.max_categories >= 1 ) or self.min_frequency is not None
token_counts: 31 | file_name: _encoders.py | language: Python | path: sklearn/preprocessing/_encoders.py | commit_id: 8a8d0687eed25481db39aa5c5b85148b2933d0a7 | repo: scikit-learn | complexity: 3

id: 102,169 | vocab_size: 27 | ast_levels: 7 | nloc: 9 | n_ast_nodes: 32 | n_identifiers: 4 | n_ast_errors: 0 | n_words: 30 | n_whitespaces: 58
fun_name: test_valid_with_autograd_ops
Revert "Revert D32498569: allow external backend codegen to toggle whether to generate out= and inplace kernels" (#69950) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/69950 This reverts commit f6cad53443704dfe5a20cc62bee14d91e3bffcaa. Test Plan: Imported from OSS Reviewed By: albanD Differential Revision: D33113545 Pulled By: bdhirsh fbshipit-source-id: d6590294662588d36c09662dea65919ad4e1e288
https://github.com/pytorch/pytorch.git
def test_valid_with_autograd_ops(self) -> None: yaml_str = # External codegen on a yaml file with no operators is effectively a no-op, # so there's no reason to parse the backend self.assert_success_from_gen_backend_stubs(yaml_str)
token_counts: 16 | file_name: test_gen_backend_stubs.py | language: Python | path: tools/test/test_gen_backend_stubs.py | commit_id: bb5b4cceb6f737448eaaa6817cd773b6f4b0e77d | repo: pytorch | complexity: 1

id: 245,586 | vocab_size: 26 | ast_levels: 14 | nloc: 19 | n_ast_nodes: 133 | n_identifiers: 14 | n_ast_errors: 0 | n_words: 34 | n_whitespaces: 91
fun_name: assert_params_all_zeros
[Fix] Fix UT and remove delete mmcv ops. (#8623) * Remove get_root_logger * Fix UT * Update
https://github.com/open-mmlab/mmdetection.git
def assert_params_all_zeros(module) -> bool: weight_data = module.weight.data is_weight_zero = weight_data.allclose( weight_data.new_zeros(weight_data.size())) if hasattr(module, 'bias') and module.bias is not None: bias_data = module.bias.data is_bias_zero = bias_data.allclose( bias_data.new_zeros(bias_data.size())) else: is_bias_zero = True return is_weight_zero and is_bias_zero
token_counts: 80 | file_name: test_resnet.py | language: Python | path: tests/test_models/test_backbones/test_resnet.py | commit_id: 73a12e6508d4ba0331b84b1313027a511ba26fe3 | repo: mmdetection | complexity: 4

id: 100,830 | vocab_size: 6 | ast_levels: 6 | nloc: 4 | n_ast_nodes: 22 | n_identifiers: 4 | n_ast_errors: 0 | n_words: 6 | n_whitespaces: 20
fun_name: model_name
Refactoring and TravisCI to Github Actions (#1239) * refactor training * travis to actions
https://github.com/deepfakes/faceswap.git
def model_name(self) -> str: return self.name
token_counts: 12 | file_name: model.py | language: Python | path: plugins/train/model/_base/model.py | commit_id: ff6b0209dd5ad57b81b0aca570df7f39a7119bfb | repo: faceswap | complexity: 1

id: 90,847 | vocab_size: 94 | ast_levels: 17 | nloc: 37 | n_ast_nodes: 410 | n_identifiers: 48 | n_ast_errors: 0 | n_words: 120 | n_whitespaces: 754
fun_name: notify_if_ready
ref(models): `ActivityType` (#34978) ## Objective: We want to separate enum logic from Model logic. This breaks a lot of circular dependencies.
https://github.com/getsentry/sentry.git
def notify_if_ready(cls, deploy_id, fetch_complete=False): from sentry.models import Activity, Environment, ReleaseCommit, ReleaseHeadCommit lock_key = cls.get_lock_key(deploy_id) lock = locks.get(lock_key, duration=30) with TimedRetryPolicy(10)(lock.acquire): deploy = cls.objects.filter(id=deploy_id).select_related("release").get() if deploy.notified: return release = deploy.release environment = Environment.objects.get( organization_id=deploy.organization_id, id=deploy.environment_id ) if not fetch_complete: release_has_commits = ReleaseCommit.objects.filter( organization_id=release.organization_id, release=release ).exists() if not release_has_commits: # check if we have head commits, which # would indicate that we're waiting for # fetch_commits to complete if ReleaseHeadCommit.objects.filter( organization_id=release.organization_id, release=release ).exists(): return activity = None for project in deploy.release.projects.all(): activity = Activity.objects.create( type=ActivityType.DEPLOY.value, project=project, ident=Activity.get_version_ident(release.version), data={ "version": release.version, "deploy_id": deploy.id, "environment": environment.name, }, datetime=deploy.date_finished, ) # Somewhat hacky, only send notification for one # Deploy Activity record because it will cover all projects if activity is not None: activity.send_notification() deploy.update(notified=True)
token_counts: 256 | file_name: deploy.py | language: Python | path: src/sentry/models/deploy.py | commit_id: b9f5a910dc841b85f58d46266ec049ae5a7fd305 | repo: sentry | complexity: 7

id: 22,224 | vocab_size: 43 | ast_levels: 12 | nloc: 13 | n_ast_nodes: 181 | n_identifiers: 17 | n_ast_errors: 1 | n_words: 52 | n_whitespaces: 109
fun_name: get_finder
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
https://github.com/pypa/pipenv.git
def get_finder(sources=None, pip_command=None, pip_options=None): # type: (List[Dict[S, Union[S, bool]]], Optional[Command], Any) -> PackageFinder if not pip_command: pip_command = shims.InstallCommand() if not sources: sources = [{"url": "https://pypi.org/simple", "name": "pypi", "verify_ssl": True}] if not pip_options: pip_options = get_pip_options(sources=sources, pip_command=pip_command) session = pip_command._build_session(pip_options) atexit.register(session.close) finder = shims.get_package_finder( shims.InstallCommand(), options=pip_options, session=session ) return session, finder @contextlib.contextmanager
@contextlib.contextmanager
token_counts: 104 | file_name: dependencies.py | language: Python | path: pipenv/vendor/requirementslib/models/dependencies.py | commit_id: cd5a9683be69c86c8f3adcd13385a9bc5db198ec | repo: pipenv | complexity: 4

id: 249,451 | vocab_size: 16 | ast_levels: 9 | nloc: 9 | n_ast_nodes: 65 | n_identifiers: 8 | n_ast_errors: 0 | n_words: 17 | n_whitespaces: 89
fun_name: setup
Share some metrics between the Prometheus exporter and the phone home stats (#13671)
https://github.com/matrix-org/synapse.git
async def setup(self) -> None: await self._update_gauges() self._clock.looping_call( run_as_background_process, 5 * 60 * 1000, desc="common_usage_metrics_update_gauges", func=self._update_gauges, )
token_counts: 39 | file_name: common_usage_metrics.py | language: Python | path: synapse/metrics/common_usage_metrics.py | commit_id: 898fef2789c9b1a20ef53c7d588f536f51f0fe2f | repo: synapse | complexity: 1

id: 78,845 | vocab_size: 4 | ast_levels: 8 | nloc: 2 | n_ast_nodes: 24 | n_identifiers: 4 | n_ast_errors: 0 | n_words: 4 | n_whitespaces: 18
fun_name: clean_name
Update panel templates for new designs (EditHandler rewrite) Co-authored-by: Thibaud Colas <[email protected]>
https://github.com/wagtail/wagtail.git
def clean_name(self): return safe_snake_case(self.heading)
token_counts: 13 | file_name: panels.py | language: Python | path: wagtail/admin/panels.py | commit_id: 5521e3b59f45af830ebac3c5686e092616eb82e4 | repo: wagtail | complexity: 1

id: 156,128 | vocab_size: 140 | ast_levels: 15 | nloc: 87 | n_ast_nodes: 909 | n_identifiers: 45 | n_ast_errors: 0 | n_words: 315 | n_whitespaces: 1,061
fun_name: fuse_linear
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
https://github.com/dask/dask.git
def fuse_linear(dsk, keys=None, dependencies=None, rename_keys=True): if keys is not None and not isinstance(keys, set): if not isinstance(keys, list): keys = [keys] keys = set(flatten(keys)) if dependencies is None: dependencies = {k: get_dependencies(dsk, k, as_list=True) for k in dsk} # locate all members of linear chains child2parent = {} unfusible = set() for parent in dsk: deps = dependencies[parent] has_many_children = len(deps) > 1 for child in deps: if keys is not None and child in keys: unfusible.add(child) elif child in child2parent: del child2parent[child] unfusible.add(child) elif has_many_children: unfusible.add(child) elif child not in unfusible: child2parent[child] = parent # construct the chains from ancestor to descendant chains = [] parent2child = dict(map(reversed, child2parent.items())) while child2parent: child, parent = child2parent.popitem() chain = [child, parent] while parent in child2parent: parent = child2parent.pop(parent) del parent2child[parent] chain.append(parent) chain.reverse() while child in parent2child: child = parent2child.pop(child) del child2parent[child] chain.append(child) chains.append(chain) dependencies = {k: set(v) for k, v in dependencies.items()} if rename_keys is True: key_renamer = default_fused_linear_keys_renamer elif rename_keys is False: key_renamer = None else: key_renamer = rename_keys # create a new dask with fused chains rv = {} fused = set() aliases = set() is_renamed = False for chain in chains: if key_renamer is not None: new_key = key_renamer(chain) is_renamed = ( new_key is not None and new_key not in dsk and new_key not in rv ) child = chain.pop() val = dsk[child] while chain: parent = chain.pop() dependencies[parent].update(dependencies.pop(child)) dependencies[parent].remove(child) val = subs(dsk[parent], child, val) fused.add(child) child = parent fused.add(child) if is_renamed: rv[new_key] = val rv[child] = new_key dependencies[new_key] = dependencies[child] dependencies[child] = {new_key} aliases.add(child) else: rv[child] = val for key, val in dsk.items(): if key not in fused: rv[key] = val if aliases: for key, deps in dependencies.items(): for old_key in deps & aliases: new_key = rv[old_key] deps.remove(old_key) deps.add(new_key) rv[key] = subs(rv[key], old_key, new_key) if keys is not None: for key in aliases - keys: del rv[key] del dependencies[key] return rv, dependencies
token_counts: 581 | file_name: optimization.py | language: Python | path: dask/optimization.py | commit_id: cccb9d8d8e33a891396b1275c2448c352ef40c27 | repo: dask | complexity: 32

id: 322,360 | vocab_size: 11 | ast_levels: 8 | nloc: 8 | n_ast_nodes: 48 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 11 | n_whitespaces: 39
fun_name: split_sentences
Add model Prohetnet (#1698) * add Prohetnet model * update prohetnet * update format * pre commit * add prophetnet example * update tokenizer.py,run_train.sh,train_prophetnet.py * remove evaluate/gigaword/__init__.py Co-authored-by: smallv0221 <[email protected]>
https://github.com/PaddlePaddle/PaddleNLP.git
def split_sentences(self): from pyrouge.utils.sentence_splitter import PunktSentenceSplitter self.log.info("Splitting sentences.") ss = PunktSentenceSplitter()
token_counts: 47 | file_name: bs_pyrouge.py | language: Python | path: examples/text_summarization/prophetnet/evaluate/cnndm/bs_pyrouge.py | commit_id: 487162262196bead8d9b4c2306f313b8f64edf9b | repo: PaddleNLP | complexity: 1

id: 32,802 | vocab_size: 38 | ast_levels: 14 | nloc: 9 | n_ast_nodes: 176 | n_identifiers: 24 | n_ast_errors: 0 | n_words: 53 | n_whitespaces: 99
fun_name: prepare_video
Add VideoMAE (#17821) * First draft * Add VideoMAEForVideoClassification * Improve conversion script * Add VideoMAEForPreTraining * Add VideoMAEFeatureExtractor * Improve VideoMAEFeatureExtractor * Improve docs * Add first draft of model tests * Improve VideoMAEForPreTraining * Fix base_model_prefix * Make model take pixel_values of shape (B, T, C, H, W) * Add loss computation of VideoMAEForPreTraining * Improve tests * Improve model testsé * Make all tests pass * Add VideoMAE to main README * Add tests for VideoMAEFeatureExtractor * Add integration test * Improve conversion script * Rename patch embedding class * Remove VideoMAELayer from init * Update design of patch embeddings * Improve comments * Improve conversion script * Improve conversion script * Add conversion of pretrained model * Add loss verification of pretrained model * Add loss verification of unnormalized targets * Add integration test for pretraining model * Apply suggestions from code review * Fix bug to make feature extractor resize only shorter edge * Address more comments * Improve normalization of videos * Add doc examples * Move constants to dedicated script * Remove scripts * Transfer checkpoints, fix docs * Update script * Update image mean and std * Fix doc tests * Set return_tensors to NumPy by default * Revert the previous change Co-authored-by: Niels Rogge <[email protected]>
https://github.com/huggingface/transformers.git
def prepare_video(feature_extract_tester, width=10, height=10, numpify=False, torchify=False): video = [] for i in range(feature_extract_tester.num_frames): video.append(np.random.randint(255, size=(feature_extract_tester.num_channels, width, height), dtype=np.uint8)) if not numpify and not torchify: # PIL expects the channel dimension as last dimension video = [Image.fromarray(np.moveaxis(frame, 0, -1)) for frame in video] if torchify: video = [torch.from_numpy(frame) for frame in video] return video
token_counts: 117 | file_name: test_feature_extraction_common.py | language: Python | path: tests/test_feature_extraction_common.py | commit_id: f9a0008d2d3082a665f711b24f5314e4a8205fab | repo: transformers | complexity: 7

id: 181,890 | vocab_size: 18 | ast_levels: 13 | nloc: 8 | n_ast_nodes: 89 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 25 | n_whitespaces: 61
fun_name: positive_integer
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
https://github.com/EpistasisLab/tpot.git
def positive_integer(value): try: value = int(value) except Exception: raise argparse.ArgumentTypeError('Invalid int value: \'{}\''.format(value)) if value < 0: raise argparse.ArgumentTypeError('Invalid positive int value: \'{}\''.format(value)) return value
token_counts: 48 | file_name: driver.py | language: Python | path: tpot/driver.py | commit_id: 388616b6247ca4ea8de4e2f340d6206aee523541 | repo: tpot | complexity: 3

id: 273,881 | vocab_size: 11 | ast_levels: 9 | nloc: 5 | n_ast_nodes: 61 | n_identifiers: 8 | n_ast_errors: 0 | n_words: 12 | n_whitespaces: 27
fun_name: function_register
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def function_register(func, *args, **kwargs): concrete_func = func.get_concrete_function(*args, **kwargs) concrete_func.add_to_graph() concrete_func.add_gradient_functions_to_graph() return concrete_func
token_counts: 36 | file_name: gru_lstm_utils.py | language: Python | path: keras/layers/rnn/gru_lstm_utils.py | commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf | repo: keras | complexity: 1

id: 124,605 | vocab_size: 4 | ast_levels: 5 | nloc: 27 | n_ast_nodes: 30 | n_identifiers: 6 | n_ast_errors: 2 | n_words: 4 | n_whitespaces: 7
fun_name: test_pip_no_working_dir
[Serve] [runtime_env] [CI] Skip flaky Ray Client test (#26400)
https://github.com/ray-project/ray.git
def test_pip_no_working_dir(ray_start): driver =
driver = """ import ray from ray import serve import requests ray.init(address="auto")@serve.deployment
token_counts: 18 | file_name: test_runtime_env_2.py | language: Python | path: python/ray/serve/tests/test_runtime_env_2.py | commit_id: 0914e5602d387fe46b0c27b41bb65bd593f5e393 | repo: ray | complexity: 1

id: 293,550 | vocab_size: 7 | ast_levels: 9 | nloc: 3 | n_ast_nodes: 47 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 8 | n_whitespaces: 29
fun_name: async_turn_off
Add switch platform to the Mazda integration (#68025)
https://github.com/home-assistant/core.git
async def async_turn_off(self, **kwargs): await self.client.stop_charging(self.vehicle_id) await self.refresh_status_and_write_state()
token_counts: 26 | file_name: switch.py | language: Python | path: homeassistant/components/mazda/switch.py | commit_id: 0bd65db31cfcc07b79882e44f555b22899471ec6 | repo: core | complexity: 1

id: 153,213 | vocab_size: 8 | ast_levels: 13 | nloc: 2 | n_ast_nodes: 54 | n_identifiers: 6 | n_ast_errors: 0 | n_words: 8 | n_whitespaces: 22
fun_name: to_numpy
REFACTOR-#3990: remove code duplication in PandasDataframePartition hierarchy (#3991) Signed-off-by: Anatoly Myachev <[email protected]>
https://github.com/modin-project/modin.git
def to_numpy(self, **kwargs): return self.apply(lambda df, **kwargs: df.to_numpy(**kwargs)).get()
token_counts: 32 | file_name: partition.py | language: Python | path: modin/core/dataframe/pandas/partitioning/partition.py | commit_id: 4f69410776e74196ee749e66794be48d68b8f869 | repo: modin | complexity: 1

id: 259,457 | vocab_size: 58 | ast_levels: 12 | nloc: 12 | n_ast_nodes: 170 | n_identifiers: 17 | n_ast_errors: 0 | n_words: 78 | n_whitespaces: 199
fun_name: test_poisson_glmnet
ENH migrate GLMs / TweedieRegressor to linear loss (#22548) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def test_poisson_glmnet(): # library("glmnet") # options(digits=10) # df <- data.frame(a=c(-2,-1,1,2), b=c(0,0,1,1), y=c(0,1,1,2)) # x <- data.matrix(df[,c("a", "b")]) # y <- df$y # fit <- glmnet(x=x, y=y, alpha=0, intercept=T, family="poisson", # standardize=F, thresh=1e-10, nlambda=10000) # coef(fit, s=1) # (Intercept) -0.12889386979 # a 0.29019207995 # b 0.03741173122 X = np.array([[-2, -1, 1, 2], [0, 0, 1, 1]]).T y = np.array([0, 1, 1, 2]) glm = PoissonRegressor( alpha=1, fit_intercept=True, tol=1e-7, max_iter=300, ) glm.fit(X, y) assert_allclose(glm.intercept_, -0.12889386979, rtol=1e-5) assert_allclose(glm.coef_, [0.29019207995, 0.03741173122], rtol=1e-5)
token_counts: 123 | file_name: test_glm.py | language: Python | path: sklearn/linear_model/_glm/tests/test_glm.py | commit_id: 75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc | repo: scikit-learn | complexity: 1

id: 311,205 | vocab_size: 7 | ast_levels: 11 | nloc: 3 | n_ast_nodes: 40 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 7 | n_whitespaces: 21
fun_name: _refresh
Implement coordinator class for Tradfri integration (#64166) * Initial commit coordinator * More coordinator implementation * More coordinator implementation * Allow integration reload * Move API calls to try/catch block * Move back fixture * Remove coordinator test file * Ensure unchanged file * Ensure unchanged conftest.py file * Remove coordinator key check * Apply suggestions from code review Co-authored-by: Martin Hjelmare <[email protected]> * Import RequestError * Move async_setup_platforms to end of setup_entry * Remove centralised handling of device data and device controllers * Remove platform_type argument * Remove exception * Remove the correct exception * Refactor coordinator error handling * Apply suggestions from code review Co-authored-by: Martin Hjelmare <[email protected]> * Remove platform type from base class * Remove timeout context manager * Refactor exception callback * Simplify starting device observation * Update test * Move observe start into update method * Remove await self.coordinator.async_request_refresh() * Refactor cover.py * Uncomment const.py * Add back extra_state_attributes * Update homeassistant/components/tradfri/coordinator.py Co-authored-by: Martin Hjelmare <[email protected]> * Refactor switch platform * Expose switch state * Refactor sensor platform * Put back accidentally deleted code * Add set_hub_available * Apply suggestions from code review Co-authored-by: Martin Hjelmare <[email protected]> * Fix tests for fan platform * Update homeassistant/components/tradfri/base_class.py Co-authored-by: Martin Hjelmare <[email protected]> * Update homeassistant/components/tradfri/base_class.py Co-authored-by: Martin Hjelmare <[email protected]> * Fix non-working tests * Refresh sensor state * Remove commented line * Add group coordinator * Add groups during setup * Refactor light platform * Fix tests * Move outside of try...except * Remove error handler * Remove unneeded methods * Update sensor * Update .coveragerc * Move signal * Add signals for groups * Fix signal Co-authored-by: Martin Hjelmare <[email protected]>
https://github.com/home-assistant/core.git
def _refresh(self) -> None: self._device_data = self.coordinator.data.blind_control.blinds[0]
24
cover.py
Python
homeassistant/components/tradfri/cover.py
9d404b749a0aa0d0527e145c911559df5ecf2afd
core
1
153,816
9
10
2
44
7
0
9
24
mode
REFACTOR-#4513: Fix spelling mistakes in docs and docstrings (#4514) Co-authored-by: Rehan Sohail Durrani <[email protected]> Signed-off-by: jeffreykennethli <[email protected]>
https://github.com/modin-project/modin.git
def mode(self, **kwargs):  # noqa: PR02
    return DataFrameDefault.register(pandas.DataFrame.mode)(self, **kwargs)
26
query_compiler.py
Python
modin/core/storage_formats/base/query_compiler.py
57e29bc5d82348006c5170ef9ac0a9eedcd9acf9
modin
1
137,385
24
10
10
69
9
0
27
76
to_json
[Jobs] [Job Status refactor 1/n] Use JSON for JobInfo instead of pickled Python class (#30955) Updates the JobInfo storage to use JSON instead of a pickled Python class. This allows it to be read by the GCS (which is in C++). Related issue number This a first step towards resolving #29621. The goal is to unify the GCS APIs and Ray Jobs API for getting the status of jobs. Plan copied from that issue: [This PR] Have the job server store extended JobInfo in a proto or JSON form, instead of pickled Python data. The proto/JSON JobInfo can still be stored in the internal KV. Enhance ListAllJobs to pre-join the data from job_submission_id in the internal KV. This means that GCS will need to lookup the extended JobInfo for jobs that have job_submission_id defined. Add a limit field to ListAllJobs to bound the number of returned jobs.
https://github.com/ray-project/ray.git
def to_json(self) -> Dict[str, Any]:
    json_dict = asdict(self)
    # Convert enum values to strings.
    json_dict["status"] = str(json_dict["status"])
    # Assert that the dictionary is JSON-serializable.
    json.dumps(json_dict)
    return json_dict
39
common.py
Python
dashboard/modules/job/common.py
b028e6b35d56baa2c19b4353f2e2605968bc93b6
ray
1
269,370
7
10
2
34
6
0
7
13
round_repeats
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def round_repeats(repeats, depth_coefficient): return int(math.ceil(depth_coefficient * repeats))
20
efficientnet_v2.py
Python
keras/applications/efficientnet_v2.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
178,311
15
9
7
45
4
0
15
51
createPreModuleLoadCode
Plugins: Massive cleanup and API improvements and Kivy support * Added method to locate a DLL and to create a DLL entry point as expected, removing need for imports and making it more clear as an API. * The location of modules had already an API, but it wasn' used where it could be. * Moved implicit imports and DLL usage for Gi to its plugin, solving a TODO for it. * Make sure sure to only yield, and not return, that is just more error prone. * Also allow generators for implicit dependencies, such that generators work in a yield from fashion. * With this, Kivy apps work on at least Linux.
https://github.com/Nuitka/Nuitka.git
def createPreModuleLoadCode(module):
    if module.getFullName() == "gi":
        code = r
        return code, "Set typelib search path"
23
GiPlugin.py
Python
nuitka/plugins/standard/GiPlugin.py
56eb59d93f13815e66d0dea07e7669dfe275fa10
Nuitka
2
241,681
11
11
8
51
9
0
12
44
train_batch_idx
Integrate progress tracking into the progress bar (#11213)
https://github.com/Lightning-AI/lightning.git
def train_batch_idx(self) -> int:
    if self.trainer is None:
        return 0
    return self.trainer.fit_loop.epoch_loop.batch_progress.current.processed
31
base.py
Python
pytorch_lightning/callbacks/progress/base.py
8a549a550cb10189ff1db382f546a40cd1c6c5b3
lightning
2
225,775
7
6
8
20
2
0
7
13
_mock_query_select
add unit test to tree index (#44) Co-authored-by: Jerry Liu <[email protected]>
https://github.com/jerryjliu/llama_index.git
def _mock_query_select() -> str: return "ANSWER: 1"
9
mock_predict.py
Python
tests/mock_utils/mock_predict.py
606baf1cd456dd6d735da61d72bba847f749dbf7
llama_index
1
140,580
30
12
11
164
22
0
42
147
synchronize
Clean up docstyle in python modules and add LINT rule (#25272)
https://github.com/ray-project/ray.git
def synchronize(local_filters, remotes, update_remote=True):
    remote_filters = ray.get(
        [r.get_filters.remote(flush_after=True) for r in remotes]
    )
    for rf in remote_filters:
        for k in local_filters:
            local_filters[k].apply_changes(rf[k], with_buffer=False)
    if update_remote:
        copies = {k: v.as_serializable() for k, v in local_filters.items()}
        remote_copy = ray.put(copies)
        [r.sync_filters.remote(remote_copy) for r in remotes]
107
filter_manager.py
Python
rllib/utils/filter_manager.py
905258dbc19753c81039f993477e7ab027960729
ray
7
246,865
52
15
38
438
19
0
94
482
test_aggregation_get_event_for_thread
Replace assertEquals and friends with non-deprecated versions. (#12092)
https://github.com/matrix-org/synapse.git
def test_aggregation_get_event_for_thread(self):
    channel = self._send_relation(RelationTypes.THREAD, "m.room.test")
    self.assertEqual(200, channel.code, channel.json_body)
    thread_id = channel.json_body["event_id"]
    # Annotate the annotation.
    channel = self._send_relation(
        RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_id
    )
    self.assertEqual(200, channel.code, channel.json_body)
    channel = self.make_request(
        "GET",
        f"/rooms/{self.room}/event/{thread_id}",
        access_token=self.user_token,
    )
    self.assertEqual(200, channel.code, channel.json_body)
    self.assertEqual(
        channel.json_body["unsigned"].get("m.relations"),
        {
            RelationTypes.ANNOTATION: {
                "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}]
            },
        },
    )
    # It should also be included when the entire thread is requested.
    channel = self.make_request(
        "GET",
        f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1",
        access_token=self.user_token,
    )
    self.assertEqual(200, channel.code, channel.json_body)
    self.assertEqual(len(channel.json_body["chunk"]), 1)
    thread_message = channel.json_body["chunk"][0]
    self.assertEqual(
        thread_message["unsigned"].get("m.relations"),
        {
            RelationTypes.ANNOTATION: {
                "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}]
            },
        },
    )
249
test_relations.py
Python
tests/rest/client/test_relations.py
02d708568b476f2f7716000b35c0adfa4cbd31b3
synapse
1
130,567
49
17
16
182
17
0
61
257
wait_for_nodes
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def wait_for_nodes(self, timeout=30):
    start_time = time.time()
    while time.time() - start_time < timeout:
        clients = self.global_state.node_table()
        live_clients = [client for client in clients if client["Alive"]]
        expected = len(self.list_all_nodes())
        if len(live_clients) == expected:
            logger.debug("All nodes registered as expected.")
            return
        else:
            logger.debug(
                f"{len(live_clients)} nodes are currently registered, "
                f"but we are expecting {expected}"
            )
            time.sleep(0.1)
    raise TimeoutError("Timed out while waiting for nodes to join.")
100
cluster_utils.py
Python
python/ray/cluster_utils.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
5
82,360
30
12
11
190
26
0
35
153
test_cache_limit_ttl_greater_than_default_cache_ttl
feat: add cache ttl extension point (#7299) Adds the setting `CMS_CACHE_LIMIT_TTL_CLASS` that should have a `limit_page_cache_ttl` method that would be called to limit the cache ttl of a page using business logic. Closes #7296
https://github.com/django-cms/django-cms.git
def test_cache_limit_ttl_greater_than_default_cache_ttl(self):
    page1 = create_page('test page 1', 'nav_playground.html', 'en', published=True)
    page1_url = page1.get_absolute_url()
    limit_page_cache_ttl_function = ".".join([PlaceholderCacheTestCase.__module__, limit_page_cache_ttl_test_500.__name__])
    with self.settings(CMS_LIMIT_TTL_CACHE_FUNCTION=limit_page_cache_ttl_function):
        page1.publish('en')
        request = self.get_request(page1_url)
        request.current_page = Page.objects.get(pk=page1.pk)
        response = self.client.get(page1_url)
        self.assertTrue('max-age=40' in response['Cache-Control'], response['Cache-Control'])  # noqa
109
test_cache.py
Python
cms/tests/test_cache.py
c412e97acba65a2a68e70ca15ea950bd31f90d3e
django-cms
1
250,280
41
13
35
237
12
0
65
486
test_update_version
Add missing type hints to tests.handlers. (#14680) And do not allow untyped defs in tests.handlers.
https://github.com/matrix-org/synapse.git
def test_update_version(self) -> None:
    version = self.get_success(
        self.handler.create_version(
            self.local_user,
            {
                "algorithm": "m.megolm_backup.v1",
                "auth_data": "first_version_auth_data",
            },
        )
    )
    self.assertEqual(version, "1")
    res = self.get_success(
        self.handler.update_version(
            self.local_user,
            version,
            {
                "algorithm": "m.megolm_backup.v1",
                "auth_data": "revised_first_version_auth_data",
                "version": version,
            },
        )
    )
    self.assertDictEqual(res, {})
    # check we can retrieve it as the current version
    res = self.get_success(self.handler.get_version_info(self.local_user))
    del res["etag"]
    self.assertDictEqual(
        res,
        {
            "algorithm": "m.megolm_backup.v1",
            "auth_data": "revised_first_version_auth_data",
            "version": version,
            "count": 0,
        },
    )
137
test_e2e_room_keys.py
Python
tests/handlers/test_e2e_room_keys.py
652d1669c5a103b1c20478770c4aaf18849c09a3
synapse
1
101,055
28
11
13
144
13
0
42
125
_get_rgb_xyz_map
Add Flip Loss Function - Add Flip for AMD and TF - Split Perceptual Loss functions to own modules - Fix allowed input shape for models - Allow GUI tooltip to display at higher width
https://github.com/deepfakes/faceswap.git
def _get_rgb_xyz_map(cls) -> Tuple[Tensor, Tensor]:
    mapping = np.array([[10135552 / 24577794, 8788810 / 24577794, 4435075 / 24577794],
                        [2613072 / 12288897, 8788810 / 12288897, 887015 / 12288897],
                        [1425312 / 73733382, 8788810 / 73733382, 70074185 / 73733382]])
    inverse = np.linalg.inv(mapping)
    return (K.constant(mapping, dtype="float32"), K.constant(inverse, dtype="float32"))
97
keras_utils.py
Python
lib/keras_utils.py
582c2ce40c11ef235dd3f9100f70e1e2832f8dd3
faceswap
1
309,854
11
10
3
48
9
0
11
20
test_null_name
Avoid removing zwave_js devices for non-ready nodes (#59964) * Only replace a node if the mfgr id / prod id / prod type differ * Prefer original device name for unready node * move register_node_in_dev_reg into async_setup_entry * simplify get_device_id_ext * Don't need hex ids * Revert "move register_node_in_dev_reg into async_setup_entry" This reverts commit f900e5fb0c67cc81657a1452b51c313bccb6f9e1. * Revert Callable change * Revert device backup name * Add test fixtures * Update existing not ready test with new fixture data * Check device properties after node added event * Add entity check * Check for extended device id * better device info checks * Use receive_event to properly setup components * Cleanup tests * improve test_replace_different_node * improve test_replace_same_node * add test test_node_model_change * Clean up long comments and strings * Format * Reload integration to detect node device config changes * update assertions * Disable entities on "value removed" event * Disable node status sensor on node replacement * Add test for disabling entities on remove value event * Add test for disabling node status sensor on node replacement * disable entity -> remove entity Co-authored-by: Martin Hjelmare <[email protected]>
https://github.com/home-assistant/core.git
async def test_null_name(hass, client, null_name_check, integration):
    node = null_name_check
    assert hass.states.get(f"switch.node_{node.node_id}")
25
test_init.py
Python
tests/components/zwave_js/test_init.py
cb89c23c0ffd7beba1ecc0cb84d80e8842f9a571
core
1
142,825
17
10
5
46
6
0
19
73
get_staged_trial
[tune/structure] Introduce execution package (#26015) Execution-specific packages are moved to tune.execution. Co-authored-by: Xiaowei Jiang <[email protected]>
https://github.com/ray-project/ray.git
def get_staged_trial(self):
    # TODO(xwjiang): This method should consider `self._cached_actor_pg`.
    for trial in self._staged_trials:
        if self._pg_manager.has_ready(trial):
            return trial
    return None
27
ray_trial_executor.py
Python
python/ray/tune/execution/ray_trial_executor.py
0959f44b6fc217a4f2766ed46a721eb79b067b2c
ray
3
156,083
12
10
4
60
7
0
16
32
quote
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
https://github.com/dask/dask.git
def quote(x):
    if istask(x) or type(x) is list or type(x) is dict:
        return (literal(x),)
    return x
36
core.py
Python
dask/core.py
cccb9d8d8e33a891396b1275c2448c352ef40c27
dask
4
82,439
126
18
35
515
49
0
222
515
assign_plugins
ci: Added codespell (#7355) Co-authored-by: Christian Clauss <[email protected]> * ci: codespell config taken from #7292
https://github.com/django-cms/django-cms.git
def assign_plugins(request, placeholders, template=None, lang=None, is_fallback=False):
    if not placeholders:
        return
    placeholders = tuple(placeholders)
    lang = lang or get_language_from_request(request)
    qs = get_cmsplugin_queryset(request)
    qs = qs.filter(placeholder__in=placeholders, language=lang)
    plugins = list(qs.order_by('placeholder', 'path'))
    fallbacks = defaultdict(list)
    # If no plugin is present in the current placeholder we loop in the fallback languages
    # and get the first available set of plugins
    if not is_fallback and not (hasattr(request, 'toolbar') and request.toolbar.edit_mode_active):
        disjoint_placeholders = (
            ph for ph in placeholders
            if all(ph.pk != p.placeholder_id for p in plugins)
        )
        for placeholder in disjoint_placeholders:
            if get_placeholder_conf("language_fallback", placeholder.slot, template, True):
                for fallback_language in get_fallback_languages(lang):
                    assign_plugins(request, (placeholder,), template, fallback_language, is_fallback=True)
                    fallback_plugins = placeholder._plugins_cache
                    if fallback_plugins:
                        fallbacks[placeholder.pk] += fallback_plugins
                        break
    # These placeholders have no fallback
    non_fallback_phs = [ph for ph in placeholders if ph.pk not in fallbacks]
    # If no plugin is present in non fallback placeholders, create default plugins if enabled)
    if not plugins:
        plugins = create_default_plugins(request, non_fallback_phs, template, lang)
    plugins = downcast_plugins(plugins, non_fallback_phs, request=request)
    # split the plugins up by placeholder
    # Plugins should still be sorted by placeholder
    plugin_groups = {key: list(plugins) for key, plugins in groupby(plugins, attrgetter('placeholder_id'))}
    all_plugins_groups = plugin_groups.copy()
    for group in plugin_groups:
        plugin_groups[group] = build_plugin_tree(plugin_groups[group])
    groups = fallbacks.copy()
    groups.update(plugin_groups)
    for placeholder in placeholders:
        # This is all the plugins.
        setattr(placeholder, '_all_plugins_cache', all_plugins_groups.get(placeholder.pk, []))
        # This one is only the root plugins.
        setattr(placeholder, '_plugins_cache', groups.get(placeholder.pk, []))
329
plugins.py
Python
cms/utils/plugins.py
c1290c9ff89cb00caa5469129fd527e9d82cd820
django-cms
19
86,182
10
9
6
41
6
0
10
35
get
feat: Add arroyo compatible synchronized consumer (#39340) This brings over the synchronized consumer code that was previously part of arroyo. It was removed in https://github.com/getsentry/arroyo/pull/81 since it was no longer being used by Snuba and should not live in the shared library. This implementation differs from the current synchronized consumer code however it is functionally the same. This code has been fairly extensively tested (as it was previously used with Snuba subscriptions) It is currently not used but will be used when converting the post process forwarder to use Arroyo.
https://github.com/getsentry/sentry.git
def get(self) -> Generator[T, None, None]:
    with self.__lock:
        yield self.__value
24
synchronized.py
Python
src/sentry/eventstream/kafka/synchronized.py
3caca8f2eace81edf9a18fd825f16186e53eb755
sentry
1
246,857
36
14
24
216
18
0
47
343
test_send_relation
Replace assertEquals and friends with non-deprecated versions. (#12092)
https://github.com/matrix-org/synapse.git
def test_send_relation(self):
    channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍")
    self.assertEqual(200, channel.code, channel.json_body)
    event_id = channel.json_body["event_id"]
    channel = self.make_request(
        "GET",
        "/rooms/%s/event/%s" % (self.room, event_id),
        access_token=self.user_token,
    )
    self.assertEqual(200, channel.code, channel.json_body)
    self.assert_dict(
        {
            "type": "m.reaction",
            "sender": self.user_id,
            "content": {
                "m.relates_to": {
                    "event_id": self.parent_id,
                    "key": "👍",
                    "rel_type": RelationTypes.ANNOTATION,
                }
            },
        },
        channel.json_body,
    )
130
test_relations.py
Python
tests/rest/client/test_relations.py
02d708568b476f2f7716000b35c0adfa4cbd31b3
synapse
1
110,569
53
16
18
304
13
0
112
206
_get_aligned_offsets
Reparametrize offsetbox calculations in terms of bboxes. Passing a single bbox instead of (xdescent, ydescent, width, height) separately is easier to follow (see e.g. the changes in VPacker and HPacker, which no longer have to repeatedly pack/unpack whd_list), and avoids having to figure out e.g. the sign of the descents and whether width/height includes the descents, for example. Currently get_offset keeps a back compatible signature (we *could* consider killing the old signature but let's not do that for now), and _get_bbox_and_child_offsets is private because I *may* want to later also change the convention to make offsets relative to the bbox (0, 0) point rather than the bbox lower-left corner.
https://github.com/matplotlib/matplotlib.git
def _get_aligned_offsets(yspans, height, align="baseline"):
    _api.check_in_list(
        ["baseline", "left", "top", "right", "bottom", "center"], align=align)
    if height is None:
        height = max(y1 - y0 for y0, y1 in yspans)
    if align == "baseline":
        yspan = (min(y0 for y0, y1 in yspans), max(y1 for y0, y1 in yspans))
        offsets = [0] * len(yspans)
    elif align in ["left", "bottom"]:
        yspan = (0, height)
        offsets = [-y0 for y0, y1 in yspans]
    elif align in ["right", "top"]:
        yspan = (0, height)
        offsets = [height - y1 for y0, y1 in yspans]
    elif align == "center":
        yspan = (0, height)
        offsets = [(height - (y1 - y0)) * .5 - y0 for y0, y1 in yspans]
    return yspan, offsets
190
offsetbox.py
Python
lib/matplotlib/offsetbox.py
de2192589f8ea50c9dc90be87b649399ff623feb
matplotlib
12
132,999
96
14
30
285
22
0
145
418
_check_gpu_tensors
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def _check_gpu_tensors(tensors):
    if not tensors or not isinstance(tensors, list):
        raise RuntimeError("'tensors' must be a nonempty list.")
    if len(tensors) > nccl_util.get_num_gpus():
        raise RuntimeError(
            "Tensor list cannot be larger than the number"
            "of available GPUs. Got {} > {}.".format(
                len(tensors), nccl_util.get_num_gpus()
            )
        )
    t0 = tensors[0]
    dt = nccl_util.get_nccl_tensor_dtype(t0)
    s = nccl_util.get_tensor_shape(t0)
    d = nccl_util.get_tensor_device(t0)
    for i, t in enumerate(tensors):
        if i == 0:
            continue
        # We need to check the following:
        # (1) tensor is cuda (already checked during API)
        # (2) tensor dtype
        # (3) tensor shape match
        # (4) each tensor is on a different GPU
        dtype = nccl_util.get_nccl_tensor_dtype(t)
        if dt != dtype:
            raise RuntimeError(
                "Tensors must have identical dtype. Got: '{}'.".format(dtype)
            )
        shape = nccl_util.get_tensor_shape(t)
        if s != shape:
            raise RuntimeError(
                "Tensor must have identical shape. Got: '{}'.".format(shape)
            )
        device = nccl_util.get_tensor_device(t)
        if device == d:
            raise RuntimeError("Tensor must be on distinct GPUs.")
165
nccl_collective_group.py
Python
python/ray/util/collective/collective_group/nccl_collective_group.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
9
301,842
32
10
13
167
13
0
48
91
test_device_trackers_numerical_name
Add test for mikrotik device tracker with numerical device name (#72808) Add mikrotik test for numerical device name
https://github.com/home-assistant/core.git
async def test_device_trackers_numerical_name(hass, mock_device_registry_devices):
    await setup_mikrotik_entry(
        hass, dhcp_data=[DEVICE_3_DHCP_NUMERIC_NAME], wireless_data=[DEVICE_3_WIRELESS]
    )
    device_3 = hass.states.get("device_tracker.123")
    assert device_3 is not None
    assert device_3.state == "home"
    assert device_3.attributes["friendly_name"] == "123"
    assert device_3.attributes["ip"] == "0.0.0.3"
    assert "ip_address" not in device_3.attributes
    assert device_3.attributes["mac"] == "00:00:00:00:00:03"
    assert device_3.attributes["host_name"] == 123
    assert "mac_address" not in device_3.attributes
96
test_device_tracker.py
Python
tests/components/mikrotik/test_device_tracker.py
9ac0c5907f8e7a4a0906559b482ac450f42892c0
core
1
200,374
15
7
2
26
3
0
15
36
arguments
Fix various typos Found via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`
https://github.com/sympy/sympy.git
def arguments(self):
    # Will be changed to self.args[1:] after args overriding is removed
    return self._args[1:]
14
assume.py
Python
sympy/assumptions/assume.py
24f1e7730119fe958cc8e28411f790c9a5ec04eb
sympy
1
176,531
101
12
12
300
30
0
135
200
test_draw_networkx_edges_undirected_selfloop_colors
added extra condition for fancy arrow colors (#5407) * added extra condition for fancy arrow colors * keep fancy edges ids to get a correct prop's value * Add test for selfloop color mapping in undirected graphs. Co-authored-by: Ross Barnowski <[email protected]>
https://github.com/networkx/networkx.git
def test_draw_networkx_edges_undirected_selfloop_colors():
    fig, ax = plt.subplots()
    # Edge list and corresponding colors
    edgelist = [(1, 3), (1, 2), (2, 3), (1, 1), (3, 3), (2, 2)]
    edge_colors = ["pink", "cyan", "black", "red", "blue", "green"]
    G = nx.Graph(edgelist)
    pos = {n: (n, n) for n in G.nodes}
    nx.draw_networkx_edges(G, pos, ax=ax, edgelist=edgelist, edge_color=edge_colors)
    # Verify that there are three fancy arrow patches (1 per self loop)
    assert len(ax.patches) == 3
    # These are points that should be contained in the self loops. For example,
    # sl_points[0] will be (1, 1.1), which is inside the "path" of the first
    # self-loop but outside the others
    sl_points = np.array(edgelist[-3:]) + np.array([0, 0.1])
    # Check that the mapping between self-loop locations and their colors is
    # correct
    for fap, clr, slp in zip(ax.patches, edge_colors[-3:], sl_points):
        assert fap.get_path().contains_point(slp)
        assert mpl.colors.same_color(fap.get_edgecolor(), clr)
197
test_pylab.py
Python
networkx/drawing/tests/test_pylab.py
1a38c171a560d9cdddd0d4bf1f1d613d80c5af7c
networkx
3
250,743
29
12
8
132
15
0
31
75
test_warn_no_nextlayer
exit for all tools on startup error, fix #4544 (#5187)
https://github.com/mitmproxy/mitmproxy.git
async def test_warn_no_nextlayer():
    ps = Proxyserver()
    with taddons.context(ps) as tctx:
        tctx.configure(ps, listen_host="127.0.0.1", listen_port=0)
        await ps.running()
        await tctx.master.await_log("Proxy server listening at", level="info")
        assert tctx.master.has_log("Warning: Running proxyserver without nextlayer addon!", level="warn")
        await ps.shutdown_server()
72
test_proxyserver.py
Python
test/mitmproxy/addons/test_proxyserver.py
e2f42ddb301737a1d8179c1034226a838ccd74f1
mitmproxy
1
130,363
16
8
6
73
11
0
16
58
stop_instance
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def stop_instance(self, instance_id, force_stop=False):
    request = StopInstanceRequest()
    request.set_InstanceId(instance_id)
    request.set_ForceStop(force_stop)
    logging.info("Stop %s command submit successfully.", instance_id)
    self._send_request(request)
43
utils.py
Python
python/ray/autoscaler/_private/aliyun/utils.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
1
281,550
36
15
15
226
22
0
47
204
call_list
Terminal Wide Rich (#1161) * My idea for how we handle Rich moving forward * remove independent consoles * FIxed pylint issues * add a few vars * Switched print to console * More transitions * Changed more prints * Replaced all prints * Fixing tabulate * Finished replace tabulate * Finished removing rich from Tabulate * add Panel around menu * add GST watermark under feature flag * Fixed 46 tests * Delete test_screener[False].yaml * Delete test_screener[True].yaml * Fixed the rest of the tests * add help and source color vars and use rgb * rich on stocks/options * update rich on disc, dps, sia * rich in gov, ins and scr menus * ba and ca menus with rich * Fixed import issue * Fixed some tests * removed termcolor * Removed prettytable * add rich to remaining stocks menus * FIxed linting issue * Added James' changes * Updated dependencies * Add rich to cryptocurrency menu * refactor economy and forex * refactor etf with rich * refactor mfunds * refactor rich rest * not specify style so default color works well on any background * Fixing mypy issues * Updated tests * More test fixes * James' test fixes * Updating tests : stocks/screener - fix cassettes using BR * Updating tests : crypto * Updating tests : disable DEBUG_MODE * Updating tests : stocks/fa/yfinance * minor fixes that escape * Improve the rich table function (that replaces tabulate :D ) * Fixed bad code * delete rogue file + dcf fix + NoConsole * sia mypy * fuck you linter * fuck you linter pt 2 * skip hehe * i hate the black linter * ubuntu mypy attempt * Update : rich_config + gtff * Updating tests : conftest * Updating tests : stocks * Update : rich_config * Updating : rich_config * make panel configurable for Theodore :b * colors update * Merged * Updating : rich_config + feature_flags * Updating : rich_config * Updating tests : stocks * Updating : feature_flags Co-authored-by: DidierRLopes <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]> Co-authored-by: james <[email protected]> Co-authored-by: jose-donato <[email protected]>
https://github.com/OpenBB-finance/OpenBBTerminal.git
def call_list(self, other_args):
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="list",
        description=,
    )
    ns_parser = parse_known_args_and_warn(parser, other_args)
    if ns_parser:
        length = max(len(self.calls), len(self.puts)) - 1
        console.print("#\tcall\tput")
        for i in range(length):
            call = self.calls[i][0] if i < len(self.calls) else ""
            put = self.puts[i][0] if i < len(self.puts) else ""
            console.print(f"{i}\t{call}\t{put}")
        console.print("")
129
payoff_controller.py
Python
gamestonk_terminal/stocks/options/payoff_controller.py
82747072c511beb1b2672846ae2ee4aec53eb562
OpenBBTerminal
5
45,034
22
11
12
83
14
0
25
47
_get_function_params
Add params dag_id, task_id etc to XCom.serialize_value (#19505) When implementing a custom XCom backend, in order to store XCom objects organized by dag_id, run_id etc, we need to pass those params to `serialize_value`.
https://github.com/apache/airflow.git
def _get_function_params(function) -> List[str]:
    parameters = inspect.signature(function).parameters
    bound_arguments = [
        name
        for name, p in parameters.items()
        if p.kind not in (p.VAR_POSITIONAL, p.VAR_KEYWORD)
    ]
    return bound_arguments
53
xcom.py
Python
airflow/models/xcom.py
56285eee04285d8b6fac90911248d7e9dd5504d8
airflow
3
118,561
27
11
16
141
22
0
30
174
start
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
https://github.com/streamlit/streamlit.git
def start(self):
    if self._script_thread is not None:
        raise Exception("ScriptRunner was already started")
    self._script_thread = threading.Thread(
        target=self._process_request_queue,
        name="ScriptRunner.scriptThread",
    )
    script_run_ctx = ScriptRunContext(
        session_id=self._session_id,
        enqueue=self._enqueue_forward_msg,
        query_string=self._client_state.query_string,
        session_state=self._session_state,
        uploaded_file_mgr=self._uploaded_file_mgr,
    )
    add_script_run_ctx(self._script_thread, script_run_ctx)
    self._script_thread.start()
90
script_runner.py
Python
lib/streamlit/script_runner.py
704eab3478cf69847825b23dabf15813a8ac9fa2
streamlit
2
104,792
14
9
16
58
8
0
14
35
cache_files
Add code examples for DatasetDict (#4245) * 📝 add code examples for DatasetDict * 🖍 apply quentin review
https://github.com/huggingface/datasets.git
def cache_files(self) -> Dict[str, Dict]:
    self._check_values_type()
    return {k: dataset.cache_files for k, dataset in self.items()}
36
dataset_dict.py
Python
src/datasets/dataset_dict.py
1904d0c0a3a96330d9b870cdca3e9a3a137f2977
datasets
2
100,848
62
12
28
215
19
0
86
353
_get_mask_channels
Refactoring and TravisCI to Github Actions (#1239) * refactor training * travis to actions
https://github.com/deepfakes/faceswap.git
def _get_mask_channels(self) -> List[int]:
    eye_multiplier = self._config["eye_multiplier"]
    mouth_multiplier = self._config["mouth_multiplier"]
    if not self._config["penalized_mask_loss"] and (eye_multiplier > 1 or mouth_multiplier > 1):
        logger.warning("You have selected eye/mouth loss multipliers greater than 1x, but "
                       "Penalized Mask Loss is disabled. Disabling all multipliers.")
        eye_multiplier = 1
        mouth_multiplier = 1
    uses_masks = (self._config["penalized_mask_loss"],
                  eye_multiplier > 1,
                  mouth_multiplier > 1)
    mask_channels = [-1 for _ in range(len(uses_masks))]
    current_channel = 3
    for idx, mask_required in enumerate(uses_masks):
        if mask_required:
            mask_channels[idx] = current_channel
            current_channel += 1
    logger.debug("uses_masks: %s, mask_channels: %s", uses_masks, mask_channels)
    return mask_channels
130
settings.py
Python
plugins/train/model/_base/settings.py
ff6b0209dd5ad57b81b0aca570df7f39a7119bfb
faceswap
7
64,692
66
15
17
266
22
0
146
118
split_invoices
fix(pos): cannot close the pos if sr. no. is sold & returned
https://github.com/frappe/erpnext.git
def split_invoices(invoices):
    # Input
    # invoices = [
    #   {'pos_invoice': 'Invoice with SR#1 & SR#2', 'is_return': 0},
    #   {'pos_invoice': 'Invoice with SR#1', 'is_return': 1},
    #   {'pos_invoice': 'Invoice with SR#2', 'is_return': 0}
    # ]
    # Output
    # _invoices = [
    #   [{'pos_invoice': 'Invoice with SR#1 & SR#2', 'is_return': 0}],
    #   [{'pos_invoice': 'Invoice with SR#1', 'is_return': 1}, {'pos_invoice': 'Invoice with SR#2', 'is_return': 0}],
    # ]
    _invoices = []
    special_invoices = []
    pos_return_docs = [frappe.get_cached_doc("POS Invoice", d.pos_invoice)
                       for d in invoices if d.is_return and d.return_against]
    for pos_invoice in pos_return_docs:
        for item in pos_invoice.items:
            if not item.serial_no:
                continue
            return_against_is_added = any(d for d in _invoices if d.pos_invoice == pos_invoice.return_against)
            if return_against_is_added:
                break
            return_against_is_consolidated = frappe.db.get_value('POS Invoice', pos_invoice.return_against, 'status', cache=True) == 'Consolidated'
            if return_against_is_consolidated:
                break
            pos_invoice_row = [d for d in invoices if d.pos_invoice == pos_invoice.return_against]
            _invoices.append(pos_invoice_row)
            special_invoices.append(pos_invoice.return_against)
            break
    _invoices.append([d for d in invoices if d.pos_invoice not in special_invoices])
    return _invoices
160
pos_invoice_merge_log.py
Python
erpnext/accounts/doctype/pos_invoice_merge_log/pos_invoice_merge_log.py
cf51a0a1b8ec45bf653c9478bd57cee676b384d9
erpnext
15
9,832
87
10
68
249
21
0
129
370
mixin_base_pod_parser
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests 
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" 
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request 
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
https://github.com/jina-ai/jina.git
def mixin_base_pod_parser(parser):
    gp = add_arg_group(parser, title='Pod')
    gp.add_argument(
        '--uses-before',
        type=str,
        help='The executor attached after the Peas described by --uses, typically before sending to all '
        'shards, accepted type follows `--uses`',
    )
    gp.add_argument(
        '--uses-after',
        type=str,
        help='The executor attached after the Peas described by --uses, typically used for receiving from '
        'all shards, accepted type follows `--uses`',
    )
    gp.add_argument(
        '--scheduling',
        type=SchedulerType.from_string,
        choices=list(SchedulerType),
        default=SchedulerType.LOAD_BALANCE,
        help='The strategy of scheduling workload among Peas',
    )
    gp.add_argument(
        '--external',
        action='store_true',
        default=False,
        help='The Pod will be considered an external Pod that has been started independently from the Flow.'
        'This Pod will not be context managed by the Flow.',
    )
    gp.add_argument(
        '--peas-hosts',
        nargs='+',
        type=str,
        help=,
    )
    # hidden CLI used for internal only
    gp.add_argument(
        '--pod-role',
        type=PodRoleType.from_string,
        choices=list(PodRoleType),
        help='The role of this pod in the flow'
        if _SHOW_ALL_ARGS
        else argparse.SUPPRESS,
    )
219
pod.py
Python
jina/parsers/peapods/pod.py
933415bfa1f9eb89f935037014dfed816eb9815d
jina
2
96,631
150
17
78
871
79
0
236
1,125
apply_rule
feat(issue_alert_status): Start writing issue alert history (#32045) This adds a postgres backend for issue alert history and calls record after an issue alert fires.
https://github.com/getsentry/sentry.git
def apply_rule(self, rule, status):
    condition_match = rule.data.get("action_match") or Rule.DEFAULT_CONDITION_MATCH
    filter_match = rule.data.get("filter_match") or Rule.DEFAULT_FILTER_MATCH
    rule_condition_list = rule.data.get("conditions", ())
    frequency = rule.data.get("frequency") or Rule.DEFAULT_FREQUENCY
    if (
        rule.environment_id is not None
        and self.event.get_environment().id != rule.environment_id
    ):
        return
    now = timezone.now()
    freq_offset = now - timedelta(minutes=frequency)
    if status.last_active and status.last_active > freq_offset:
        return
    state = self.get_state()
    condition_list = []
    filter_list = []
    for rule_cond in rule_condition_list:
        if self.get_rule_type(rule_cond) == "condition/event":
            condition_list.append(rule_cond)
        else:
            filter_list.append(rule_cond)
    # Sort `condition_list` so that most expensive conditions run last.
    condition_list.sort(
        key=lambda condition: any(
            condition_match in condition["id"] for condition_match in SLOW_CONDITION_MATCHES
        )
    )
    for predicate_list, match, name in (
        (filter_list, filter_match, "filter"),
        (condition_list, condition_match, "condition"),
    ):
        if not predicate_list:
            continue
        predicate_iter = (self.condition_matches(f, state, rule) for f in predicate_list)
        predicate_func = self.get_match_function(match)
        if predicate_func:
            if not predicate_func(predicate_iter):
                return
        else:
            self.logger.error(
                f"Unsupported {name}_match {match!r} for rule {rule.id}", filter_match, rule.id
            )
            return
    updated = (
        GroupRuleStatus.objects.filter(id=status.id)
        .exclude(last_active__gt=freq_offset)
        .update(last_active=now)
    )
    if not updated:
        return
    if randrange(10) == 0:
        analytics.record(
            "issue_alert.fired",
            issue_id=self.group.id,
            project_id=rule.project.id,
            organization_id=rule.project.organization.id,
            rule_id=rule.id,
        )
        history.record(rule, self.group)
    for action in rule.data.get("actions", ()):
        action_cls = rules.get(action["id"])
        if action_cls is None:
            self.logger.warning("Unregistered action %r", action["id"])
            continue
        action_inst = action_cls(self.project, data=action, rule=rule)
        results = safe_execute(
            action_inst.after, event=self.event, state=state, _with_transaction=False
        )
        if results is None:
            self.logger.warning("Action %s did not return any futures", action["id"])
            continue
        for future in results:
            key = future.key if future.key is not None else future.callback
            rule_future = RuleFuture(rule=rule, kwargs=future.kwargs)
            if key not in self.grouped_futures:
                self.grouped_futures[key] = (future.callback, [rule_future])
            else:
                self.grouped_futures[key][1].append(rule_future)
541
processor.py
Python
src/sentry/rules/processor.py
f6dd9b183a170a6813a41ccee656d61e14670d80
sentry
24
42,778
30
11
10
116
14
0
36
149
oss_read
SSL Bucket, Light Logic Refactor and Docstring Update for Alibaba Provider (#23891)
https://github.com/apache/airflow.git
def oss_read(self, remote_log_location, return_error=False):
    try:
        oss_remote_log_location = f'{self.base_folder}/{remote_log_location}'
        self.log.info("read remote log: %s", oss_remote_log_location)
        return self.hook.read_key(self.bucket_name, oss_remote_log_location)
    except Exception:
        msg = f'Could not read logs from {oss_remote_log_location}'
        self.log.exception(msg)
        # return error if needed
        if return_error:
            return msg
61
oss_task_handler.py
Python
airflow/providers/alibaba/cloud/log/oss_task_handler.py
d19cb86660d40e665d8c4fe2b07d76b88532bd8b
airflow
3
118,603
4
10
3
39
6
0
4
25
test_without_spinner
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
https://github.com/streamlit/streamlit.git
def test_without_spinner(self):
    function_without_spinner()
    self.assertTrue(self.forward_msg_queue.is_empty())
21
cache_spinner_test.py
Python
lib/tests/streamlit/cache_spinner_test.py
704eab3478cf69847825b23dabf15813a8ac9fa2
streamlit
1
287,887
34
13
15
173
17
0
46
215
_build_observations_by_entity
Fix Bayesian sensor to use negative observations (#67631) Co-authored-by: Diogo Gomes <[email protected]>
https://github.com/home-assistant/core.git
def _build_observations_by_entity(self):
    observations_by_entity: dict[str, list[OrderedDict]] = {}
    for i, obs in enumerate(self._observations):
        obs["id"] = i
        if "entity_id" not in obs:
            continue
        observations_by_entity.setdefault(obs["entity_id"], []).append(obs)
    for li_of_dicts in observations_by_entity.values():
        if len(li_of_dicts) == 1:
            continue
        for ord_dict in li_of_dicts:
            if ord_dict["platform"] != "state":
                continue
            ord_dict["platform"] = "multi_state"
    return observations_by_entity
102
binary_sensor.py
Python
homeassistant/components/bayesian/binary_sensor.py
49eeeae51da329284070eb7b91ed6cc8078d2f19
core
7
156,775
24
12
31
207
14
0
59
97
test_series_format
Change repr methods to avoid Layer materialization (#9289) * change task count to layer count in DataFrame and Array reprs * add test * address doctest failure * simplify test * support pluralization * use 'graph layers' instead of 'layers' to be more explicit
https://github.com/dask/dask.git
def test_series_format():
    s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=list("ABCDEFGH"))
    ds = dd.from_pandas(s, 3)
    exp =
    assert repr(ds) == exp
    assert str(ds) == exp
    exp =
    assert ds.to_string() == exp
    s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=list("ABCDEFGH"), name="XXX")
    ds = dd.from_pandas(s, 3)
    exp =
    assert repr(ds) == exp
    assert str(ds) == exp
135
test_format.py
Python
dask/dataframe/tests/test_format.py
ddcb841903f8f180aa359bd8db0054aa3b5964e3
dask
1
290,182
29
10
6
71
11
0
31
78
available
Improve MQTT type hints part 1 (#80523) * Improve typing alarm_control_panel * Improve typing binary_sensor * Improve typing button * Add misssed annotation * Move CONF_EXPIRE_AFTER to _setup_from_config * Use CALL_BACK type * Remove assert, improve code style
https://github.com/home-assistant/core.git
def available(self) -> bool:
    expire_after: int | None = self._config.get(CONF_EXPIRE_AFTER)
    # mypy doesn't know about fget: https://github.com/python/mypy/issues/6185
    return MqttAvailability.available.fget(self) and (  # type: ignore[attr-defined]
        expire_after is None or not self._expired
    )
42
binary_sensor.py
Python
homeassistant/components/mqtt/binary_sensor.py
b4ad03784f1d02995da39f3094c80adb4a60492b
core
3
108,847
57
13
21
142
13
1
77
167
_tex_escape
Tweak pgf escapes. - We don't need to escape underscores manually, but can rely on the underscore package like we already do for usetex. - We don't actually escape dollars (we parse them as math delimiters first). - Slightly tweak error message generation. - Move escaping tests before the big `create_figure` definition, which is used for further tests below.
https://github.com/matplotlib/matplotlib.git
def _tex_escape(text):
    r
    # Sometimes, matplotlib adds the unknown command \mathdefault.
    # Not using \mathnormal instead since this looks odd for the latex cm font.
    text = _replace_mathdefault(text)
    text = text.replace("\N{MINUS SIGN}", r"\ensuremath{-}")
    # split text into normaltext and inline math parts
    parts = _split_math(text)
    for i, s in enumerate(parts):
        if not i % 2:
            # textmode replacements
            s = _replace_escapetext(s)
        else:
            # mathmode replacements
            s = r"\(\displaystyle %s\)" % s
        parts[i] = s
    return "".join(parts)


@_api.deprecated("3.6")
@_api.deprecated("3.6")
73
backend_pgf.py
Python
lib/matplotlib/backends/backend_pgf.py
b5c535b3cf08a15115637035a67a2a730856090b
matplotlib
3
53,536
22
13
36
158
20
0
40
126
generate_welcome_blub
Fix escape warnings; use config set in welcome blurb to connect to orion
https://github.com/PrefectHQ/prefect.git
def generate_welcome_blub(base_url):
    blurb = textwrap.dedent(
        r
    ).format(api_url=base_url + "/api")
    visit_dashboard = textwrap.dedent(
        f
    )
    dashboard_not_built = textwrap.dedent(
    )
    dashboard_disabled = textwrap.dedent(
    )
    if not os.path.exists(prefect.__ui_static_path__):
        blurb += dashboard_not_built
    elif not prefect.settings.from_env().orion.ui.enabled:
        blurb += dashboard_disabled
    else:
        blurb += visit_dashboard
    return blurb
90
orion.py
Python
src/prefect/cli/orion.py
487fc4fb7ae2db5c767ef0bc68b14598e3b7e643
prefect
3
27,099
13
8
4
42
9
1
14
30
subscription_app_deleted_webhook
New events related to apps changes. (#9698) * New events related to apps changes. * Schema update after rebase * CHANGELOG.md update * New events description fix * Missing app event added to CHANGELOG.md
https://github.com/saleor/saleor.git
def subscription_app_deleted_webhook(subscription_webhook):
    return subscription_webhook(
        APP_DELETED_SUBSCRIPTION_QUERY, WebhookEventAsyncType.APP_DELETED
    )


APP_STATUS_CHANGED_SUBSCRIPTION_QUERY = (
    APP_DETAILS_FRAGMENT
    +
)


@pytest.fixture
@pytest.fixture
14
fixtures.py
Python
saleor/plugins/webhook/tests/subscription_webhooks/fixtures.py
b5e414c98a1535d287721c859994424cf0eea081
saleor
1
212,822
35
11
8
85
11
0
38
116
get_previous_focus
Added key and widget Element properties, new focus methods Element.get_next_focus, Element.get_previous_focus. New Window method Window.widget_to_element
https://github.com/PySimpleGUI/PySimpleGUI.git
def get_previous_focus(self):
    if not self._widget_was_created():  # if widget hasn't been created yet, then don't allow
        return None
    try:
        next_widget_focus = self.widget.tk_focusPrev()  # tkinter.Widget
        return self.ParentForm.widget_to_element(next_widget_focus)
    except Exception as e:
        _error_popup_with_traceback("Exception getting previous focus. Check your element's Widget", e)
47
PySimpleGUI.py
Python
PySimpleGUI.py
9b814f003b0685757d76ce56ee9c98eae114d346
PySimpleGUI
3
183,565
8
7
7
25
5
0
8
22
mode_is_supported
[terminal buffering] Add support for the "mode 2026" That task is definitely way more complicated that it seemed to be 😅
https://github.com/Textualize/textual.git
def mode_is_supported(self) -> bool: return self.report_parameter in MODE_REPORTS_PARAMETERS_INDICATING_SUPPORT
14
events.py
Python
src/textual/events.py
d14659c1a3760eade2dd3479b66eb8b2e7711db0
textual
1
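For context, `mode_is_supported` tests a DECRPM-style reply parameter. The sketch below parses a `CSI ? 2026 ; Ps $ y` reply with stdlib `re` and applies the same membership test; treating every parameter other than 0 ("not recognized") as support is an assumption here, not necessarily Textual's exact constant.

import re

# DECRPM reply parameters (Ps): 0 = not recognized, 1 = set, 2 = reset,
# 3 = permanently set, 4 = permanently reset.
SUPPORT_PARAMETERS = {1, 2, 3, 4}  # assumption: anything recognized counts as support

def mode_2026_supported(reply: str) -> bool:
    # Expected reply shape from the terminal: ESC [ ? 2026 ; Ps $ y
    match = re.fullmatch(r"\x1b\[\?2026;(\d+)\$y", reply)
    return bool(match) and int(match.group(1)) in SUPPORT_PARAMETERS

print(mode_2026_supported("\x1b[?2026;2$y"))  # True  (mode recognized, currently reset)
print(mode_2026_supported("\x1b[?2026;0$y"))  # False (mode not recognized)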
303,208
23
10
9
89
12
0
24
102
async_added_to_hass
Keep track of a context for each listener (#72702)

* Remove async_remove_listener

  This avoids the ambiguity as to what happens if the same callback is added multiple times.

* Keep track of a context for each listener

  This allows an update coordinator to adapt what data to request on update from the backing service based on which entities are enabled.

* Clone list before calling callbacks

  The callbacks can end up unregistering and modifying the dict while iterating.

* Only yield actual values
* Add a test for update context
* Factor out iteration of _listeners to helper
* Verify context is passed to coordinator
* Switch to Any as type instead of object
* Remove function whose use was dropped earlier

  The use was removed in 8bee25c938a123f0da7569b4e2753598d478b900
https://github.com/home-assistant/core.git
async def async_added_to_hass(self):
    await super().async_added_to_hass()
    self.coordinator.entities.append(self)
    # Sensors should also register callbacks to HA when their state changes
    self.coordinator.musiccast.register_group_update_callback(
        self.update_all_mc_entities
    )
    self.async_on_remove(
        self.coordinator.async_add_listener(self.async_schedule_check_client_list)
    )
51
media_player.py
Python
homeassistant/components/yamaha_musiccast/media_player.py
8910d265d6cf15fed4e6e98b4344031019c1016d
core
1
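The listener-registration pattern in `async_added_to_hass` (add a coordinator listener, keep the returned unsubscribe callable for teardown) can be sketched without Home Assistant; every name below is an illustrative stand-in, not the Home Assistant API.

class MiniCoordinator:
    """Illustrative stand-in for a data-update coordinator."""

    def __init__(self):
        self._listeners = []

    def add_listener(self, callback):
        self._listeners.append(callback)

        def remove_listener():
            self._listeners.remove(callback)

        return remove_listener  # caller stores this and calls it on teardown

    def notify(self):
        for callback in list(self._listeners):  # copy: callbacks may unregister
            callback()


coordinator = MiniCoordinator()
unsub = coordinator.add_listener(lambda: print("refresh entity state"))
coordinator.notify()   # prints once
unsub()                # listener removed; later notify() calls print nothing
coordinator.notify()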
247,482
73
12
28
391
39
0
94
318
test_single_private_joined_room
Add some type hints to the tests.handlers module. (#12207)
https://github.com/matrix-org/synapse.git
def test_single_private_joined_room(self) -> None:
    room_id = self.helper.create_room_as(self.user1, tok=self.token1)
    self.helper.send_state(
        room_id,
        EventTypes.RoomHistoryVisibility,
        body={"history_visibility": "joined"},
        tok=self.token1,
    )
    self.helper.send(room_id, body="Hello!", tok=self.token1)
    self.helper.join(room_id, self.user2, tok=self.token2)
    self.helper.send(room_id, body="Hello again!", tok=self.token1)

    writer = Mock()

    self.get_success(self.admin_handler.export_user_data(self.user2, writer))
    writer.write_events.assert_called()

    # Since we can't see all events there should be one extremity.
    writer.write_state.assert_called_once()

    # Collect all events that were written
    written_events = []
    for (called_room_id, events), _ in writer.write_events.call_args_list:
        self.assertEqual(called_room_id, room_id)
        written_events.extend(events)

    # Check that the right number of events were written
    counter = Counter(
        (event.type, getattr(event, "state_key", None)) for event in written_events
    )
    self.assertEqual(counter[(EventTypes.Message, None)], 1)
    self.assertEqual(counter[(EventTypes.Member, self.user1)], 1)
    self.assertEqual(counter[(EventTypes.Member, self.user2)], 1)
254
test_admin.py
Python
tests/handlers/test_admin.py
e10a2fe0c28ec9206c0e2275df492f61ff5025f2
synapse
3
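The final assertions tally `(event type, state_key)` pairs with `collections.Counter`; the same tallying step in isolation, with made-up event objects:

from collections import Counter
from types import SimpleNamespace

# Made-up stand-ins for written events: messages carry no state_key.
events = [
    SimpleNamespace(type="m.room.message"),
    SimpleNamespace(type="m.room.member", state_key="@user1:test"),
    SimpleNamespace(type="m.room.member", state_key="@user2:test"),
]

counter = Counter(
    (event.type, getattr(event, "state_key", None)) for event in events
)
assert counter[("m.room.message", None)] == 1
assert counter[("m.room.member", "@user1:test")] == 1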
189,964
6
16
5
147
35
10
6
13
add_sound
fix: SoundExample video autoplaying with sound (#2911)
https://github.com/ManimCommunity/manim.git
def add_sound(self, sound_file, time_offset=0, gain=None, **kwargs):
""" This method is used to add a sound to the animation. Parameters ---------- sound_file : str The path to the sound file. time_offset : int,float, optional The offset in the sound file after which""" Thisused to add a sound to thesound_file :The path to the sound: intthe sound can beof the sound.. manim:::no_autoplay:
53
scene.py
Python
manim/scene/scene.py
aeeb6da3572e89fac605872288988823c93ea146
manim
2
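Only the signature of `add_sound` survives in the record, so here is a hedged usage sketch in a user scene; the file name is a placeholder and must exist on disk for rendering to succeed.

from manim import Scene, Circle, Create

class SoundExample(Scene):
    def construct(self):
        # "click.wav" is a placeholder path; any short audio file works.
        self.add_sound("click.wav", time_offset=0.5)
        self.play(Create(Circle()))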
291,460
30
18
16
143
17
0
39
227
select_source
Use _attr_state in ziggo mediabox xl media player (#82844)
https://github.com/home-assistant/core.git
def select_source(self, source):
    if str(source).isdigit():
        digits = str(source)
    else:
        digits = next(
            (
                key
                for key, value in self._mediabox.channels().items()
                if value == source
            ),
            None,
        )
    if digits is None:
        return

    self.send_keys([f"NUM_{digit}" for digit in str(digits)])
    self._attr_state = MediaPlayerState.PLAYING
86
media_player.py
Python
homeassistant/components/ziggo_mediabox_xl/media_player.py
cee716b89287cd76c0b50ce562487a0cc4ba0481
core
6
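The channel lookup in `select_source` is a reverse dictionary lookup via `next()` over a generator with a `None` default; the same idiom in isolation, with a made-up channel map:

channels = {"1": "NPO 1", "2": "NPO 2", "101": "BBC One"}

def channel_number_for(name):
    # First key whose value matches, or None if the name is unknown.
    return next((key for key, value in channels.items() if value == name), None)

print(channel_number_for("BBC One"))   # 101
print(channel_number_for("Unknown"))   # None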