feature         dtype           range
id              int64           20–338k
vocab_size      int64           2–671
ast_levels      int64           4–32
nloc            int64           1–451
n_ast_nodes     int64           12–5.6k
n_identifiers   int64           1–186
n_ast_errors    int64           0–10
n_words         int64           2–2.17k
n_whitespaces   int64           2–13.8k
fun_name        stringlengths   2–73
commit_message  stringlengths   51–15.3k
url             stringlengths   31–59
code            stringlengths   51–31k
ast_errors      stringlengths   0–1.46k
token_counts    int64           6–3.32k
file_name       stringlengths   5–56
language        stringclasses   1 value
path            stringlengths   7–134
commit_id       stringlengths   40–40
repo            stringlengths   3–28
complexity      int64           1–153
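Each record below is one function-level sample following this schema. As a quick orientation, here is a minimal sketch of how a dataset with these features could be loaded and inspected with the Hugging Face datasets library; the hub identifier used here is a placeholder assumption, not the dataset's actual name.

# Minimal sketch: load a dataset with the features listed above and inspect one record.
# "org/function-commit-dataset" is a placeholder identifier, not the real hub name.
from datasets import load_dataset

ds = load_dataset("org/function-commit-dataset", split="train")

print(ds.features)                      # column names and dtypes, as in the table above
sample = ds[0]
print(sample["repo"], sample["path"], sample["fun_name"])
print(sample["nloc"], sample["token_counts"], sample["complexity"])
print(sample["code"])                   # the function source for this record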
id: 309,437 | vocab_size: 11 | ast_levels: 9 | nloc: 5 | n_ast_nodes: 38 | n_identifiers: 6 | n_ast_errors: 0 | n_words: 11 | n_whitespaces: 43
fun_name: is_on
Add support for setting RGB and RGBW values for Twinkly lights (#62337) * Change library to ttls * Add rgbw support * Add client session to config flow * Fix config flow * Adjust tests 1 * Fix more tests * Fix last tests * Add new tests * Update test for coverage * Update test for coverage 2 * Update test for coverage 3 * Change brightness to attribute * Set RGBW mode only when available * Add RGB support
https://github.com/home-assistant/core.git
async def is_on(self) -> bool:
    if self.is_offline:
        raise ClientConnectionError()
    return self.state
token_counts: 21 | file_name: __init__.py | language: Python
tests/components/twinkly/__init__.py
49a32c398c2b094975a0b8abe3ce356948c911bd
repo: core | complexity: 2
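The integer columns in each record (n_ast_nodes, n_identifiers, ast_levels, complexity, and so on) appear to be static statistics derived from the code field. Purely as an illustration of what such numbers measure, and not as the pipeline that produced this dataset, here is a small sketch using Python's ast module; the helper name rough_ast_stats and the branch-counting heuristic for complexity are assumptions made for the example.

import ast

def rough_ast_stats(source: str) -> dict:
    # Illustrative only: approximate a few of the per-function statistics above.
    tree = ast.parse(source)
    nodes = list(ast.walk(tree))
    identifiers = {n.id for n in nodes if isinstance(n, ast.Name)}
    # Crude cyclomatic-complexity proxy: 1 + number of branching constructs.
    branches = sum(isinstance(n, (ast.If, ast.For, ast.While, ast.BoolOp)) for n in nodes)
    return {
        "n_ast_nodes": len(nodes),
        "n_identifiers": len(identifiers),
        "complexity_estimate": 1 + branches,
    }

snippet = "def is_on(self):\n    if self.is_offline:\n        raise ClientConnectionError()\n    return self.state\n"
print(rough_ast_stats(snippet))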
id: 130,358 | vocab_size: 16 | ast_levels: 10 | nloc: 7 | n_ast_nodes: 84 | n_identifiers: 12 | n_ast_errors: 0 | n_words: 18 | n_whitespaces: 71
fun_name: stop_instances
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def stop_instances(self, instance_ids, stopped_mode="StopCharging"): request = StopInstancesRequest() request.set_InstanceIds(instance_ids) request.set_StoppedMode(stopped_mode) response = self._send_request(request) if response is None: logging.error("stop_instances failed")
token_counts: 48 | file_name: utils.py | language: Python
python/ray/autoscaler/_private/aliyun/utils.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
repo: ray | complexity: 2
id: 105,945 | vocab_size: 8 | ast_levels: 9 | nloc: 2 | n_ast_nodes: 36 | n_identifiers: 5 | n_ast_errors: 0 | n_words: 8 | n_whitespaces: 14
fun_name: _version_tuple_to_str
Make `Version` hashable (#5238) * Make Version hashable * Remove Version.match (unused method)
https://github.com/huggingface/datasets.git
def _version_tuple_to_str(version_tuple):
    return ".".join(str(v) for v in version_tuple)
token_counts: 20 | file_name: version.py | language: Python
src/datasets/utils/version.py
bde7504fbafa9a0cc9ae847ed55aafd4c0dbc9de
repo: datasets | complexity: 2
id: 249,805 | vocab_size: 28 | ast_levels: 10 | nloc: 19 | n_ast_nodes: 178 | n_identifiers: 13 | n_ast_errors: 0 | n_words: 48 | n_whitespaces: 205
fun_name: test_medium_does_not_exist
Add an Admin API endpoint for looking up users based on 3PID (#14405)
https://github.com/matrix-org/synapse.git
def test_medium_does_not_exist(self) -> None: # test for unknown medium url = "/_synapse/admin/v1/threepid/publickey/users/unknown-key" channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) # test for unknown user with a known medium url = "/_synapse/admin/v1/threepid/email/users/unknown" channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
token_counts: 110 | file_name: test_user.py | language: Python
tests/rest/admin/test_user.py
a3623af74e0af0d2f6cbd37b47dc54a1acd314d5
repo: synapse | complexity: 1
id: 188,426 | vocab_size: 78 | ast_levels: 13 | nloc: 21 | n_ast_nodes: 220 | n_identifiers: 24 | n_ast_errors: 0 | n_words: 112 | n_whitespaces: 323
fun_name: authenticate
Fix rbac (#7699) * perf: 优化 suggesstion * perf: 修改 migrations * feat: 添加OIDC认证逻辑 * perf: 修改 backend * perf: 优化认证backends * perf: 优化认证backends * perf: 优化CAS认证, 用户多域名进行访问时回调到各自域名 Co-authored-by: ibuler <[email protected]>
https://github.com/jumpserver/jumpserver.git
def authenticate(request=None, **credentials): username = credentials.get('username') for backend, backend_path in _get_backends(return_tuples=True): # 预先检查,不浪费认证时间 if not backend.username_can_authenticate(username): continue # 原生 backend_signature = inspect.signature(backend.authenticate) try: backend_signature.bind(request, **credentials) except TypeError: # This backend doesn't accept these credentials as arguments. Try the next one. continue try: user = backend.authenticate(request, **credentials) except PermissionDenied: # This backend says to stop in our tracks - this user should not be allowed in at all. break if user is None: continue # 再次检查遇检查中遗漏的用户 if not backend.user_can_authenticate(user): continue # Annotate the user object with the path of the backend. user.backend = backend_path return user # The credentials supplied are invalid to all backends, fire signal user_login_failed.send(sender=__name__, credentials=_clean_credentials(credentials), request=request) auth.authenticate = authenticate
token_counts: 125 | file_name: mixins.py | language: Python
apps/authentication/mixins.py
edfca5eb2486c2f006257723ffeda6f56b170170
repo: jumpserver | complexity: 7
id: 191,410 | vocab_size: 53 | ast_levels: 9 | nloc: 11 | n_ast_nodes: 124 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 97 | n_whitespaces: 139
fun_name: test_document_lookup
Harrison/add react chain (#24) from https://arxiv.org/abs/2210.03629 still need to think if docstore abstraction makes sense
https://github.com/hwchase17/langchain.git
def test_document_lookup() -> None: page = Document(page_content=_PAGE_CONTENT) # Start with lookup on "LangChain". output = page.lookup("LangChain") assert output == "(Result 1/2) This is a page about LangChain." # Now switch to looking up "framework". output = page.lookup("framework") assert output == "(Result 1/1) It is a really cool framework." # Now switch back to looking up "LangChain", should reset. output = page.lookup("LangChain") assert output == "(Result 1/2) This is a page about LangChain." # Lookup "LangChain" again, should go to the next mention. output = page.lookup("LangChain") assert output == "(Result 2/2) What isn't there to love about langchain?"
token_counts: 63 | file_name: test_document.py | language: Python
tests/unit_tests/docstore/test_document.py
ce7b14b84381c766ae42a0f71953b2a56c024dbb
repo: langchain | complexity: 1
id: 259,900 | vocab_size: 93 | ast_levels: 12 | nloc: 10 | n_ast_nodes: 434 | n_identifiers: 23 | n_ast_errors: 1 | n_words: 147 | n_whitespaces: 564
fun_name: test_fetch_openml_inactive
ENH improve ARFF parser using pandas (#21938) Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Adrin Jalali <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def test_fetch_openml_inactive(monkeypatch, gzip_response, dataset_params): data_id = 40675 _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response) msg = "Version 1 of dataset glass2 is inactive," with pytest.warns(UserWarning, match=msg): glass2 = fetch_openml( cache=False, as_frame=False, parser="liac-arff", **dataset_params ) assert glass2.data.shape == (163, 9) assert glass2.details["id"] == "40675" @pytest.mark.parametrize("gzip_response", [True, False]) @pytest.mark.parametrize( "data_id, params, err_type, err_msg", [ (40675, {"name": "glass2"}, ValueError, "No active dataset glass2 found"), ( 61, {"data_id": 61, "target_column": ["sepalwidth", "class"]}, ValueError, "Can only handle homogeneous multi-target datasets", ), ( 40945, {"data_id": 40945, "as_frame": False}, ValueError, "STRING attributes are not supported for array representation. Try" " as_frame=True", ), ( 2, {"data_id": 2, "target_column": "family", "as_frame": True}, ValueError, "Target column 'family'", ), ( 2, {"data_id": 2, "target_column": "family", "as_frame": False}, ValueError, "Target column 'family'", ), ( 61, {"data_id": 61, "target_column": "undefined"}, KeyError, "Could not find target_column='undefined'", ), ( 61, {"data_id": 61, "target_column": ["undefined", "class"]}, KeyError, "Could not find target_column='undefined'", ), ], ) @pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
@pytest.mark.parametrize("gzip_response", [True, False]) @pytest.mark.parametrize( "data_id, params, err_type, err_msg", [ (40675, {"name": "glass2"}, ValueError, "No active dataset glass2 found"), ( 61, {"data_id": 61, "target_column": ["sepalwidth", "class"]}, ValueError, "Can only handle homogeneous multi-target datasets", ), ( 40945, {"data_id": 40945, "as_frame": False}, ValueError, "STRING attributes are not supported for array representation. Try" " as_frame=True", ), ( 2, {"data_id": 2, "target_column": "family", "as_frame": True}, ValueError, "Target column 'family'", ), ( 2, {"data_id": 2, "target_column": "family", "as_frame": False}, ValueError, "Target column 'family'", ), ( 61, {"data_id": 61, "target_column": "undefined"}, KeyError, "Could not find target_column='undefined'", ), ( 61, {"data_id": 61, "target_column": ["undefined", "class"]}, KeyError, "Could not find target_column='undefined'", ), ], ) @pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
token_counts: 76 | file_name: test_openml.py | language: Python
sklearn/datasets/tests/test_openml.py
a47d569e670fd4102af37c3165c9b1ddf6fd3005
repo: scikit-learn | complexity: 1
id: 211,327 | vocab_size: 40 | ast_levels: 14 | nloc: 13 | n_ast_nodes: 223 | n_identifiers: 20 | n_ast_errors: 0 | n_words: 67 | n_whitespaces: 126
fun_name: choose_best_pointorder_fit_another
Refactor rbox (#6704) * refactor rbox * modify the code of save results * fix some problem * add .gitignore in dataset/dota * fix test anno path
https://github.com/PaddlePaddle/PaddleDetection.git
def choose_best_pointorder_fit_another(poly1, poly2): x1, y1, x2, y2, x3, y3, x4, y4 = poly1 combinate = [ np.array([x1, y1, x2, y2, x3, y3, x4, y4]), np.array([x2, y2, x3, y3, x4, y4, x1, y1]), np.array([x3, y3, x4, y4, x1, y1, x2, y2]), np.array([x4, y4, x1, y1, x2, y2, x3, y3]) ] dst_coordinate = np.array(poly2) distances = np.array( [np.sum((coord - dst_coordinate)**2) for coord in combinate]) sorted = distances.argsort() return combinate[sorted[0]]
token_counts: 168 | file_name: slicebase.py | language: Python
configs/rotate/tools/slicebase.py
e55e41945d42db787a0f7c557d53d06a6b24536b
repo: PaddleDetection | complexity: 2
id: 158,396 | vocab_size: 44 | ast_levels: 12 | nloc: 15 | n_ast_nodes: 197 | n_identifiers: 22 | n_ast_errors: 0 | n_words: 54 | n_whitespaces: 114
fun_name: train_batch_ch13
[PaddlePaddle] Merge master into Paddle branch (#1186) * change 15.2 title in chinese version (#1109) change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘ * 修改部分语义表述 (#1105) * Update r0.17.5 (#1120) * Bump versions in installation * 94行typo: (“bert.mall”)->(“bert.small”) (#1129) * line 313: "bert.mall" -> "bert.small" (#1130) * fix: update language as native reader (#1114) * Fix the translation of "stride" (#1115) * Update index.md (#1118) 修改部分语义表述 * Update self-attention-and-positional-encoding.md (#1133) 依照本书的翻译习惯,将pooling翻译成汇聚 * maybe a comment false (#1149) * maybe a little false * maybe a little false * A minor bug in the rcnn section (Chinese edition) (#1148) * Update bert.md (#1137) 一个笔误 # 假设batch_size=2,num_pred_positions=3 # 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1] * Update calculus.md (#1135) * fix typo in git documentation (#1106) * fix: Update the Chinese translation in lr-scheduler.md (#1136) * Update lr-scheduler.md * Update chapter_optimization/lr-scheduler.md Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * fix translation for kaggle-house-price.md (#1107) * fix translation for kaggle-house-price.md * fix translation for kaggle-house-price.md Signed-off-by: sunhaizhou <[email protected]> * Update weight-decay.md (#1150) * Update weight-decay.md 关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解 关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。 并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释 解释为何会增加复杂性以及为何需要细粒度工具。 * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Fix a spelling error (#1161) * Update gru.md (#1152) The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state. 翻译错误 * Unify the function naming (#1113) Unify naming of the function 'init_xavier()'. * Update mlp-concise.md (#1166) * Update mlp-concise.md 语句不通顺 * Update environment.md 语序异常 * Update config.ini * fix the imprecise description (#1168) Co-authored-by: yuande <yuande> * fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175) * Fix some typos. (#1163) * Update batch-norm.md (#1170) fixing typos u->x in article * Update linear-regression.md (#1090) We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that 原译文把who也直接翻译出来了。 * Update mlp.md (#1117) * Update mlp.md 修改部分语义表述 * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Correct a translation error. (#1091) * Correct a translation error. 
* Update chapter_computer-vision/image-augmentation.md Co-authored-by: Aston Zhang <[email protected]> * Update aws.md (#1121) * Update aws.md * Update chapter_appendix-tools-for-deep-learning/aws.md Co-authored-by: Aston Zhang <[email protected]> * Update image-augmentation.md (#1093) * Update anchor.md (#1088) fix a minor issue in code * Update anchor.md * Update image-augmentation.md * fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087) * Avoid `torch.meshgrid` user warning (#1174) Avoids the following user warning: ```python ~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.) return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] ``` * bump to 2.0.0-beta1 * Update sequence.md * bump beta1 on readme * Add latex code block background to config * BLD: Bump python support version 3.9 (#1183) * BLD: Bump python support version 3.9 * Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4 * BLD: Bump torch and tensorflow * Update Jenkinsfile * Update chapter_installation/index.md * Update chapter_installation/index.md Co-authored-by: Aston Zhang <[email protected]> * Update config.ini * Update INFO.md * Update INFO.md * Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187) * resolve the conflicts * revise from publisher (#1089) * revise from publisher * d2l api * post_latex * revise from publisher * revise ch11 * Delete d2l-Copy1.bib * clear cache * rm d2lbook clear * debug anchor * keep original d2l doc Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: Aston Zhang <[email protected]> * 重复语句 (#1188) Co-authored-by: Aston Zhang <[email protected]> * Improve expression for chapter_preliminaries/pandas.md (#1184) * Update pandas.md * Improve expression * Improve expression * Update chapter_preliminaries/pandas.md Co-authored-by: Aston Zhang <[email protected]> * Improce expression for chapter_preliminaries/linear-algebra.md (#1185) * Improce expression * Improve code comments * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md Co-authored-by: Aston Zhang <[email protected]> * Fix multibox_detection bugs * Update d2l to 0.17.5 version * restore older version * Upgrade pandas * change to python3.8 * Test warning log * relocate warning log * test logs filtering * Update gru.md * Add DeprecationWarning filter * Test warning log * Update attention mechanisms & computational performance * Update multilayer perceptron& linear & convolution networks & computer vision * Update recurrent&optimition&nlp pretraining & nlp applications * ignore warnings * Update index.md * Update linear networks * Update multilayer perceptrons&deep learning computation * Update preliminaries * Check and Add warning filter * Update kaggle-cifar10.md * Update object-detection-dataset.md * Update ssd.md fcn.md * Update hybridize.md * Update hybridize.md Signed-off-by: sunhaizhou <[email protected]> Co-authored-by: zhou201505013 <[email protected]> Co-authored-by: Xinwei Liu <[email protected]> Co-authored-by: Anirudh Dagar <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: hugo_han <[email protected]> 
Co-authored-by: gyro永不抽风 <[email protected]> Co-authored-by: CanChengZheng <[email protected]> Co-authored-by: linlin <[email protected]> Co-authored-by: iuk <[email protected]> Co-authored-by: yoos <[email protected]> Co-authored-by: Mr. Justice Lawrence John Wargrave <[email protected]> Co-authored-by: Chiyuan Fu <[email protected]> Co-authored-by: Sunhuashan <[email protected]> Co-authored-by: Haiker Sun <[email protected]> Co-authored-by: Ming Liu <[email protected]> Co-authored-by: goldmermaid <[email protected]> Co-authored-by: silenceZheng66 <[email protected]> Co-authored-by: Wenchao Yan <[email protected]> Co-authored-by: Kiki2049 <[email protected]> Co-authored-by: Krahets <[email protected]> Co-authored-by: friedmainfunction <[email protected]> Co-authored-by: Jameson <[email protected]> Co-authored-by: P. Yao <[email protected]> Co-authored-by: Yulv-git <[email protected]> Co-authored-by: Liu,Xiao <[email protected]> Co-authored-by: YIN, Gang <[email protected]> Co-authored-by: Joe-HZ <[email protected]> Co-authored-by: lybloveyou <[email protected]> Co-authored-by: VigourJiang <[email protected]> Co-authored-by: zxhd863943427 <[email protected]> Co-authored-by: LYF <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: xiaotinghe <[email protected]> Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Holly-Max <[email protected]> Co-authored-by: HinGwenWoong <[email protected]> Co-authored-by: Shuai Zhang <[email protected]>
https://github.com/d2l-ai/d2l-zh.git
def train_batch_ch13(net, X, y, loss, trainer, devices): if isinstance(X, list): # Required for BERT fine-tuning (to be covered later) X = [x.to(devices[0]) for x in X] else: X = X.to(devices[0]) y = y.to(devices[0]) net.train() trainer.zero_grad() pred = net(X) l = loss(pred, y) l.sum().backward() trainer.step() train_loss_sum = l.sum() train_acc_sum = d2l.accuracy(pred, y) return train_loss_sum, train_acc_sum
token_counts: 124 | file_name: torch.py | language: Python
d2l/torch.py
b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2
repo: d2l-zh | complexity: 3
id: 86,689 | vocab_size: 78 | ast_levels: 19 | nloc: 52 | n_ast_nodes: 436 | n_identifiers: 53 | n_ast_errors: 0 | n_words: 92 | n_whitespaces: 672
fun_name: __fetch_randomly_sampled_transactions
feat(dynamic-sampling): Improve empty transaction breakdown message [TET-338] (#39539) This PR add new attribute parentProjectBreakdown to /api/0/projects/<organization_slug>/<project_slug>/dynamic-sampling/distribution/ api: ``` { "projectBreakdown": null, "sampleSize": 0, "startTimestamp": null, "endTimestamp": null, "parentProjectBreakdown": [ { "projectId": 1, "percentage": 0.9, "project": "sentry" }, { "projectId": 2, "percentage": 0.1, "project": "javascript" } ] } ``` TODO: - [x] Update src/sentry/snuba/referrer.py https://github.com/getsentry/sentry/blob/0fbbf1626f86399b1ca4a2781d66ef96aac69de7/src/sentry/snuba/referrer.py#L208-L210 - [x] Add missing tests Co-authored-by: Andrii Soldatenko <[email protected]> Co-authored-by: ahmedetefy <[email protected]>
https://github.com/getsentry/sentry.git
def __fetch_randomly_sampled_transactions(self, project, query, sample_size, query_time_range): sampling_factor = self.__generate_transactions_sampling_factor( project=project, query=query, sample_size=sample_size, query_time_range=query_time_range, ) builder = QueryBuilder( Dataset.Discover, params={ "start": query_time_range.start_time, "end": query_time_range.end_time, "project_id": [project.id], "organization_id": project.organization.id, }, query=f"{query} event.type:transaction", selected_columns=[ "id", "trace", "random_number() as rand_num", f"modulo(rand_num, {sampling_factor}) as modulo_num", ], equations=[], orderby=None, auto_fields=True, auto_aggregations=True, use_aggregate_conditions=True, functions_acl=["random_number", "modulo"], limit=sample_size, offset=0, equation_config={"auto_add": False}, ) builder.add_conditions([Condition(lhs=Column("modulo_num"), op=Op.EQ, rhs=0)]) snuba_query = builder.get_snql_query().query snuba_query = snuba_query.set_select( snuba_query.select + [ Function( "not", [Function("has", [Column("contexts.key"), TRACE_PARENT_SPAN_CONTEXT])], alias="is_root", ) ] ) snuba_query = snuba_query.set_groupby( snuba_query.groupby + [Column("modulo_num"), Column("contexts.key")] ) data = raw_snql_query( SnubaRequest(dataset=Dataset.Discover.value, app_id="default", query=snuba_query), referrer=Referrer.DYNAMIC_SAMPLING_DISTRIBUTION_FETCH_TRANSACTIONS.value, )["data"] return data
token_counts: 275 | file_name: project_dynamic_sampling.py | language: Python
src/sentry/api/endpoints/project_dynamic_sampling.py
ceee9dfd8d6fed70d34546e7b46ebb7bf1d49745
repo: sentry | complexity: 1
id: 247,315 | vocab_size: 43 | ast_levels: 15 | nloc: 42 | n_ast_nodes: 412 | n_identifiers: 15 | n_ast_errors: 0 | n_words: 69 | n_whitespaces: 500
fun_name: test_search_filter_not_labels
Add type hints to `tests/rest/client` (#12108) * Add type hints to `tests/rest/client` * newsfile * fix imports * add `test_account.py` * Remove one type hint in `test_report_event.py` * change `on_create_room` to `async` * update new functions in `test_third_party_rules.py` * Add `test_filter.py` * add `test_rooms.py` * change to `assertEquals` to `assertEqual` * lint
https://github.com/matrix-org/synapse.git
def test_search_filter_not_labels(self) -> None: request_data = json.dumps( { "search_categories": { "room_events": { "search_term": "label", "filter": self.FILTER_NOT_LABELS, } } } ) self._send_labelled_messages_in_room() channel = self.make_request( "POST", "/search?access_token=%s" % self.tok, request_data ) results = channel.json_body["search_categories"]["room_events"]["results"] self.assertEqual( len(results), 4, [result["result"]["content"] for result in results], ) self.assertEqual( results[0]["result"]["content"]["body"], "without label", results[0]["result"]["content"]["body"], ) self.assertEqual( results[1]["result"]["content"]["body"], "without label", results[1]["result"]["content"]["body"], ) self.assertEqual( results[2]["result"]["content"]["body"], "with wrong label", results[2]["result"]["content"]["body"], ) self.assertEqual( results[3]["result"]["content"]["body"], "with two wrong labels", results[3]["result"]["content"]["body"], )
token_counts: 236 | file_name: test_rooms.py | language: Python
tests/rest/client/test_rooms.py
2ffaf30803f93273a4d8a65c9e6c3110c8433488
repo: synapse | complexity: 2
id: 271,596 | vocab_size: 71 | ast_levels: 13 | nloc: 14 | n_ast_nodes: 89 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 87 | n_whitespaces: 274
fun_name: _assert_weights_created
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _assert_weights_created(self): if self.dynamic: return if ( "build" in self.__class__.__dict__ and self.__class__ != Model and not self.built ): # For any model that has customized build() method but hasn't # been invoked yet, this will cover both sequential and subclass model. # Also make sure to exclude Model class itself which has build() defined. raise ValueError( f"Weights for model {self.name} have not yet been " "created. " "Weights are created when the Model is first called on " "inputs or `build()` is called with an `input_shape`." )
token_counts: 43 | file_name: training.py | language: Python
keras/engine/training.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
repo: keras | complexity: 5
id: 47,758 | vocab_size: 10 | ast_levels: 6 | nloc: 28 | n_ast_nodes: 44 | n_identifiers: 10 | n_ast_errors: 0 | n_words: 12 | n_whitespaces: 33
fun_name: _iter_all_mapped_downstreams
Ensure TaskMap only checks "relevant" dependencies (#23053) When looking for "mapped dependants" of a task, we only want a task if it not only is a direct downstream of the task, but also it actually "uses" the task's pushed XCom for task mapping. So we need to peek into the mapped downstream task's expansion kwargs, and only count it as a mapped dependant if the upstream is referenced there.
https://github.com/apache/airflow.git
def _iter_all_mapped_downstreams(self) -> Iterator["MappedOperator"]: from airflow.models.mappedoperator import MappedOperator from airflow.utils.task_group import TaskGroup
token_counts: 82 | file_name: taskmixin.py | language: Python
airflow/models/taskmixin.py
197cff3194e855b9207c3c0da8ae093a0d5dda55
repo: airflow | complexity: 6
id: 245,836 | vocab_size: 12 | ast_levels: 11 | nloc: 3 | n_ast_nodes: 41 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 12 | n_whitespaces: 37
fun_name: _forward
[Feature] Support training detection models in detectron2 (#8672) * [Feature]Support using mmengine to train detectron2 * update * del unnecessary comments * minor fix * minor fix * Support mask rcnn and retinanet * minor fix * minor fix * minor fix * minor fix * minor fix * minor fix * chinese doc * update * minor fix * minor fix * update docs
https://github.com/open-mmlab/mmdetection.git
def _forward(self, *args, **kwargs):
    raise NotImplementedError(
        f'`_forward` is not implemented in {self.__class__.__name__}')
token_counts: 18 | file_name: d2_wrapper.py | language: Python
mmdet/models/detectors/d2_wrapper.py
9c1b26726eebe4a196d213249dc22e8017761fab
repo: mmdetection | complexity: 1
id: 188,886 | vocab_size: 242 | ast_levels: 17 | nloc: 100 | n_ast_nodes: 940 | n_identifiers: 48 | n_ast_errors: 0 | n_words: 464 | n_whitespaces: 2,148
fun_name: get_bookmark_data
Automated upgrade of code to python 3.7+ Done by https://github.com/asottile/pyupgrade Consists mainly of moving string formatting to f-strings and removing encoding declarations
https://github.com/kovidgoyal/calibre.git
def get_bookmark_data(self): user_notes = {} self.timestamp = os.path.getmtime(self.path) cursor = self.db_connection.cursor() book_query_values = (self.contentId,) kepub_chapter_query = ( 'SELECT c.ContentID, c.BookTitle, c.Title, c.VolumeIndex, c.___NumPages, c.MimeType ' 'FROM content c ' 'WHERE ContentType = 899 ' 'AND c.BookID = ? ' 'ORDER BY c.VolumeIndex' ) bookmark_query = ( 'SELECT bm.BookmarkID, bm.ContentID, bm.Text, bm.Annotation, ' 'bm.ChapterProgress, bm.StartContainerChildIndex, bm.StartOffset, ' 'c.BookTitle, c.Title, c.volumeIndex, c.MimeType ' 'FROM Bookmark bm LEFT OUTER JOIN Content c ON c.ContentID = bm.ContentID ' 'WHERE bm.Hidden = "false" AND bm.volumeid = ? ' 'ORDER BY bm.ContentID, bm.chapterprogress' ) debug_print(f"Kobo::Bookmark::get_bookmark_data - getting kepub chapters: contentId={self.contentId}") cursor.execute(kepub_chapter_query, book_query_values) kepub_chapters = {} if self.kepub: try: for chapter_row in cursor: chapter_contentID = chapter_row['ContentID'] chapter_contentID = chapter_contentID[:chapter_contentID.rfind('-')] kepub_chapters[chapter_contentID] = { 'chapter_title': chapter_row['Title'], 'chapter_index': chapter_row['VolumeIndex'] } debug_print(f"Kobo::Bookmark::get_bookmark_data - getting kepub chapter: kepub chapters={kepub_chapters}") except: debug_print("Kobo::Bookmark::get_bookmark_data - No chapters found") cursor.execute(bookmark_query, book_query_values) previous_chapter = 0 bm_count = 0 for row in cursor: current_chapter = row['VolumeIndex'] if row['VolumeIndex'] is not None else 0 chapter_title = row['Title'] # For kepubs on newer firmware, the title needs to come from an 899 row. if self.kepub: chapter_contentID = row['ContentID'] debug_print(f"Kobo::Bookmark::get_bookmark_data - getting kepub: chapter chapter_contentID='{chapter_contentID}'") filename_index = chapter_contentID.find('!') book_contentID_part = chapter_contentID[:filename_index] debug_print(f"Kobo::Bookmark::get_bookmark_data - getting kepub: chapter book_contentID_part='{book_contentID_part}'") file_contentID_part = chapter_contentID[filename_index + 1:] filename_index = file_contentID_part.find('!') opf_reference = file_contentID_part[:filename_index] debug_print(f"Kobo::Bookmark::get_bookmark_data - getting kepub: chapter opf_reference='{opf_reference}'") file_contentID_part = file_contentID_part[filename_index + 1:] debug_print(f"Kobo::Bookmark::get_bookmark_data - getting kepub: chapter file_contentID_part='{file_contentID_part}'") # from urllib import quote # file_contentID_part = quote(file_contentID_part) chapter_contentID = book_contentID_part + "!" + opf_reference + "!" 
+ file_contentID_part debug_print(f"Kobo::Bookmark::get_bookmark_data - getting kepub chapter chapter_contentID='{chapter_contentID}'") kepub_chapter = kepub_chapters.get(chapter_contentID, None) if kepub_chapter is not None: chapter_title = kepub_chapter['chapter_title'] current_chapter = kepub_chapter['chapter_index'] else: chapter_title = '' current_chapter = 0 if previous_chapter == current_chapter: bm_count = bm_count + 1 else: bm_count = 0 text = row['Text'] annotation = row['Annotation'] # A dog ear (bent upper right corner) is a bookmark if row['StartContainerChildIndex'] == row['StartOffset'] == 0: # StartContainerChildIndex = StartOffset = 0 e_type = 'Bookmark' text = row['Title'] # highlight is text with no annotation elif text is not None and (annotation is None or annotation == ""): e_type = 'Highlight' elif text and annotation: e_type = 'Annotation' else: e_type = 'Unknown annotation type' note_id = current_chapter * 1000 + bm_count # book_title = row[8] chapter_progress = min(round(float(100*row['ChapterProgress']),2),100) user_notes[note_id] = dict(id=self.id, displayed_location=note_id, type=e_type, text=text, annotation=annotation, chapter=current_chapter, chapter_title=chapter_title, chapter_progress=chapter_progress) previous_chapter = current_chapter # debug_print("e_type:" , e_type, '\t', 'loc: ', note_id, 'text: ', text, # 'annotation: ', annotation, 'chapter_title: ', chapter_title, # 'chapter_progress: ', chapter_progress, 'date: ') cursor.execute('SELECT DateLastRead, ___PercentRead, ReadStatus ' 'FROM content ' 'WHERE bookid IS NULL ' 'AND ReadStatus > 0 ' 'AND ContentID = ? ' 'ORDER BY DateLastRead, ReadStatus', book_query_values) for row in cursor: self.last_read = row['DateLastRead'] self.percent_read = 100 if (row['ReadStatus'] == 2) else row['___PercentRead'] # print row[1] cursor.close() # self.last_read_location = self.last_read - self.pdf_page_offset self.user_notes = user_notes
token_counts: 518 | file_name: bookmark.py | language: Python
src/calibre/devices/kobo/bookmark.py
eb78a761a99ac20a6364f85e12059fec6517d890
repo: calibre | complexity: 17
id: 177,015 | vocab_size: 40 | ast_levels: 13 | nloc: 12 | n_ast_nodes: 183 | n_identifiers: 22 | n_ast_errors: 0 | n_words: 57 | n_whitespaces: 177
fun_name: assert_lca_dicts_same
Naive lowest common ancestor implementation (#5736) * Add naive lca methods * Naive algorithm implementation for LCA * Modify naive lca functions * Correct parameters of nx.ancestors * Update lowest_common_ancestors.py * Parametrize tests * Apply suggestions from code review Co-authored-by: Dan Schult <[email protected]> * Yield instead of append * Tests for naive lca * Correct test cases for naive lca algorithms * Apply suggestions from code review Co-authored-by: Mridul Seth <[email protected]> * Fix function name -when calling * Make requested changes * Inlining _get_a_lowest_common_ancestor Co-authored-by: dtuncturk <[email protected]> Co-authored-by: Dan Schult <[email protected]> Co-authored-by: Mridul Seth <[email protected]>
https://github.com/networkx/networkx.git
def assert_lca_dicts_same(self, d1, d2, G=None): if G is None: G = self.DG root_distance = self.root_distance else: roots = [n for n, deg in G.in_degree if deg == 0] assert len(roots) == 1 root_distance = nx.shortest_path_length(G, source=roots[0]) for a, b in ((min(pair), max(pair)) for pair in chain(d1, d2)): assert ( root_distance[get_pair(d1, a, b)] == root_distance[get_pair(d2, a, b)] )
token_counts: 124 | file_name: test_lowest_common_ancestors.py | language: Python
networkx/algorithms/tests/test_lowest_common_ancestors.py
b2f91c34a23058dd70b41784af0d87890216026a
repo: networkx | complexity: 6
id: 14,007 | vocab_size: 9 | ast_levels: 6 | nloc: 12 | n_ast_nodes: 38 | n_identifiers: 7 | n_ast_errors: 0 | n_words: 9 | n_whitespaces: 16
fun_name: stream
refactor: make server link to gateway so that custom gateway can inherit (#5526)
https://github.com/jina-ai/jina.git
async def stream(self, request_iterator, context=None, *args, **kwargs) -> AsyncIterator['Request']:
token_counts: 50 | file_name: gateway.py | language: Python
jina/serve/runtimes/gateway/grpc/gateway.py
f854d5ddc10f6d4392a7da8722482463af56be9b
repo: jina | complexity: 2
id: 68,196 | vocab_size: 47 | ast_levels: 14 | nloc: 21 | n_ast_nodes: 225 | n_identifiers: 19 | n_ast_errors: 0 | n_words: 72 | n_whitespaces: 51
fun_name: get_actual_start_end_datetime_of_shift
refactor: consider timeslots in `get_employee_shift`
https://github.com/frappe/erpnext.git
def get_actual_start_end_datetime_of_shift(employee, for_datetime, consider_default_shift=False): actual_shift_start = actual_shift_end = shift_details = None shift_timings_as_per_timestamp = get_employee_shift_timings(employee, for_datetime, consider_default_shift) timestamp_list = [] for shift in shift_timings_as_per_timestamp: if shift: timestamp_list.extend([shift.actual_start, shift.actual_end]) else: timestamp_list.extend([None, None]) timestamp_index = None for index, timestamp in enumerate(timestamp_list): if timestamp and for_datetime <= timestamp: timestamp_index = index break if timestamp_index and timestamp_index%2 == 1: shift_details = shift_timings_as_per_timestamp[int((timestamp_index-1)/2)] actual_shift_start = shift_details.actual_start actual_shift_end = shift_details.actual_end elif timestamp_index: shift_details = shift_timings_as_per_timestamp[int(timestamp_index/2)] return actual_shift_start, actual_shift_end, shift_details
token_counts: 145 | file_name: shift_assignment.py | language: Python
erpnext/hr/doctype/shift_assignment/shift_assignment.py
625a9f69f592be8c50c9b1bd1a16e0b7b9157988
repo: erpnext | complexity: 9
id: 286,290 | vocab_size: 25 | ast_levels: 14 | nloc: 7 | n_ast_nodes: 130 | n_identifiers: 12 | n_ast_errors: 0 | n_words: 41 | n_whitespaces: 76
fun_name: get_last_time_market_was_open
[SDK] Allow silencing verbose output in commands that use stocks/load (#3180) * remove verbose on load * Revert implementation of the verbosity setting in stocks controller * Edit docstrings to comply with pydocstyle linting rules * Fix typos in variable names and help text * Add verbosity setting to forex load helper as it uses the stocks helper * Update docstrings to comply with pydocstyle linting rules * Update tests * Fix test relying on local sources settings * Remove old test cassettes * Add new test data * WIP: Fix futures tests * Clean up test file * Fix futures tests having a time component * Fix futures model tests Co-authored-by: James Maslek <[email protected]> Co-authored-by: Theodore Aptekarev <[email protected]>
https://github.com/OpenBB-finance/OpenBBTerminal.git
def get_last_time_market_was_open(dt): # Check if it is a weekend if dt.date().weekday() > 4: dt = get_last_time_market_was_open(dt - timedelta(hours=24)) # Check if it is a holiday if dt.strftime("%Y-%m-%d") in us_holidays(): dt = get_last_time_market_was_open(dt - timedelta(hours=24)) dt = dt.replace(hour=21, minute=0, second=0) return dt
token_counts: 77 | file_name: helper_funcs.py | language: Python
openbb_terminal/helper_funcs.py
47549cbd9f52a436c06b040fda5b88a7d2bf700a
repo: OpenBBTerminal | complexity: 3
id: 32,820 | vocab_size: 7 | ast_levels: 8 | nloc: 5 | n_ast_nodes: 48 | n_identifiers: 5 | n_ast_errors: 0 | n_words: 12 | n_whitespaces: 27
fun_name: _patch_hf_hub_tqdm
Use new huggingface_hub tools for download models (#18438) * Draft new cached_file * Initial draft for config and model * Small fixes * Fix first batch of tests * Look in cache when internet is down * Fix last tests * Bad black, not fixing all quality errors * Make diff less * Implement change for TF and Flax models * Add tokenizer and feature extractor * For compatibility with main * Add utils to move the cache and auto-do it at first use. * Quality * Deal with empty commit shas * Deal with empty etag * Address review comments
https://github.com/huggingface/transformers.git
def _patch_hf_hub_tqdm():
    old_tqdm = huggingface_hub.file_download.tqdm
    huggingface_hub.file_download.tqdm = tqdm
    yield
    huggingface_hub.file_download.tqdm = old_tqdm
token_counts: 27 | file_name: hub.py | language: Python
src/transformers/utils/hub.py
5cd40323684c183c30b34758aea1e877996a7ac9
repo: transformers | complexity: 1
id: 85,460 | vocab_size: 39 | ast_levels: 11 | nloc: 9 | n_ast_nodes: 158 | n_identifiers: 16 | n_ast_errors: 1 | n_words: 54 | n_whitespaces: 95
fun_name: test_spanner_indexer_implementation_bulk_insert_twice_gives_same_result
feat(indexer-spanner): Implementation of core api's (#37802) Implementation of all the api's of `RawCloudSpannerIndexer`. The `bulk_record` implementation uses DML instead of mutations. Did not implement the `bulk_record` implementation using mutations since this PR is already big. The test cases run when setup correctly with our cloud instance.
https://github.com/getsentry/sentry.git
def test_spanner_indexer_implementation_bulk_insert_twice_gives_same_result(testing_indexer): record = {"org_id": 55555, "string": get_random_string(10)} record1_int = testing_indexer.record( use_case_id=UseCaseKey.PERFORMANCE, org_id=record["org_id"], string=record["string"] ) # Insert the record again to validate that the returned id is the one we # got from the first insert. record2_int = testing_indexer.record( use_case_id=UseCaseKey.PERFORMANCE, org_id=record["org_id"], string=record["string"] ) assert record1_int == record2_int @patch( "sentry.sentry_metrics.indexer.cloudspanner.cloudspanner.RawCloudSpannerIndexer._insert_collisions_handled" ) @pytest.mark.skip(reason="TODO: Implement it correctly")
@patch( "sentry.sentry_metrics.indexer.cloudspanner.cloudspanner.RawCloudSpannerIndexer._insert_collisions_handled" ) @pytest.mark.skip(reason="TODO: Implement it correctly")
token_counts: 76 | file_name: test_cloudspanner.py | language: Python
tests/sentry/sentry_metrics/test_cloudspanner.py
21bf2ff99d3352c7cc8b7901fb3b4c264a71a8e8
repo: sentry | complexity: 1
id: 176,383 | vocab_size: 22 | ast_levels: 11 | nloc: 6 | n_ast_nodes: 122 | n_identifiers: 15 | n_ast_errors: 0 | n_words: 24 | n_whitespaces: 42
fun_name: test_to_numpy_array_structured_dtype_attrs_from_fields
Add structured dtypes to `to_numpy_array` (#5324) * Add basic test for supporting multi-attr adjacency. * WIP: sloppy implementation of multiattr adjacency in to_numpy_array. Conditionals could be improved. * Reorg conditionals. * Test to_numpy_array raises with structured dtype for multigraphs. * Fix default value handling for structured types. * Add tests for dtypes with single field. * Parametrize field tests for directed/undirected inputs. * Handle ambiguous case: structured dtype + specified weight. * Add test for multiple fields that may/not have corresponding edge attrs. * Updated docstring. * Add tests with nonedge values + structured dtypes.
https://github.com/networkx/networkx.git
def test_to_numpy_array_structured_dtype_attrs_from_fields(G, expected): G.add_edge(0, 1, weight=10, cost=5.0) dtype = np.dtype([("weight", int), ("cost", int)]) A = nx.to_numpy_array(G, dtype=dtype, weight=None) expected = np.asarray(expected, dtype=dtype) npt.assert_array_equal(A, expected)
token_counts: 82 | file_name: test_convert_numpy.py | language: Python
networkx/tests/test_convert_numpy.py
d2278b4c3402c735a31e266adde75ecc2eeb98eb
repo: networkx | complexity: 1
id: 123,994 | vocab_size: 25 | ast_levels: 11 | nloc: 14 | n_ast_nodes: 78 | n_identifiers: 9 | n_ast_errors: 0 | n_words: 26 | n_whitespaces: 91
fun_name: _garbage_collect
[workflow] Major refactoring - new async workflow executor (#25618) * major workflow refactoring
https://github.com/ray-project/ray.git
def _garbage_collect(self) -> None: state = self._state while state.free_outputs: # garbage collect all free outputs immediately gc_task_id = state.free_outputs.pop() assert state.get_input(gc_task_id) is not None state.output_map.pop(gc_task_id, None)
token_counts: 47 | file_name: workflow_executor.py | language: Python
python/ray/workflow/workflow_executor.py
ddd63aba77b0e4da699e358beba37cd907f7cb37
repo: ray | complexity: 2
id: 91,381 | vocab_size: 26 | ast_levels: 12 | nloc: 14 | n_ast_nodes: 198 | n_identifiers: 30 | n_ast_errors: 0 | n_words: 32 | n_whitespaces: 142
fun_name: test_upgrade_org_config_no_dsn
ref: replace self.assertRaises with pytest.raises (#35685) * add flake8 plugin to detect assertRaises * ref: replace self.assertRaises with pytest.raises * non-sed fixes
https://github.com/getsentry/sentry.git
def test_upgrade_org_config_no_dsn(self): with self.tasks(): self.assert_setup_flow() project_id = self.project.id org = self.organization data = { "project_mappings": [[project_id, "Qme9NXBpguaRxcXssZ1NWHVaM98MAL6PHDXUs1jPrgiM8H"]] } integration = Integration.objects.get(provider=self.provider.key) installation = integration.get_installation(org.id) dsn = ProjectKey.get_default(project=Project.objects.get(id=project_id)) dsn.update(id=dsn.id, status=ProjectKeyStatus.INACTIVE) with pytest.raises(ValidationError): installation.update_organization_config(data)
token_counts: 118 | file_name: test_integration.py | language: Python
tests/sentry/integrations/vercel/test_integration.py
284e980df0018f8baee659999268bdd4c7d08255
repo: sentry | complexity: 1
id: 259,862 | vocab_size: 25 | ast_levels: 13 | nloc: 10 | n_ast_nodes: 131 | n_identifiers: 20 | n_ast_errors: 0 | n_words: 27 | n_whitespaces: 69
fun_name: test_partial_fit_validate_feature_names
FIX partial_fit from SelectFromModel doesn't validate the parameters (#23299) Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def test_partial_fit_validate_feature_names(as_frame): pytest.importorskip("pandas") X, y = datasets.load_iris(as_frame=as_frame, return_X_y=True) selector = SelectFromModel(estimator=SGDClassifier(), max_features=4).partial_fit( X, y, classes=[0, 1, 2] ) if as_frame: assert_array_equal(selector.feature_names_in_, X.columns) else: assert not hasattr(selector, "feature_names_in_")
token_counts: 82 | file_name: test_from_model.py | language: Python
sklearn/feature_selection/tests/test_from_model.py
eace47aea7431b4b6ea08e4fb33bd73805d1f1b0
repo: scikit-learn | complexity: 2
id: 125,200 | vocab_size: 7 | ast_levels: 12 | nloc: 2 | n_ast_nodes: 47 | n_identifiers: 8 | n_ast_errors: 0 | n_words: 7 | n_whitespaces: 13
fun_name: _multiline_width
[State Observability] Use a table format by default (#26159) NOTE: tabulate is copied/pasted to the codebase for table formatting. This PR changes the default layout to be the table format for both summary and list APIs.
https://github.com/ray-project/ray.git
def _multiline_width(multiline_s, line_width_fn=len):
    return max(map(line_width_fn, re.split("[\r\n]", multiline_s)))
token_counts: 27 | file_name: tabulate.py | language: Python
python/ray/_private/thirdparty/tabulate/tabulate.py
adf24bfa9723b0621183bb27f0c889b813c06e8a
repo: ray | complexity: 1
id: 260,243 | vocab_size: 94 | ast_levels: 25 | nloc: 29 | n_ast_nodes: 315 | n_identifiers: 20 | n_ast_errors: 0 | n_words: 145 | n_whitespaces: 504
fun_name: validate_parameter_constraints
MNT Param validation: Allow to skip validation of a parameter (#23602)
https://github.com/scikit-learn/scikit-learn.git
def validate_parameter_constraints(parameter_constraints, params, caller_name): if params.keys() != parameter_constraints.keys(): raise ValueError( f"The parameter constraints {list(parameter_constraints.keys())} do not " f"match the parameters to validate {list(params.keys())}." ) for param_name, param_val in params.items(): constraints = parameter_constraints[param_name] if constraints == "no_validation": continue constraints = [make_constraint(constraint) for constraint in constraints] for constraint in constraints: if constraint.is_satisfied_by(param_val): # this constraint is satisfied, no need to check further. break else: # No constraint is satisfied, raise with an informative message. # Ignore constraints that we don't want to expose in the error message, # i.e. options that are for internal purpose or not officially supported. constraints = [ constraint for constraint in constraints if not constraint.hidden ] if len(constraints) == 1: constraints_str = f"{constraints[0]}" else: constraints_str = ( f"{', '.join([str(c) for c in constraints[:-1]])} or" f" {constraints[-1]}" ) raise ValueError( f"The {param_name!r} parameter of {caller_name} must be" f" {constraints_str}. Got {param_val!r} instead." )
token_counts: 126 | file_name: _param_validation.py | language: Python
sklearn/utils/_param_validation.py
d7c38282839d09676c49ac60fdd67af89d61e79c
repo: scikit-learn | complexity: 10
id: 311,432 | vocab_size: 75 | ast_levels: 11 | nloc: 10 | n_ast_nodes: 160 | n_identifiers: 25 | n_ast_errors: 0 | n_words: 113 | n_whitespaces: 247
fun_name: _entry_from_accessory
Remove deprecated helper functions from homekit_controller pairing flow (#65270)
https://github.com/home-assistant/core.git
async def _entry_from_accessory(self, pairing): # The bulk of the pairing record is stored on the config entry. # A specific exception is the 'accessories' key. This is more # volatile. We do cache it, but not against the config entry. # So copy the pairing data and mutate the copy. pairing_data = pairing.pairing_data.copy() # Use the accessories data from the pairing operation if it is # available. Otherwise request a fresh copy from the API. # This removes the 'accessories' key from pairing_data at # the same time. if not (accessories := pairing_data.pop("accessories", None)): accessories = await pairing.list_accessories_and_characteristics() parsed = Accessories.from_list(accessories) accessory_info = parsed.aid(1).services.first( service_type=ServicesTypes.ACCESSORY_INFORMATION ) name = accessory_info.value(CharacteristicsTypes.NAME, "") return self.async_create_entry(title=name, data=pairing_data)
token_counts: 92 | file_name: config_flow.py | language: Python
homeassistant/components/homekit_controller/config_flow.py
cc94af2872945667d80f8f76512260ae6205d739
repo: core | complexity: 2
id: 269,753 | vocab_size: 19 | ast_levels: 15 | nloc: 10 | n_ast_nodes: 118 | n_identifiers: 6 | n_ast_errors: 0 | n_words: 33 | n_whitespaces: 95
fun_name: generate_benchmark_params_cpu_gpu
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def generate_benchmark_params_cpu_gpu(*params_list): benchmark_params = [] for params in params_list: benchmark_params.extend( [((param[0] + "_CPU",) + param[1:]) for param in params] ) benchmark_params.extend( [((param[0] + "_GPU",) + param[1:]) for param in params] ) return benchmark_params
token_counts: 74 | file_name: benchmark_util.py | language: Python
keras/benchmarks/benchmark_util.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
repo: keras | complexity: 4
id: 190,115 | vocab_size: 30 | ast_levels: 10 | nloc: 24 | n_ast_nodes: 217 | n_identifiers: 21 | n_ast_errors: 0 | n_words: 48 | n_whitespaces: 272
fun_name: add_bases
Replaced renderer strings with :class:`.RendererType` enum entries (#3017) * remove unused constants * remove deprecated --use_opengl_renderer flag * remove unnecessary workaround with class initialization * add OpenGLMobject.name to get rid of one renderer check * add VMobject.n_points_per_curve property to get rid of more renderer checks * replace renderer string checks with enum check * added mobject.utils module with renderer-dependent class getters * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ensure that capitalization of passed renderer type is irrelevant * remove unused entries from mobject.utils.__all__ * fixed isort ignore in manim.__init__ * fixed lower-case casting of passed renderer * fixed doctests * more documentation + doctests for mobject.utils * removed incorrect paragraph about ConverToOpenGL metaclass * added docstring for RendererType enum * renderer compatibility section in plugin dev documentation * added mobject.utils to reference manual * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Remove actual doctest (it ran the compatibility code) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Naveen M K <[email protected]>
https://github.com/ManimCommunity/manim.git
def add_bases(self): if config.renderer == RendererType.OPENGL: color = self.color opacity = self.opacity elif config.renderer == RendererType.CAIRO: color = self.fill_color opacity = self.fill_opacity self.base_top = Circle( radius=self.radius, color=color, fill_opacity=opacity, shade_in_3d=True, stroke_width=0, ) self.base_top.shift(self.u_range[1] * IN) self.base_bottom = Circle( radius=self.radius, color=color, fill_opacity=opacity, shade_in_3d=True, stroke_width=0, ) self.base_bottom.shift(self.u_range[0] * IN) self.add(self.base_top, self.base_bottom)
token_counts: 144 | file_name: three_dimensions.py | language: Python
manim/mobject/three_d/three_dimensions.py
bd844f46d804c8cad50d06ad20ab5bebaee9987b
repo: manim | complexity: 3
id: 290,428 | vocab_size: 25 | ast_levels: 10 | nloc: 12 | n_ast_nodes: 97 | n_identifiers: 15 | n_ast_errors: 0 | n_words: 25 | n_whitespaces: 133
fun_name: async_return_to_base
Use `_attr_` for MQTT vacuum (#81534) * Use `_attr_` for MQTT vacuum * Remove unneeded properties * Follow-up comment * Remove default value
https://github.com/home-assistant/core.git
async def async_return_to_base(self, **kwargs): if self.supported_features & VacuumEntityFeature.RETURN_HOME == 0: return None await self.async_publish( self._command_topic, self._payloads[CONF_PAYLOAD_RETURN_TO_BASE], self._qos, self._retain, self._encoding, ) self._attr_status = "Returning home..." self.async_write_ha_state()
token_counts: 61 | file_name: schema_legacy.py | language: Python
homeassistant/components/mqtt/vacuum/schema_legacy.py
b364ef98a073214aad8deff4ff9b91e9ff041557
repo: core | complexity: 2
id: 131,041 | vocab_size: 12 | ast_levels: 11 | nloc: 27 | n_ast_nodes: 77 | n_identifiers: 12 | n_ast_errors: 2 | n_words: 12 | n_whitespaces: 25
fun_name: test_working_dir_basic
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def test_working_dir_basic(ray_start, tmp_dir, use_ray_client): with open("hello", "w") as f: f.write("world") driver =
driver = """ import ray from ray import serve job_config = ray.job_config.JobConfig(runtime_env={{"working_dir": "."}}) if {use_ray_client}: ray.util.connect("{client_addr}", job_config=job_config) else: ray.init(address="auto", job_config=job_config)@serve.deployment
token_counts: 43 | file_name: test_runtime_env.py | language: Python
python/ray/serve/tests/test_runtime_env.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
repo: ray | complexity: 1
id: 259,452 | vocab_size: 110 | ast_levels: 12 | nloc: 27 | n_ast_nodes: 604 | n_identifiers: 42 | n_ast_errors: 1 | n_words: 168 | n_whitespaces: 315
fun_name: test_glm_sample_weight_consistency
ENH migrate GLMs / TweedieRegressor to linear loss (#22548) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def test_glm_sample_weight_consistency(fit_intercept, alpha, GLMEstimator): rng = np.random.RandomState(0) n_samples, n_features = 10, 5 X = rng.rand(n_samples, n_features) y = rng.rand(n_samples) glm_params = dict(alpha=alpha, fit_intercept=fit_intercept) glm = GLMEstimator(**glm_params).fit(X, y) coef = glm.coef_.copy() # sample_weight=np.ones(..) should be equivalent to sample_weight=None sample_weight = np.ones(y.shape) glm.fit(X, y, sample_weight=sample_weight) assert_allclose(glm.coef_, coef, rtol=1e-12) # sample_weight are normalized to 1 so, scaling them has no effect sample_weight = 2 * np.ones(y.shape) glm.fit(X, y, sample_weight=sample_weight) assert_allclose(glm.coef_, coef, rtol=1e-12) # setting one element of sample_weight to 0 is equivalent to removing # the corresponding sample sample_weight = np.ones(y.shape) sample_weight[-1] = 0 glm.fit(X, y, sample_weight=sample_weight) coef1 = glm.coef_.copy() glm.fit(X[:-1], y[:-1]) assert_allclose(glm.coef_, coef1, rtol=1e-12) # check that multiplying sample_weight by 2 is equivalent # to repeating corresponding samples twice X2 = np.concatenate([X, X[: n_samples // 2]], axis=0) y2 = np.concatenate([y, y[: n_samples // 2]]) sample_weight_1 = np.ones(len(y)) sample_weight_1[: n_samples // 2] = 2 glm1 = GLMEstimator(**glm_params).fit(X, y, sample_weight=sample_weight_1) glm2 = GLMEstimator(**glm_params).fit(X2, y2, sample_weight=None) assert_allclose(glm1.coef_, glm2.coef_) @pytest.mark.parametrize("fit_intercept", [True, False]) @pytest.mark.parametrize( "estimator", [ PoissonRegressor(), GammaRegressor(), TweedieRegressor(power=3.0), TweedieRegressor(power=0, link="log"), TweedieRegressor(power=1.5), TweedieRegressor(power=4.5), ], )
@pytest.mark.parametrize("fit_intercept", [True, False]) @pytest.mark.parametrize( "estimator", [ PoissonRegressor(), GammaRegressor(), TweedieRegressor(power=3.0), TweedieRegressor(power=0, link="log"), TweedieRegressor(power=1.5), TweedieRegressor(power=4.5), ], )
token_counts: 314 | file_name: test_glm.py | language: Python
sklearn/linear_model/_glm/tests/test_glm.py
75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc
repo: scikit-learn | complexity: 1
id: 200,594 | vocab_size: 182 | ast_levels: 22 | nloc: 107 | n_ast_nodes: 1,238 | n_identifiers: 59 | n_ast_errors: 0 | n_words: 550 | n_whitespaces: 2,634
fun_name: contract_metric
TensMul.contract_metric: correctly handle case where expr.canon_bp() == 0 Earlier, contract_metric would complain that S.Zero has no attribute contract_metric. https://github.com/sympy/sympy/issues/24354
https://github.com/sympy/sympy.git
def contract_metric(self, g): expr = self.expand() if self != expr: expr = expr.canon_bp() if expr == S.Zero: return expr else: return expr.contract_metric(g) pos_map = self._get_indices_to_args_pos() args = list(self.args) #antisym = g.index_types[0].metric_antisym if g.symmetry == TensorSymmetry.fully_symmetric(-2): antisym = 1 elif g.symmetry == TensorSymmetry.fully_symmetric(2): antisym = 0 elif g.symmetry == TensorSymmetry.no_symmetry(2): antisym = None else: raise NotImplementedError # list of positions of the metric ``g`` inside ``args`` gpos = [i for i, x in enumerate(self.args) if isinstance(x, Tensor) and x.component == g] if not gpos: return self # Sign is either 1 or -1, to correct the sign after metric contraction # (for spinor indices). sign = 1 dum = self.dum[:] free = self.free[:] elim = set() for gposx in gpos: if gposx in elim: continue free1 = [x for x in free if pos_map[x[1]] == gposx] dum1 = [x for x in dum if pos_map[x[0]] == gposx or pos_map[x[1]] == gposx] if not dum1: continue elim.add(gposx) # subs with the multiplication neutral element, that is, remove it: args[gposx] = 1 if len(dum1) == 2: if not antisym: dum10, dum11 = dum1 if pos_map[dum10[1]] == gposx: # the index with pos p0 contravariant p0 = dum10[0] else: # the index with pos p0 is covariant p0 = dum10[1] if pos_map[dum11[1]] == gposx: # the index with pos p1 is contravariant p1 = dum11[0] else: # the index with pos p1 is covariant p1 = dum11[1] dum.append((p0, p1)) else: dum10, dum11 = dum1 # change the sign to bring the indices of the metric to contravariant # form; change the sign if dum10 has the metric index in position 0 if pos_map[dum10[1]] == gposx: # the index with pos p0 is contravariant p0 = dum10[0] if dum10[1] == 1: sign = -sign else: # the index with pos p0 is covariant p0 = dum10[1] if dum10[0] == 0: sign = -sign if pos_map[dum11[1]] == gposx: # the index with pos p1 is contravariant p1 = dum11[0] sign = -sign else: # the index with pos p1 is covariant p1 = dum11[1] dum.append((p0, p1)) elif len(dum1) == 1: if not antisym: dp0, dp1 = dum1[0] if pos_map[dp0] == pos_map[dp1]: # g(i, -i) typ = g.index_types[0] sign = sign*typ.dim else: # g(i0, i1)*p(-i1) if pos_map[dp0] == gposx: p1 = dp1 else: p1 = dp0 ind, p = free1[0] free.append((ind, p1)) else: dp0, dp1 = dum1[0] if pos_map[dp0] == pos_map[dp1]: # g(i, -i) typ = g.index_types[0] sign = sign*typ.dim if dp0 < dp1: # g(i, -i) = -D with antisymmetric metric sign = -sign else: # g(i0, i1)*p(-i1) if pos_map[dp0] == gposx: p1 = dp1 if dp0 == 0: sign = -sign else: p1 = dp0 ind, p = free1[0] free.append((ind, p1)) dum = [x for x in dum if x not in dum1] free = [x for x in free if x not in free1] # shift positions: shift = 0 shifts = [0]*len(args) for i in range(len(args)): if i in elim: shift += 2 continue shifts[i] = shift free = [(ind, p - shifts[pos_map[p]]) for (ind, p) in free if pos_map[p] not in elim] dum = [(p0 - shifts[pos_map[p0]], p1 - shifts[pos_map[p1]]) for i, (p0, p1) in enumerate(dum) if pos_map[p0] not in elim and pos_map[p1] not in elim] res = sign*TensMul(*args).doit() if not isinstance(res, TensExpr): return res im = _IndexStructure.from_components_free_dum(res.components, free, dum) return res._set_new_index_structure(im)
token_counts: 788 | file_name: tensor.py | language: Python
sympy/tensor/tensor.py
6f23dae79adad542f4b47f47c2e2f4d6f5bfef1c
repo: sympy | complexity: 46
id: 32,875 | vocab_size: 22 | ast_levels: 14 | nloc: 17 | n_ast_nodes: 118 | n_identifiers: 12 | n_ast_errors: 0 | n_words: 31 | n_whitespaces: 80
fun_name: get_memory_footprint
`bitsandbytes` - `Linear8bitLt` integration into `transformers` models (#17901) * first commit * correct replace function * add final changes - works like charm! - cannot implement tests yet - tested * clean up a bit * add bitsandbytes dependencies * working version - added import function - added bitsandbytes utils file * small fix * small fix - fix import issue * fix import issues * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * refactor a bit - move bitsandbytes utils to utils - change comments on functions * reformat docstring - reformat docstring on init_empty_weights_8bit * Update src/transformers/__init__.py Co-authored-by: Sylvain Gugger <[email protected]> * revert bad formatting * change to bitsandbytes * refactor a bit - remove init8bit since it is useless * more refactoring - fixed init empty weights issue - added threshold param * small hack to make it work * Update src/transformers/modeling_utils.py * Update src/transformers/modeling_utils.py * revmoe the small hack * modify utils file * make style + refactor a bit * create correctly device map * add correct dtype for device map creation * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * apply suggestions - remove with torch.grad - do not rely on Python bool magic! * add docstring - add docstring for new kwargs * add docstring - comment `replace_8bit_linear` function - fix weird formatting * - added more documentation - added new utility function for memory footprint tracking - colab demo to add * few modifs - typo doc - force cast into float16 when load_in_8bit is enabled * added colab link * add test architecture + docstring a bit * refactor a bit testing class * make style + refactor a bit * enhance checks - add more checks - start writing saving test * clean up a bit * male style * add more details on doc * add more tests - still needs to fix 2 tests * replace by "or" - could not fix it from GitHub GUI Co-authored-by: Sylvain Gugger <[email protected]> * refactor a bit testing code + add readme * make style * fix import issue * Update src/transformers/modeling_utils.py Co-authored-by: Michael Benayoun <[email protected]> * add few comments * add more doctring + make style * more docstring * raise error when loaded in 8bit * make style * add warning if loaded on CPU * add small sanity check * fix small comment * add bitsandbytes on dockerfile * Improve documentation - improve documentation from comments * add few comments * slow tests pass on the VM but not on the CI VM * Fix merge conflict * make style * another test should pass on a multi gpu setup * fix bad import in testing file * Fix slow tests - remove dummy batches - no more CUDA illegal memory errors * odify dockerfile * Update docs/source/en/main_classes/model.mdx * Update Dockerfile * Update model.mdx * Update Dockerfile * Apply suggestions from code review * few modifications - lm head can stay on disk/cpu - change model name so that test pass * change test value - change test value to the correct output - torch bmm changed to baddmm in bloom modeling when merging * modify installation guidelines * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * replace `n`by `name` * merge `load_in_8bit` and `low_cpu_mem_usage` * first try - keep the lm head in full precision * better 
check - check the attribute `base_model_prefix` instead of computing the number of parameters * added more tests * Update src/transformers/utils/bitsandbytes.py Co-authored-by: Sylvain Gugger <[email protected]> * Merge branch 'integration-8bit' of https://github.com/younesbelkada/transformers into integration-8bit * improve documentation - fix typos for installation - change title in the documentation Co-authored-by: Sylvain Gugger <[email protected]> Co-authored-by: Michael Benayoun <[email protected]>
https://github.com/huggingface/transformers.git
def get_memory_footprint(self, return_buffers=True):
    r
    mem = sum([param.nelement() * param.element_size() for param in self.parameters()])
    if return_buffers:
        mem_bufs = sum([buf.nelement() * buf.element_size() for buf in self.buffers()])
        mem = mem + mem_bufs
    return mem
73
modeling_utils.py
Python
src/transformers/modeling_utils.py
4a51075a96d2049f368b5f3dd6c0e9f08f599b62
transformers
4
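Editorial illustration (not part of the dataset record above): the commit message describes loading a model with the new `load_in_8bit` kwarg and the `get_memory_footprint()` helper shown in the record's code. A hedged usage sketch, assuming a transformers install with bitsandbytes support and a CUDA device; the checkpoint name is only an example:

# Sketch only; assumes transformers with bitsandbytes support and a GPU are available.
from transformers import AutoModelForCausalLM

# `load_in_8bit=True` is the kwarg introduced by the commit above.
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-1b7",  # example checkpoint, any causal LM checkpoint would do
    device_map="auto",
    load_in_8bit=True,
)

# get_memory_footprint() sums parameter (and optionally buffer) sizes in bytes,
# exactly as the record's code shows.
print(f"{model.get_memory_footprint() / 1024**2:.1f} MiB")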
286,761
10
9
10
59
9
1
13
24
sdk_dt_format
Fixing Forecast SDK with data from other menus (#3554) * minor check incase using sdk * fixes combine * datetime formating * Update forecast.md * fix testing
https://github.com/OpenBB-finance/OpenBBTerminal.git
def sdk_dt_format(x) -> str:
    x = pd.to_datetime(x)
    x = x.strftime("%Y-%m-%d")
    return x


@log_start_end(log=logger)
@log_start_end(log=logger)
26
forecast_model.py
Python
openbb_terminal/forecast/forecast_model.py
7c57fb71c5bc1032dfd4ef4f3edfc74afa8c0a2e
OpenBBTerminal
1
304,033
6
6
3
21
4
0
6
12
_get_manager
Rework bluetooth to support scans from multiple sources (#76900)
https://github.com/home-assistant/core.git
def _get_manager() -> BluetoothManager:
    return models.MANAGER
11
__init__.py
Python
tests/components/bluetooth/__init__.py
3bcc274dfa90d7d3c01ace83137c46a0898c107f
core
1
320,776
78
18
41
494
38
0
114
608
completion_item_focus
mypy: Upgrade to PyQt5-stubs 5.15.6.0 For some unknown reason, those new stubs cause a *lot* of things now to be checked by mypy which formerly probably got skipped due to Any being implied somewhere. The stubs themselves mainly improved, with a couple of regressions too. In total, there were some 337 (!) new mypy errors. This commit fixes almost all of them, and the next commit improves a fix to get things down to 0 errors again. Overview of the changes: ==== qutebrowser/app.py - Drop type ignore due to improved stubs. ==== qutebrowser/browser/browsertab.py - Specify the type of _widget members more closely than just QWidget. This is debatable: I suppose the abstract stuff shouldn't need to know anything about the concrete backends at all. But it seems like we cut some corners when initially implementing things, and put some code in browsertab.py just because the APIs of both backends happened to be compatible. Perhaps something to reconsider once we drop QtWebKit and hopefully implement a dummy backend. - Add an additional assertion in AbstractAction.run_string. This is already covered by the isinstance(member, self.action_base) above it, but that's too dynamic for mypy to understand. - Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x and y components), not a single int. - Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x and y components), not a single int. - Fix the argument types of AbstractScroller.to_perc, as it's possible to pass fractional percentages too. - Specify the type for AbstractHistoryPrivate._history. See above (_widget) re this being debatable. - Fix the return type of AbstractTabPrivate.event_target(), which can be None (see #3888). - Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS return value), not None. - Fix the argument type for AbstractTabPrivate.toggle_inspector: position can be None to use the last used position. - Declare the type of sub-objects of AbstractTab. - Fix the return value of AbstractTab.icon(), which is the QIcon, not None. ==== qutebrowser/browser/commands.py - Make sure the active window is a MainWindow (with a .win_id attribute). ==== qutebrowser/browser/downloadview.py - Add _model() which makes sure that self.model() is a DownloadModel, not None or any other model. This is needed because other methods access a variety of custom attributes on it, e.g. last_index(). ==== qutebrowser/browser/greasemonkey.py - Add an ignore for AbstractDownload.requested_url which we patch onto the downloads. Probably would be nicer to add it as a proper attribute which always gets set by the DownloadManager. ==== qutebrowser/browser/hints.py - Remove type ignores for QUrl.toString(). - Add a new type ignore for combining different URL flags (which works, but is not exactly type safe... still probably a regression in the stubs). - Make sure the things we get back from self._get_keyparser are what we actually expect. Probably should introduce a TypedDict (and/or overloads for _get_keyparser with typing.Literal) to teach mypy about the exact return value. See #7098. This is needed because we access Hint/NormalKeyParser-specific attributes such as .set_inhibited_timout() or .update_bindings(). ==== qutebrowser/browser/inspector.py - Similar changes than in browsertab.py to make some types where we share API (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next commit. ==== qutebrowser/browser/network/pac.py - Remove now unneeded type ignore for signal. 
==== qutebrowser/browser/qtnetworkdownloads.py - Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an AbstractDownload), so that we can call ._uses_nam() on it. ==== qutebrowser/browser/qutescheme.py - Remove now unneeded type ignore for QUrl flags. ==== qutebrowser/browser/urlmarks.py - Specify the type of UrlMarkManager._lineparser, as those only get initialized in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist. ==== qutebrowser/browser/webelem.py - New casts to turn single KeyboardModifier (enum) entries into KeyboardModifiers (flags). Might not be needed anymore with Qt 6. - With that, casting the final value is now unneeded. ==== qutebrowser/browser/webengine/notification.py - Remove now unneeded type ignore for signal. - Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished() is a QProcess, not just any QObject. ==== qutebrowser/browser/webengine/webenginedownloads.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webengine/webengineelem.py - Specify the type of WebEngineElement._tab. - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webengineinspector.py - See changes to inspector.py and next commit. - Remove now unneeded type ignore for signal. ==== qutebrowser/browser/webengine/webenginequtescheme.py - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webenginesettings.py - Ignore access of .setter attribute which we patch onto QWebEngineProfile. Would be nice to have a subclass or wrapper-class instead. ==== qutebrowser/browser/webengine/webenginetab.py - Specified the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Remove some now-unneeded type ignores for creating FindFlags. - Specify more concrete types for WebEngineTab members where we actually need to access WebEngine-specific attributes. - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webengine/webview.py - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webkit/network/networkreply.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webkit/webkitinspector.py - See changes to inspector.py and next commit. ==== qutebrowser/browser/webkit/webkittab.py - Specify the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Add a type ignore for WebKitAction because our workaround needs to treat them as ints (which is allowed by PyQt, even if not type-safe). - Add new ignores for findText calls: The text is a QString and can be None; the flags are valid despite mypy thinking they aren't (stubs regression?). - Specify the type for WebKitHistoryPrivate._history, because we access WebKit-specific attributes. See above (_widget) re this being debatable. - Make mypy aware that .currentFrame() and .frameAt() can return None (stubs regression?). - Make sure the .page() and .page().networkAccessManager() are our subclasses rather than the more generic QtWebKit objects, as we use custom attributes. - Add new type ignores for signals (stubs regression!) 
==== qutebrowser/browser/webkit/webpage.py - Make sure the .networkAccessManager() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. - Replace a cast by a type ignore. The cast didn't work anymore. ==== qutebrowser/browser/webkit/webview.py - Make sure the .page() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. ==== qutebrowser/commands/userscripts.py - Remove now unneeded type ignore for signal. ==== qutebrowser/completion/completer.py - Add a new _completion() getter (which ensures it actually gets the completion view) rather than accessing the .parent() directly (which could be any QObject). ==== qutebrowser/completion/completiondelegate.py - Make sure self.parent() is a CompletionView (no helper method as there is only one instance). - Remove a now-unneeded type ignore for adding QSizes. ==== qutebrowser/completion/completionwidget.py - Add a ._model() getter which ensures that we get a CompletionModel (with custom attributes) rather than Qt's .model() which can be any QAbstractItemModel (or None). - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/completion/models/completionmodel.py - Remove now unneeded type ignores for signals. - Ignore a complaint about .set_pattern() not being defined. Completion categories don't share any common parent class, so it would be good to introduce a typing.Protocol for this. See #7098. ==== qutebrowser/components/misccommands.py - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/components/readlinecommands.py - Make sure QApplication.instance() is a QApplication (and not just a QCoreApplication). This includes the former "not None" check. ==== qutebrowser/components/scrollcommands.py - Add basic annotation for "funcs" dict. Could have a callable protocol to specify it needs a count kwarg, see #7098. ==== qutebrowser/config/stylesheet.py - Correctly specify that stylesheet apply to QWidgets, not any QObject. - Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy about this with overloads and protocols (stylesheet for set_register being None => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not worth the troble. See #7098. ==== qutebrowser/keyinput/keyutils.py - Remove some now-unneeded type ignores and add a cast for using a single enum value as flags. Might need to look at this again with Qt 6 support. ==== qutebrowser/keyinput/modeman.py - Add a FIXME for using a TypedDict, see comments for hints.py above. ==== qutebrowser/mainwindow/mainwindow.py - Remove now-unneeded type ignores for calling with OR-ed flags. - Improve where we cast from WindowType to WindowFlags, no int needed - Use new .tab_bar() getter, see below. ==== qutebrowser/mainwindow/prompt.py - Remove now-unneeded type ignores for calling with OR-ed flags. ==== qutebrowser/mainwindow/statusbar/bar.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/statusbar/command.py - Fix type for setText() override (from QLineEdit): text can be None (QString in C++). ==== qutebrowser/mainwindow/statusbar/url.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/tabbedbrowser.py - Specify that TabDeque manages browser tabs, not any QWidgets. It accesses AbstractTab-specific attributes. - Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access .maybe_hide. 
- Fix the annotations for stored marks: Scroll positions are a QPoint, not int. - Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and .widget(), which ensures that the return values are valid AbstractTabs (or None for _tab_by_idx). This is needed because we access AbstractTab-specific attributes. - For some places, where the tab can be None, continue using .currentTab() but add asserts. - Remove some now-unneeded [unreachable] ignores, as mypy knows about the None possibility now. ==== qutebrowser/mainwindow/tabwidget.py - Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and .widget() are of type TabBar and AbstractTab, respectively. - Add additional assertions where we expect ._tab_by_idx() to never be None. - Remove dead code in get_tab_fields for handling a None y scroll position. I was unable to find any place in the code where this could be set to None. - Remove some now-unneeded type ignores and casts, as mypy now knows that _type_by_idx() could be None. - Work around a strange instance where mypy complains about not being able to find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility, despite it clearly being shown as a bool *inside* that class without any annotation. - Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in fact a TabWidget. ==== qutebrowser/misc/crashsignal.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/editor.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/ipc.py - Remove now unneeded type ignores for signals. - Add new type ignores for .error() which is both a signal and a getter (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was renamed to errorOccurred in 5.15. ==== qutebrowser/misc/objects.py - Make sure mypy knows that objects.app is our custom Application (with custom attributes) rather than any QApplication. ==== qutebrowser/utils/objreg.py - Ignore attr-defined for .win_id attributes. Maybe could add a typing.Protocol, but ideally, the whole objreg stuff should die one day anyways. ==== tests/unit/completion/test_completer.py - Make CompletionWidgetStub inherit from CompletionView so that it passes the new isinstance() asserts in completer.py (see above).
https://github.com/qutebrowser/qutebrowser.git
def completion_item_focus(self, which, history=False):
    if history:
        if (self._cmd.text() == ':' or self._cmd.history.is_browsing() or
                not self._active):
            if which == 'next':
                self._cmd.command_history_next()
                return
            elif which == 'prev':
                self._cmd.command_history_prev()
                return
            else:
                raise cmdutils.CommandError("Can't combine --history with "
                                            "{}!".format(which))

    if not self._active:
        return

    selmodel = self.selectionModel()
    indices = {
        'next': lambda: self._next_idx(upwards=False),
        'prev': lambda: self._next_idx(upwards=True),
        'next-category': lambda: self._next_category_idx(upwards=False),
        'prev-category': lambda: self._next_category_idx(upwards=True),
        'next-page': lambda: self._next_page(upwards=False),
        'prev-page': lambda: self._next_page(upwards=True),
    }
    idx = indices[which]()

    if not idx.isValid():
        return

    selmodel.setCurrentIndex(
        idx,
        QItemSelectionModel.ClearAndSelect | QItemSelectionModel.Rows)

    # if the last item is focused, try to fetch more
    next_idx = self.indexBelow(idx)
    if not self.visualRect(next_idx).isValid():
        self.expandAll()

    count = self._model().count()
    if count == 0:
        self.hide()
    elif count == 1 and config.val.completion.quick:
        self.hide()
    elif config.val.completion.show == 'auto':
        self.show()
292
completionwidget.py
Python
qutebrowser/completion/completionwidget.py
a20bb67a878b2e68abf8268c1b0a27f018d01352
qutebrowser
14
291,932
102
18
51
564
53
0
170
866
async_send_message
Replace discord.py with nextcord (#66540) * Replace discord.py with nextcord * Typing tweak * Another pip check decrease :)
https://github.com/home-assistant/core.git
async def async_send_message(self, message, **kwargs):
    nextcord.VoiceClient.warn_nacl = False
    discord_bot = nextcord.Client()
    images = None
    embedding = None

    if ATTR_TARGET not in kwargs:
        _LOGGER.error("No target specified")
        return None

    data = kwargs.get(ATTR_DATA) or {}
    embeds: list[nextcord.Embed] = []

    if ATTR_EMBED in data:
        embedding = data[ATTR_EMBED]
        fields = embedding.get(ATTR_EMBED_FIELDS) or []

        if embedding:
            embed = nextcord.Embed(**embedding)
            for field in fields:
                embed.add_field(**field)
            if ATTR_EMBED_FOOTER in embedding:
                embed.set_footer(**embedding[ATTR_EMBED_FOOTER])
            if ATTR_EMBED_AUTHOR in embedding:
                embed.set_author(**embedding[ATTR_EMBED_AUTHOR])
            if ATTR_EMBED_THUMBNAIL in embedding:
                embed.set_thumbnail(**embedding[ATTR_EMBED_THUMBNAIL])
            embeds.append(embed)

    if ATTR_IMAGES in data:
        images = []
        for image in data.get(ATTR_IMAGES, []):
            image_exists = await self.hass.async_add_executor_job(
                self.file_exists, image
            )
            if image_exists:
                images.append(image)
            else:
                _LOGGER.warning("Image not found: %s", image)

    await discord_bot.login(self.token)

    try:
        for channelid in kwargs[ATTR_TARGET]:
            channelid = int(channelid)
            try:
                channel = await discord_bot.fetch_channel(channelid)
            except nextcord.NotFound:
                try:
                    channel = await discord_bot.fetch_user(channelid)
                except nextcord.NotFound:
                    _LOGGER.warning("Channel not found for ID: %s", channelid)
                    continue
            # Must create new instances of File for each channel.
            files = [nextcord.File(image) for image in images] if images else []
            await channel.send(message, files=files, embeds=embeds)
    except (nextcord.HTTPException, nextcord.NotFound) as error:
        _LOGGER.warning("Communication error: %s", error)
    await discord_bot.close()
347
notify.py
Python
homeassistant/components/discord/notify.py
cb03db8df4bf8b50945b36a4b0debcaaed1190a8
core
19
260,724
52
13
26
251
32
0
71
310
fit
MAINT Parameter Validation for Lars, LarsCV, LassoLars, LassoLarsCV and LassoLarsIC (#24033) Co-authored-by: jeremie du boisberranger <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def fit(self, X, y, Xy=None):
    self._validate_params()

    X, y = self._validate_data(X, y, y_numeric=True, multi_output=True)

    _normalize = _deprecate_normalize(
        self.normalize, default=True, estimator_name=self.__class__.__name__
    )

    alpha = getattr(self, "alpha", 0.0)
    if hasattr(self, "n_nonzero_coefs"):
        alpha = 0.0  # n_nonzero_coefs parametrization takes priority
        max_iter = self.n_nonzero_coefs
    else:
        max_iter = self.max_iter

    if self.jitter is not None:
        rng = check_random_state(self.random_state)
        noise = rng.uniform(high=self.jitter, size=len(y))
        y = y + noise

    self._fit(
        X,
        y,
        max_iter=max_iter,
        alpha=alpha,
        fit_path=self.fit_path,
        normalize=_normalize,
        Xy=Xy,
    )

    return self
169
_least_angle.py
Python
sklearn/linear_model/_least_angle.py
6c0e0b2e4723d11e29057635c7061a36bc1a8512
scikit-learn
3
290,687
8
6
137
23
5
0
8
14
test_statistic_during_period_hole
Fix statistic_during_period for data with holes (#81847)
https://github.com/home-assistant/core.git
async def test_statistic_during_period_hole(recorder_mock, hass, hass_ws_client):
    id = 1
900
test_websocket_api.py
Python
tests/components/recorder/test_websocket_api.py
9b8f94363c0b4ecd1434ac1ac3bb82febd3889d0
core
17
111,333
61
17
19
260
27
0
100
356
set_annotations
Add SpanRuler component (#9880) * Add SpanRuler component Add a `SpanRuler` component similar to `EntityRuler` that saves a list of matched spans to `Doc.spans[spans_key]`. The matches from the token and phrase matchers are deduplicated and sorted before assignment but are not otherwise filtered. * Update spacy/pipeline/span_ruler.py Co-authored-by: Sofie Van Landeghem <[email protected]> * Fix cast * Add self.key property * Use number of patterns as length * Remove patterns kwarg from init * Update spacy/tests/pipeline/test_span_ruler.py Co-authored-by: Sofie Van Landeghem <[email protected]> * Add options for spans filter and setting to ents * Add `spans_filter` option as a registered function' * Make `spans_key` optional and if `None`, set to `doc.ents` instead of `doc.spans[spans_key]`. * Update and generalize tests * Add test for setting doc.ents, fix key property type * Fix typing * Allow independent doc.spans and doc.ents * If `spans_key` is set, set `doc.spans` with `spans_filter`. * If `annotate_ents` is set, set `doc.ents` with `ents_fitler`. * Use `util.filter_spans` by default as `ents_filter`. * Use a custom warning if the filter does not work for `doc.ents`. * Enable use of SpanC.id in Span * Support id in SpanRuler as Span.id * Update types * `id` can only be provided as string (already by `PatternType` definition) * Update all uses of Span.id/ent_id in Doc * Rename Span id kwarg to span_id * Update types and docs * Add ents filter to mimic EntityRuler overwrite_ents * Refactor `ents_filter` to take `entities, spans` args for more filtering options * Give registered filters more descriptive names * Allow registered `filter_spans` filter (`spacy.first_longest_spans_filter.v1`) to take any number of `Iterable[Span]` objects as args so it can be used for spans filter or ents filter * Implement future entity ruler as span ruler Implement a compatible `entity_ruler` as `future_entity_ruler` using `SpanRuler` as the underlying component: * Add `sort_key` and `sort_reverse` to allow the sorting behavior to be customized. (Necessary for the same sorting/filtering as in `EntityRuler`.) * Implement `overwrite_overlapping_ents_filter` and `preserve_existing_ents_filter` to support `EntityRuler.overwrite_ents` settings. * Add `remove_by_id` to support `EntityRuler.remove` functionality. * Refactor `entity_ruler` tests to parametrize all tests to test both `entity_ruler` and `future_entity_ruler` * Implement `SpanRuler.token_patterns` and `SpanRuler.phrase_patterns` properties. Additional changes: * Move all config settings to top-level attributes to avoid duplicating settings in the config vs. `span_ruler/cfg`. (Also avoids a lot of casting.) * Format * Fix filter make method name * Refactor to use same error for removing by label or ID * Also provide existing spans to spans filter * Support ids property * Remove token_patterns and phrase_patterns * Update docstrings * Add span ruler docs * Fix types * Apply suggestions from code review Co-authored-by: Sofie Van Landeghem <[email protected]> * Move sorting into filters * Check for all tokens in seen tokens in entity ruler filters * Remove registered sort key * Set Token.ent_id in a backwards-compatible way in Doc.set_ents * Remove sort options from API docs * Update docstrings * Rename entity ruler filters * Fix and parameterize scoring * Add id to Span API docs * Fix typo in API docs * Include explicit labeled=True for scorer Co-authored-by: Sofie Van Landeghem <[email protected]>
https://github.com/explosion/spaCy.git
def set_annotations(self, doc, matches):
    entities = list(doc.ents)
    new_entities = []
    seen_tokens = set()
    for match_id, start, end in matches:
        if any(t.ent_type for t in doc[start:end]) and not self.overwrite:
            continue
        # check for end - 1 here because boundaries are inclusive
        if start not in seen_tokens and end - 1 not in seen_tokens:
            if match_id in self._ent_ids:
                label, ent_id = self._ent_ids[match_id]
                span = Span(doc, start, end, label=label, span_id=ent_id)
            else:
                span = Span(doc, start, end, label=match_id)
            new_entities.append(span)
            entities = [
                e for e in entities if not (e.start < end and e.end > start)
            ]
            seen_tokens.update(range(start, end))
    doc.ents = entities + new_entities
171
entityruler.py
Python
spacy/pipeline/entityruler.py
a322d6d5f2f85c2da6cded4fcd6143d41b5a9e96
spaCy
11
166,951
8
9
2
61
9
1
8
13
data_missing
DOC: Added docstrings to fixtures defined in array module (#47211)
https://github.com/pandas-dev/pandas.git
def data_missing(dtype):
    return pd.array([np.nan, 1], dtype=dtype)


@pytest.fixture(params=["data", "data_missing"])
@pytest.fixture(params=["data", "data_missing"])
23
conftest.py
Python
pandas/tests/arrays/integer/conftest.py
89be1f053b695c4ce1c0569f737caf3f03c12128
pandas
1
124,040
51
12
24
174
18
0
64
268
save
[RLlib] Save serialized PolicySpec. Extract `num_gpus` related logics into a util function. (#25954)
https://github.com/ray-project/ray.git
def save(self) -> bytes:
    filters = self.get_filters(flush_after=True)
    state = {}
    policy_specs = {}
    connector_enabled = self.policy_config.get("enable_connectors", False)
    for pid in self.policy_map:
        state[pid] = self.policy_map[pid].get_state()
        policy_spec = self.policy_map.policy_specs[pid]
        # If connectors are enabled, try serializing the policy spec
        # instead of picking the spec object.
        policy_specs[pid] = (
            policy_spec.serialize() if connector_enabled else policy_spec
        )
    return pickle.dumps(
        {
            "filters": filters,
            "state": state,
            "policy_specs": policy_specs,
        }
    )
106
rollout_worker.py
Python
rllib/evaluation/rollout_worker.py
d83bbda2816b1781eb61342b4539578149eeb686
ray
3
276,780
40
13
15
176
27
0
48
181
test_get_file_with_failed_integrity_check
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def test_get_file_with_failed_integrity_check(self):
    orig_dir = self.get_temp_dir()
    file_path = os.path.join(orig_dir, "test.txt")

    with open(file_path, "w") as text_file:
        text_file.write("Float like a butterfly, sting like a bee.")

    hashval = "0" * 64

    origin = urllib.parse.urljoin(
        "file://", urllib.request.pathname2url(os.path.abspath(file_path))
    )

    with self.assertRaisesRegex(
        ValueError, "Incomplete or corrupted file.*"
    ):
        _ = keras.utils.data_utils.get_file(
            "test.txt", origin, file_hash=hashval
        )
100
data_utils_test.py
Python
keras/utils/data_utils_test.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
1
266,327
65
14
14
215
17
0
88
231
_get_user_property
user: Update logic to check if user exists in macOS (#76592) 'dscl -list' returns 0 even if the user does not exists. This leads to errorenous condition in user module. Using 'dscl -read UniqueID' can return if user exists or not. Signed-off-by: Abhijeet Kasurde <[email protected]>
https://github.com/ansible/ansible.git
def _get_user_property(self, property):
    cmd = self._get_dscl()
    cmd += ['-read', '/Users/%s' % self.name, property]
    (rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
    if rc != 0:
        return None
    # from dscl(1)
    # if property contains embedded spaces, the list will instead be
    # displayed one entry per line, starting on the line after the key.
    lines = out.splitlines()
    # sys.stderr.write('*** |%s| %s -> %s\n' % (property, out, lines))
    if len(lines) == 1:
        return lines[0].split(': ')[1]

    if len(lines) > 2:
        return '\n'.join([lines[1].strip()] + lines[2:])

    if len(lines) == 2:
        return lines[1].strip()

    return None
130
user.py
Python
lib/ansible/modules/user.py
66e392d4e219bf93d690c1fb3d3c854069efa27f
ansible
5
256,951
12
9
6
50
8
0
12
21
is_telemetry_enabled
Add basic telemetry features (#2314) * add basic telemetry features * change pipeline_config to _component_config * Update Documentation & Code Style * add super().__init__() calls to error classes * make posthog mock work with python 3.7 * Update Documentation & Code Style * update link to docs web page * log exceptions, send event for raised HaystackErrors, refactor Path(CONFIG_PATH) * add comment on send_event in BaseComponent.init() and fix mypy * mock NonPrivateParameters and fix pylint undefined-variable * Update Documentation & Code Style * check model path contains multiple / * add test for writing to file * add test for en-/disable telemetry * Update Documentation & Code Style * merge file deletion methods and ignore pylint global statement * Update Documentation & Code Style * set env variable in demo to activate telemetry * fix mock of HAYSTACK_TELEMETRY_ENABLED * fix mypy and linter * add CI as env variable to execution contexts * remove threading, add test for custom error event * Update Documentation & Code Style * simplify config/log file deletion * add test for final event being sent * force writing config file in test * make test compatible with python 3.7 * switch to posthog production server * Update Documentation & Code Style Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
https://github.com/deepset-ai/haystack.git
def is_telemetry_enabled() -> bool:
    telemetry_environ = os.environ.get(HAYSTACK_TELEMETRY_ENABLED, "True")
    return telemetry_environ.lower() != "false"
27
telemetry.py
Python
haystack/telemetry.py
ac5617e757e9ace6f30b7291686d9dbbc339f433
haystack
1
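Editorial illustration (not part of the dataset record above): the function in the record treats any value other than the literal string "false" (case-insensitive) in the HAYSTACK_TELEMETRY_ENABLED environment variable as "enabled". A hedged sketch of how telemetry would be switched off, assuming that env-var name is the one read by the code above:

import os

# Any casing of "false" disables telemetry; every other value (including unset,
# which defaults to "True") leaves it enabled.
os.environ["HAYSTACK_TELEMETRY_ENABLED"] = "False"

# is_telemetry_enabled() would now return False, since "false".lower() == "false".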
150,410
14
10
7
100
8
0
17
66
start_leader_mode
initial concept for replicate, basic leader and follower logic
https://github.com/freqtrade/freqtrade.git
def start_leader_mode(self):
    logger.info("Running rpc.replicate in Leader mode")
    logger.info("-" * 15)
    logger.info(f"API_KEY: {self.secret_api_key}")
    logger.info("-" * 15)

    self.register_leader_endpoint()
    self.submit_coroutine(self.leader_loop())
50
__init__.py
Python
freqtrade/rpc/replicate/__init__.py
9f6bba40af1a407f190a89f5c0c8b4e3f528ba46
freqtrade
1
297,518
58
15
23
216
39
0
71
288
async_get_measures
Refactor withings constant (#84095) Split withings constant
https://github.com/home-assistant/core.git
async def async_get_measures(self) -> dict[Measurement, Any]:
    _LOGGER.debug("Updating withings measures")
    now = dt.utcnow()
    startdate = now - datetime.timedelta(days=7)

    response = await self._hass.async_add_executor_job(
        self._api.measure_get_meas, None, None, startdate, now, None, startdate
    )

    # Sort from oldest to newest.
    groups = sorted(
        query_measure_groups(
            response, MeasureTypes.ANY, MeasureGroupAttribs.UNAMBIGUOUS
        ),
        key=lambda group: group.created.datetime,
        reverse=False,
    )

    return {
        WITHINGS_MEASURE_TYPE_MAP[measure.type]: round(
            float(measure.value * pow(10, measure.unit)), 2
        )
        for group in groups
        for measure in group.measures
        if measure.type in WITHINGS_MEASURE_TYPE_MAP
    }
144
common.py
Python
homeassistant/components/withings/common.py
c51c8f7e8f4b9ede57a0fbbec0d74023d87d553e
core
4
36,057
14
11
5
67
9
0
19
42
softmax_backward_data
DeBERTa/DeBERTa-v2/SEW Support for torch 1.11 (#16043) * Support for torch 1.11 * Address Sylvain's comment
https://github.com/huggingface/transformers.git
def softmax_backward_data(parent, grad_output, output, dim, self):
    if is_torch_less_than_1_11:
        return _softmax_backward_data(grad_output, output, parent.dim, self)
    else:
        return _softmax_backward_data(grad_output, output, parent.dim, self.dtype)
47
pytorch_utils.py
Python
src/transformers/pytorch_utils.py
e66743e6c9601a4b12ffc2335a9f60a41d1ca60c
transformers
2
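Editorial illustration (not part of the dataset record above): the record's code branches on `is_torch_less_than_1_11` because torch 1.11 changed the last argument of `_softmax_backward_data` from a tensor to a dtype. A hedged sketch of such a version guard; the exact constant lives in transformers.pytorch_utils, so this is an assumption about its shape, not a copy of it:

# Sketch of a torch version guard, assuming `packaging` is available.
import torch
from packaging import version

is_torch_less_than_1_11 = version.parse(torch.__version__) < version.parse("1.11")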
133,852
48
20
30
293
27
0
74
452
build_q_model
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def build_q_model(self, obs_space, action_space, num_outputs, q_model_config, name):
    self.concat_obs_and_actions = False
    if self.discrete:
        input_space = obs_space
    else:
        orig_space = getattr(obs_space, "original_space", obs_space)
        if isinstance(orig_space, Box) and len(orig_space.shape) == 1:
            input_space = Box(
                float("-inf"),
                float("inf"),
                shape=(orig_space.shape[0] + action_space.shape[0],),
            )
            self.concat_obs_and_actions = True
        else:
            if isinstance(orig_space, gym.spaces.Tuple):
                spaces = list(orig_space.spaces)
            elif isinstance(orig_space, gym.spaces.Dict):
                spaces = list(orig_space.spaces.values())
            else:
                spaces = [obs_space]
            input_space = gym.spaces.Tuple(spaces + [action_space])

    model = ModelCatalog.get_model_v2(
        input_space,
        action_space,
        num_outputs,
        q_model_config,
        framework="torch",
        name=name,
    )
    return model
189
sac_torch_model.py
Python
rllib/agents/sac/sac_torch_model.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
6
20,609
96
26
53
627
48
0
164
615
_makeTags
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")):
    if isinstance(tagStr, str_type):
        resname = tagStr
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name

    tagAttrName = Word(alphas, alphanums + "_-:")
    if xml:
        tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes)
        openTag = (
            suppress_LT
            + tagStr("tag")
            + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
            + Opt("/", default=[False])("empty").set_parse_action(
                lambda s, l, t: t[0] == "/"
            )
            + suppress_GT
        )
    else:
        tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word(
            printables, exclude_chars=">"
        )
        openTag = (
            suppress_LT
            + tagStr("tag")
            + Dict(
                ZeroOrMore(
                    Group(
                        tagAttrName.set_parse_action(lambda t: t[0].lower())
                        + Opt(Suppress("=") + tagAttrValue)
                    )
                )
            )
            + Opt("/", default=[False])("empty").set_parse_action(
                lambda s, l, t: t[0] == "/"
            )
            + suppress_GT
        )
    closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False)

    openTag.set_name("<%s>" % resname)
    # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
    openTag.add_parse_action(
        lambda t: t.__setitem__(
            "start" + "".join(resname.replace(":", " ").title().split()), t.copy()
        )
    )
    closeTag = closeTag(
        "end" + "".join(resname.replace(":", " ").title().split())
    ).set_name("</%s>" % resname)
    openTag.tag = resname
    closeTag.tag = resname
    openTag.tag_body = SkipTo(closeTag())
    return openTag, closeTag
365
helpers.py
Python
pipenv/patched/notpip/_vendor/pyparsing/helpers.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
3
309,622
35
13
13
168
18
0
42
169
async_send_message
Add LG webOS Smart TV config flow support (#64117) * Add webOS Smart TV config flow support (#53256) * Add Webostv config flow * Fix tests mocks and apply review comments * Apply review comments * Change config flow to use ssdp UDN as unique_id * Fix device info * More review comments * Fix _async_check_configured_entry * Remove turn on script * Add webOS Smart TV device triggers (#53752) * Add webOS Smart TV config flow support (#53256) * Add Webostv config flow * Fix tests mocks and apply review comments * Apply review comments * Change config flow to use ssdp UDN as unique_id * Fix device info * More review comments * Fix _async_check_configured_entry * Remove turn on script * Add webOS Smart TV device triggers (#53752) * Fix webOS Smart TV mypy and pylint errors (#62620) * Change webOS Smart TV PyPi aiopylgtv package to bscpylgtv (#62633) * Change webOS Smart TV PyPi aiopylgtv package to bscpylgtv * Update bscpylgtv to 0.2.8 (revised websockets requirment) * Change webOS Smart TV PyPi package to aiowebostv (#63759) * Change webOS Smart TV PyPi package to aiowebostv * Apply suggestions from code review Co-authored-by: Martin Hjelmare <[email protected]> Co-authored-by: Martin Hjelmare <[email protected]> * webOS TV check UUID for user added device (#63817) * webOS TV check uuid when for user added device * Apply suggestions from code review Co-authored-by: Martin Hjelmare <[email protected]> * Add test for form abort and host update Co-authored-by: Martin Hjelmare <[email protected]> * Rework webOS Smart TV device trigger to custom trigger platform (#63950) * Rework webOS Smart TV device trigger to custom trigger platform * Review comments and add tests * Fix webOS TV import from YAML (#63996) * Fix webOS TV import from YAML * Fix requirements * Migrate YAML entities unique id to UUID * Add backoff to migration task delay * Assert result data and unique_id * Apply suggestions from code review Co-authored-by: Martin Hjelmare <[email protected]> * Add codeowner Co-authored-by: Martin Hjelmare <[email protected]>
https://github.com/home-assistant/core.git
async def async_send_message(self, message="", **kwargs):
    try:
        if not self._client.is_connected():
            await self._client.connect()

        data = kwargs.get(ATTR_DATA)
        icon_path = data.get(CONF_ICON, "") if data else None
        await self._client.send_message(message, icon_path=icon_path)
    except WebOsTvPairError:
        _LOGGER.error("Pairing with TV failed")
    except FileNotFoundError:
        _LOGGER.error("Icon %s not found", icon_path)
    except WEBOSTV_EXCEPTIONS:
        _LOGGER.error("TV unreachable")
97
notify.py
Python
homeassistant/components/webostv/notify.py
dee843bf6e5ca84a94f336a239f6a6138c4c28e6
core
6
133,550
3
6
20
15
3
0
3
6
run_task_workload
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
https://github.com/ray-project/ray.git
def run_task_workload(total_num_cpus, smoke):
131
test_chaos_basic.py
Python
release/nightly_tests/chaos_test/test_chaos_basic.py
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
3
248,367
81
13
27
256
25
0
106
442
test_do_not_prune_gap_if_state_different
Pull out less state when handling gaps mk2 (#12852)
https://github.com/matrix-org/synapse.git
def test_do_not_prune_gap_if_state_different(self):
    # Fudge a second event which points to an event we don't have.
    remote_event_2 = event_from_pdu_json(
        {
            "type": EventTypes.Message,
            "state_key": "@user:other",
            "content": {},
            "room_id": self.room_id,
            "sender": "@user:other",
            "depth": 10,
            "prev_events": ["$some_unknown_message"],
            "auth_events": [],
            "origin_server_ts": self.clock.time_msec(),
        },
        RoomVersions.V6,
    )

    # Now we persist it with state with a dropped history visibility
    # setting. The state resolution across the old and new event will then
    # include it, and so the resolved state won't match the new state.
    state_before_gap = dict(
        self.get_success(self.state.get_current_state_ids(self.room_id))
    )
    state_before_gap.pop(("m.room.history_visibility", ""))

    context = self.get_success(
        self.state.compute_event_context(
            remote_event_2,
            state_ids_before_event=state_before_gap,
        )
    )

    self.get_success(self.persistence.persist_event(remote_event_2, context))

    # Check that we haven't dropped the old extremity.
    self.assert_extremities([self.remote_event_1.event_id, remote_event_2.event_id])
149
test_events.py
Python
tests/storage/test_events.py
b83bc5fab57b37f75a79d02213d6032c586fd36e
synapse
1
190,708
17
11
4
63
7
0
18
43
test_allow_ordered_sequences_only
Disallow unordered sequences in pytest.approx (#9709) Fix #9692
https://github.com/pytest-dev/pytest.git
def test_allow_ordered_sequences_only(self) -> None:
    with pytest.raises(TypeError, match="only supports ordered sequences"):
        assert {1, 2, 3} == approx({1, 2, 3})
39
approx.py
Python
testing/python/approx.py
5f3d94c47eb0f4487ef52ac520b3c48170220fc6
pytest
1
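Editorial illustration (not part of the dataset record above): in pytest versions that include the change referenced by the record (#9709), `pytest.approx` accepts ordered sequences such as lists and tuples but rejects unordered sets. A hedged sketch of that behaviour:

import pytest

# Ordered sequences compare element-wise within the default tolerance.
assert [0.1 + 0.2, 0.2 + 0.4] == pytest.approx([0.3, 0.6])

# Unordered sequences raise TypeError, exactly as the test record asserts.
with pytest.raises(TypeError, match="only supports ordered sequences"):
    assert {1, 2, 3} == pytest.approx({1, 2, 3})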
128,593
62
11
29
337
33
0
92
206
test_remove_node_before_result
[tune] Store sync config/checkpoint config in experiment, trial (#29019) This is some clean-up required for future changes to the syncing/checkpointing behavior. At the moment we pass single attributes of these configs to the Experiment class, and then subsequently to the Trial class, from which it is passed on to the trainable. If we extend the configurability in the future (e.g. provide fallback mechanisms in the checkpoint config, or make retry wait times configurable in the sync config), we would have to add more and more attributes to these intermediate classes. Instead, we should just pass and store the full config. As a next follow-up, we can pass these configs to the Trainable. Signed-off-by: Kai Fricke <[email protected]>
https://github.com/ray-project/ray.git
def test_remove_node_before_result(start_connected_emptyhead_cluster):
    cluster = start_connected_emptyhead_cluster
    node = cluster.add_node(num_cpus=1)
    cluster.wait_for_nodes()

    runner = TrialRunner(BasicVariantGenerator())
    kwargs = {
        "stopping_criterion": {"training_iteration": 3},
        "checkpoint_config": CheckpointConfig(checkpoint_frequency=2),
        "max_failures": 2,
    }
    trial = Trial("__fake", **kwargs)
    runner.add_trial(trial)

    runner.step()  # Start trial, call _train once
    running_trials = _get_running_trials(runner)
    assert len(running_trials) == 1
    assert _check_trial_running(running_trials[0])
    assert not trial.has_reported_at_least_once
    assert trial.status == Trial.RUNNING
    cluster.remove_node(node)
    cluster.add_node(num_cpus=1)
    cluster.wait_for_nodes()
    assert ray.cluster_resources()["CPU"] == 1

    # Process result: fetch data, invoke _train again
    runner.step()
    assert trial.last_result.get("training_iteration") == 1

    # Process result: discover failure, recover, _train (from scratch)
    while trial.status != Trial.TERMINATED:
        runner.step()

    assert trial.last_result.get("training_iteration") > 1
    with pytest.raises(TuneError):
        runner.step()
195
test_cluster.py
Python
python/ray/tune/tests/test_cluster.py
e142be077f0c727ab11ba51ecaba9a98b7bfe474
ray
2
181,624
11
9
10
58
10
0
13
26
test_generate_import_code
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
https://github.com/EpistasisLab/tpot.git
def test_generate_import_code():
    pipeline = creator.Individual.from_string('GaussianNB(RobustScaler(input_matrix))', tpot_obj._pset)

    expected_code = 

    assert expected_code == generate_import_code(pipeline, tpot_obj.operators)
33
export_tests.py
Python
tests/export_tests.py
388616b6247ca4ea8de4e2f340d6206aee523541
tpot
1
83,384
45
13
30
275
32
0
59
270
test_subscriptions_add_notification_default_none
stream_settings: Show stream privacy & description in stream events. Provide stream privacy and description in stream notification events when stream is created. In function "send_messages_for_new_subscribers" for when stream is created, put policy name and description of the stream. Fixes #21004
https://github.com/zulip/zulip.git
def test_subscriptions_add_notification_default_none(self) -> None:
    user_profile = self.example_user("iago")
    invitee_user_id = user_profile.id
    invitee_realm = user_profile.realm
    user_profile.enable_stream_desktop_notifications = True
    user_profile.enable_stream_push_notifications = True
    user_profile.enable_stream_audible_notifications = True
    user_profile.enable_stream_email_notifications = True
    user_profile.save()
    current_stream = self.get_streams(user_profile)[0]
    invite_streams = self.make_random_stream_names([current_stream])
    self.assert_adding_subscriptions_for_principal(
        invitee_user_id, invitee_realm, invite_streams, policy_name="Public"
    )
    subscription = self.get_subscription(user_profile, invite_streams[0])

    with mock.patch("zerver.models.Recipient.__str__", return_value="recip"):
        self.assertEqual(
            str(subscription),
            "<Subscription: "
            f"<UserProfile: {user_profile.email} {user_profile.realm}> -> recip>",
        )

    self.assertIsNone(subscription.desktop_notifications)
    self.assertIsNone(subscription.push_notifications)
    self.assertIsNone(subscription.audible_notifications)
    self.assertIsNone(subscription.email_notifications)
157
test_subs.py
Python
zerver/tests/test_subs.py
4b9770e270823b7ed2bbbeda0e4450f0ba6a288b
zulip
1
154,378
63
14
20
176
21
1
82
282
eval_to_file
TEST-#4879: Use pandas `ensure_clean()` in place of `io_tests_data` (#4881) Signed-off-by: Karthik Velayutham <[email protected]>
https://github.com/modin-project/modin.git
def eval_to_file(modin_obj, pandas_obj, fn, extension, **fn_kwargs):
    with ensure_clean_dir() as dirname:
        unique_filename_modin = get_unique_filename(
            extension=extension, data_dir=dirname
        )
        unique_filename_pandas = get_unique_filename(
            extension=extension, data_dir=dirname
        )

        # parameter `max_retries=0` is set for `to_csv` function on Ray engine,
        # in order to increase the stability of tests, we repeat the call of
        # the entire function manually
        last_exception = None
        for _ in range(3):
            try:
                getattr(modin_obj, fn)(unique_filename_modin, **fn_kwargs)
            except EXCEPTIONS as exc:
                last_exception = exc
                continue
            break
        else:
            raise last_exception

        getattr(pandas_obj, fn)(unique_filename_pandas, **fn_kwargs)

        assert assert_files_eq(unique_filename_modin, unique_filename_pandas)


@pytest.fixture
@pytest.fixture
104
test_io.py
Python
modin/pandas/test/test_io.py
5086a9ea37bc37e6e58da0ceaf5864b16cc8e0ed
modin
3
89,030
6
8
38
25
5
0
6
12
resolve_condition
perf(issue-search): optimize querying on perf issues by using hasAny on transaction.group_id (#41685) Special optimization for searching on transactions for performance issues by doing a `hasAny` check instead of arrayJoining `group_ids` column and applying a filter after. before query: ``` SELECT (arrayJoin((group_ids AS _snuba_group_ids)) AS _snuba_group_id), _snuba_group_id, (multiply(toUInt64(max((finish_ts AS _snuba_timestamp))), 1000) AS _snuba_last_seen), (ifNull(uniq(_snuba_group_id), 0) AS _snuba_total) FROM transactions_local SAMPLE 1.0 WHERE greaterOrEquals((finish_ts AS _snuba_finish_ts), toDateTime('2022-08-26T02:02:49', 'Universal')) AND less(_snuba_finish_ts, toDateTime('2022-11-24T02:03:49', 'Universal')) AND in((project_id AS _snuba_project_id), tuple(4550959674425346)) AND equals(('transaction' AS _snuba_type), 'transaction') AND in(_snuba_project_id, tuple(4550959674425346)) AND in(_snuba_group_id, (1, 2)) GROUP BY _snuba_group_id WITH TOTALS ORDER BY _snuba_last_seen DESC, _snuba_group_id ASC LIMIT 150 OFFSET 0 ``` after query: ``` SELECT (arrayJoin(arrayIntersect([1, 2], (group_ids AS _snuba_group_ids))) AS _snuba_group_id), (multiply(toUInt64(max((finish_ts AS _snuba_timestamp))), 1000) AS _snuba_last_seen), (ifNull(uniq(_snuba_group_id), 0) AS _snuba_total), _snuba_group_id FROM transactions_local SAMPLE 1.0 WHERE greaterOrEquals((finish_ts AS _snuba_finish_ts), toDateTime('2022-08-26T02:01:32', 'Universal')) AND less(_snuba_finish_ts, toDateTime('2022-11-24T02:02:32', 'Universal')) AND in((project_id AS _snuba_project_id), tuple(4550959669379074)) AND equals(hasAny(_snuba_group_ids, [1, 2]), 1) AND equals(('transaction' AS _snuba_type), 'transaction') AND in(_snuba_project_id, tuple(4550959669379074)) GROUP BY _snuba_group_id WITH TOTALS ORDER BY _snuba_last_seen DESC, _snuba_group_id ASC LIMIT 150 OFFSET 0 ```
https://github.com/getsentry/sentry.git
def resolve_condition(cond, column_resolver):
    index = get_function_index(cond)
312
snuba.py
Python
src/sentry/utils/snuba.py
1bcb129d69a6c4e481b950ebc5871e9c118db74f
sentry
17
265,190
50
14
13
206
19
0
68
156
prepare_cloned_fields
Closes #9414: Add clone() method to NetBoxModel for copying instance attributes
https://github.com/netbox-community/netbox.git
def prepare_cloned_fields(instance):
    # Generate the clone attributes from the instance
    if not hasattr(instance, 'clone'):
        return None
    attrs = instance.clone()

    # Prepare querydict parameters
    params = []
    for key, value in attrs.items():
        if type(value) in (list, tuple):
            params.extend([(key, v) for v in value])
        elif value not in (False, None):
            params.append((key, value))
        else:
            params.append((key, ''))

    # Return a QueryDict with the parameters
    return QueryDict('&'.join([f'{k}={v}' for k, v in params]), mutable=True)
122
utils.py
Python
netbox/utilities/utils.py
f9d81fd36232e9bf3f60a215d2c6a405b9b342fb
netbox
7
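Editorial illustration (not part of the dataset record above): a hedged sketch of what the helper in the record produces. `FakeInstance` is a hypothetical stand-in for a NetBoxModel whose `clone()` returns copyable attributes; it exists only for this example:

from django.http import QueryDict  # same type the helper returns

class FakeInstance:
    """Hypothetical object mimicking a model with a clone() method."""
    def clone(self):
        return {"name": "demo", "tags": ["a", "b"], "enabled": False}

params = prepare_cloned_fields(FakeInstance())
# List values expand into repeated keys; False/None collapse to empty strings:
# name=demo&tags=a&tags=b&enabled=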
144,307
17
11
17
113
12
0
25
112
_get_toplevel_child_nodes
[Ray DAG] Implement experimental Ray DAG API for task/class (#22058)
https://github.com/ray-project/ray.git
def _get_toplevel_child_nodes(self) -> Set["DAGNode"]:
    children = set()
    for a in self.get_args():
        if isinstance(a, DAGNode):
            children.add(a)
    for a in self.get_kwargs().values():
        if isinstance(a, DAGNode):
            children.add(a)
    return children
68
dag_node.py
Python
python/ray/experimental/dag/dag_node.py
c065e3f69ec248383d98b45a8d1c00832ccfdd57
ray
5
272,020
138
24
84
539
40
0
325
2,133
_validate_or_infer_batch_size
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
https://github.com/keras-team/keras.git
def _validate_or_infer_batch_size(self, batch_size, steps, x):
    if isinstance(
        x, (tf.compat.v1.data.Dataset, tf.data.Dataset, data_utils.Sequence)
    ) or tf_inspect.isgenerator(x):
        if batch_size is not None:
            raise ValueError(
                "The `batch_size` argument must not be specified for the given "
                "input type. Received input: {}, batch_size: {}".format(
                    x, batch_size
                )
            )
        return

    # Avoids the override in Sequential.layers which filters Input layers.
    # (Which are often the very layers that we're after.)
    layers = self._flatten_layers(include_self=False, recursive=False)
    first_layer = next(layers, None)
    if first_layer:
        # The per-replica static batch size.
        static_batch_size = training_utils.get_static_batch_size(
            first_layer
        )
        if static_batch_size is not None:

            # Determine number of times the user-supplied batch size will be split.
            if (
                self._distribution_strategy
                and distributed_training_utils.global_batch_size_supported(
                    self._distribution_strategy
                )
            ):
                num_splits_for_ds = (
                    self._distribution_strategy.num_replicas_in_sync
                )
            else:
                num_splits_for_ds = 1

            # Check `batch_size` argument is consistent with InputLayer.
            if batch_size is not None:
                if batch_size % num_splits_for_ds != 0:
                    raise ValueError(
                        "The `batch_size` argument ({}) must be divisible "
                        "the by number of replicas ({})".format(
                            batch_size, num_splits_for_ds
                        )
                    )
                per_replica_batch_size = batch_size // num_splits_for_ds

                if per_replica_batch_size != static_batch_size:
                    raise ValueError(
                        "The `batch_size` argument value {} is "
                        "incompatible with the specified batch size of "
                        "your Input Layer: {}".format(
                            per_replica_batch_size, static_batch_size
                        )
                    )

            # Check Dataset/Iterator batch size is consistent with InputLayer.
            if isinstance(
                x,
                (
                    tf.data.Dataset,
                    tf.compat.v1.data.Iterator,
                    tf.data.Iterator,
                ),
            ):
                ds_batch_size = tf.compat.v1.Dimension(
                    tf.nest.flatten(tf.compat.v1.data.get_output_shapes(x))[
                        0
                    ][0]
                ).value
                if ds_batch_size is not None:
                    if ds_batch_size % num_splits_for_ds != 0:
                        raise ValueError(
                            "The batch output shape of your `Dataset` {} "
                            "cannot be divisible by number of replicas {}".format(
                                ds_batch_size, num_splits_for_ds
                            )
                        )

                    ds_per_replica_batch_size = (
                        ds_batch_size // num_splits_for_ds
                    )
                    if ds_per_replica_batch_size != static_batch_size:
                        raise ValueError(
                            "The batch output shape of your `Dataset` is "
                            "{}, which is incompatible with the specified "
                            "batch size of your Input Layer: {}".format(
                                ds_per_replica_batch_size, static_batch_size
                            )
                        )

            # Set inferred batch size from the InputLayer.
            if steps is None:
                batch_size = static_batch_size * num_splits_for_ds

    if batch_size is None and steps is None:
        # Backwards compatibility
        batch_size = 32
    return batch_size
330
training_v1.py
Python
keras/engine/training_v1.py
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
18
19,898
44
12
16
112
14
0
52
160
requires_python
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def requires_python(self) -> SpecifierSet:
    value = self.metadata.get("Requires-Python")
    if value is None:
        return SpecifierSet()
    try:
        # Convert to str to satisfy the type checker; this can be a Header object.
        spec = SpecifierSet(str(value))
    except InvalidSpecifier as e:
        message = "Package %r has an invalid Requires-Python: %s"
        logger.warning(message, self.raw_name, e)
        return SpecifierSet()
    return spec
64
base.py
Python
pipenv/patched/notpip/_internal/metadata/base.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
3
189,464
18
13
5
61
9
0
20
59
_attribute_to_float
Hide more private methods from the docs. (#2468) * hide privs from text_mobject.py * hide privs from tex_mobject.py * hide privs from code_mobject.py * hide privs from svg_mobject.py * remove SVGPath and utils from __init__.py * don't import string_to_numbers * hide privs from geometry.py * hide privs from matrix.py * hide privs from numbers.py * hide privs from three_dimensions.py * forgot underscore under set_stroke_width_from_length * there were more i missed * unhidea method that was used in docs * forgot other text2hash * remove svg_path from docs
https://github.com/ManimCommunity/manim.git
def _attribute_to_float(self, attr):
    stripped_attr = "".join(
        [char for char in attr if char in string.digits + ".-e"],
    )
    return float(stripped_attr)
36
svg_mobject.py
Python
manim/mobject/svg/svg_mobject.py
902e7eb4f0147b5882a613b67467e38a1d47f01e
manim
3
120,755
76
18
27
287
26
0
96
246
test_converters
[jax2tf] Rewrites the converters_eval framework and makes various improvements. The most visible change is that we are now outputting a single table with models to test as rows, and the converters that are being tested as columns. This allows us to compare converters much easier: [NEW OUTPUT](https://github.com/google/jax/blob/91366a58e69e21b29677f01967951b988659307a/jax/experimental/jax2tf/converters_eval/converters_results.md) vs [OLD OUTPUT](https://github.com/google/jax/blob/main/jax/experimental/jax2tf/converters_eval/converters_results.md). Additional improvements: * The models to test now reside in a subdirectory `test_models`. Before, one had to manually import the Flax examples, which is error prone since these examples may change. * Simpler testing logic: no suites, just examples. I also reduced the number of abstractions for setting up the tests, now we just build ModelTestCase instances in `models.py`, which uses pure Flax and JAX. * Testing improvements: Both TFjs and TFLite functions can now be called with multiple arguments. * Markdown output is now a single table that contains all examples (rows) tested on all converters (columns). All error messages are below and linked. * We only construct a model once for all converters, which makes the total run ~4x faster (before we constructed one for each converter). TODO: * Add `tf.lite.experimental_from_jax` as a converter so we can compare with the jax2tf path. * For each TFLite converter, include one columns with Flex ops and one without. * Add more test cases: models from Scenic and T5X seem like good candidates. * Turns this library into actual tests and move everything under jax2tf/tests. PiperOrigin-RevId: 452694479
https://github.com/google/jax.git
def test_converters():
    results = {}
    converters = {x: CONVERTERS[x] for x in FLAGS.converters}
    for example_name, test_case_fn in get_test_cases().items():
        if FLAGS.examples and example_name not in FLAGS.examples:
            continue
        test_case = test_case_fn()  # This will create the model's variables.
        converter_results = []
        for converter_name, converter_fn in converters.items():
            logging.info('===== Testing example %s, Converter %s',
                         example_name, converter_name)
            error_msg = ''
            try:
                converter_fn(test_case)
                logging.info('=== OK!')
            except Exception as e:  # pylint: disable=broad-except
                if FLAGS.fail_on_error:
                    raise e
                error_msg = repr(e).replace('\\n', '\n')
                logging.info(
                    '=== ERROR %s',
                    error_msg if len(error_msg) < 250 else error_msg[:250] + '... (CROPPED)')
            converter_results.append((converter_name, error_msg))
        results[example_name] = converter_results
    if FLAGS.write_markdown:
        write_markdown(results)
169
models_test.py
Python
jax/experimental/jax2tf/converters_eval/models_test.py
1ecfe05a9e1e8e7b261ceb9c0b03a4ee48dee5b1
jax
10
247,159
27
12
12
142
10
0
33
169
test_check_for_extra_dependencies
Use importlib.metadata to read requirements (#12088) * Pull runtime dep checks into their own module * Reimplement `check_requirements` using `importlib` I've tried to make this clearer. We start by working out which of Synapse's requirements we need to be installed here and now. I was surprised that there wasn't an easier way to see which packages were installed by a given extra. I've pulled out the error messages into functions that deal with "is this for an extra or not". And I've rearranged the loop over two different sets of requirements into one loop with a "must be instaled" flag. I hope you agree that this is clearer. * Test cases
https://github.com/matrix-org/synapse.git
def test_check_for_extra_dependencies(self) -> None:
    with patch(
        "synapse.util.check_dependencies.metadata.requires",
        return_value=["dummypkg >= 1; extra == 'cool-extra'"],
    ), patch("synapse.util.check_dependencies.EXTRAS", {"cool-extra"}):
        with self.mock_installed_package(None):
            self.assertRaises(DependencyException, check_requirements, "cool-extra")
        with self.mock_installed_package(old):
            self.assertRaises(DependencyException, check_requirements, "cool-extra")
        with self.mock_installed_package(new):
            # should not raise
            check_requirements()
77
test_check_dependencies.py
Python
tests/util/test_check_dependencies.py
313581e4e9bc2ec3d59ccff86e3a0c02661f71c4
synapse
1
288,093
11
11
4
43
5
0
12
44
_fmt_dewpoint
Use unit_conversion in components (#79204) * Use unit_conversion in components * Two more
https://github.com/home-assistant/core.git
def _fmt_dewpoint(dewpoint):
    if dewpoint is not None:
        return round(TemperatureConverter.kelvin_to_celsius(dewpoint), 1)
    return None
26
weather_update_coordinator.py
Python
homeassistant/components/openweathermap/weather_update_coordinator.py
24c26dc03267da84e3c17d747ecb7143b54e8489
core
2
44,241
6
6
16
23
4
0
6
20
get_ui_field_behaviour
Add optional features in providers. (#21074) Some features in providers can be optional, depending on the presence of some libraries. Since the Providers Manager tries to import the right classes that are exposed via providers, it should not - in this case - log a warning message for those optional features. Previously, all ImportErrors were turned into debug logs, but now we only turn them into debug logs when the creator of the provider deliberately raised an AirflowOptionalProviderFeatureException. Instructions on how to raise such an exception in a way that keeps backwards compatibility were updated in the provider's documentation. Fixes: #20709
https://github.com/apache/airflow.git
def get_ui_field_behaviour() -> Dict[str, Any]:
    ...
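The commit message above describes turning optional-dependency import failures into debug-level logs only when the provider deliberately signals them. A hedged sketch of how a provider module might do that; the wrapped package name is a placeholder, not a real dependency:

from airflow.exceptions import AirflowOptionalProviderFeatureException

try:
    # "some_optional_sdk" stands in for whatever extra library backs the optional feature.
    import some_optional_sdk  # noqa: F401
except ImportError as e:
    # Signals the ProvidersManager that this feature is deliberately optional,
    # so the failed import is logged at debug level instead of as a warning.
    raise AirflowOptionalProviderFeatureException(e)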
13
base.py
Python
airflow/hooks/base.py
cb73053211367e2c2dd76d5279cdc7dc7b190124
airflow
1
64,212
49
13
35
298
20
0
86
63
get_gl_entries
patch: Removed files and code related to old distributed cost center feature
https://github.com/frappe/erpnext.git
def get_gl_entries(filters, accounting_dimensions):
    currency_map = get_currency(filters)
    select_fields = order_by_statement = "order by posting_date, account, creation"

    if filters.get("include_dimensions"):
        order_by_statement = "order by posting_date, creation"

    if filters.get("group_by") == "Group by Voucher":
        order_by_statement = "order by posting_date, voucher_type, voucher_no"
    if filters.get("group_by") == "Group by Account":
        order_by_statement = "order by account, posting_date, creation"

    if filters.get("include_default_book_entries"):
        filters['company_fb'] = frappe.db.get_value("Company", filters.get("company"), 'default_finance_book')

    dimension_fields = ""
    if accounting_dimensions:
        dimension_fields = ', '.join(accounting_dimensions) + ','

    gl_entries = frappe.db.sql(.format(
        dimension_fields=dimension_fields,
        select_fields=select_fields,
        conditions=get_conditions(filters),
        order_by_statement=order_by_statement
    ), filters, as_dict=1)

    if filters.get('presentation_currency'):
        return convert_to_presentation_currency(gl_entries, currency_map, filters.get('company'))
    else:
        return gl_entries
166
general_ledger.py
Python
erpnext/accounts/report/general_ledger/general_ledger.py
3dadfc9048d804e097ebfe6801802f2c980e04a7
erpnext
7
112,954
12
9
10
51
6
0
14
30
silence_stdout
Logging refactor (step 1) - experiment handlers (#4792)
https://github.com/microsoft/nni.git
def silence_stdout() -> None:
    handler = _handlers.pop('_stdout_', None)
    if handler is not None:
        _root_logger.removeHandler(handler)
29
log.py
Python
nni/runtime/log.py
4feab0e34b490500b06efd6e7e8a34d686702c2f
nni
2
22,604
11
8
19
73
11
0
11
17
indiac
refactor: clean code Signed-off-by: slowy07 <[email protected]>
https://github.com/geekcomputers/Python.git
def indiac():
    cases = f
    print(cases)
    print(
    )
    print("\nDeveloped By @TheDarkW3b")
12
coronacases.py
Python
coronacases.py
f0af0c43340763724f139fa68aa1e5a9ffe458b4
Python
1
337,632
22
9
6
83
10
0
26
43
load_training_checkpoint
DeepSpeed Revamp (#405) * deepspeed revamp * Update dataclasses.py * Update deepspeed.py * quality * fixing code * quality * FIx imports * saving 16bit model in zero stage 3 1. Saving 16bit model in zero stage 3 2. zero init in stage 3 support using HFDeepSpeedConfig * quality * adding test and fixing bugs * update makefile for deepspeed tests * Update test.yml * adding `deepspeed` as requirement for tests * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * quality * addressing comments * add example and minor updates 1. Add example to show the usage of config file with revamped deepspeed support. 2. update required deepspeed version to 0.6.5 2. reverting `reinit` change as it is not required, 3. raising Exception when using `clip_grad_value` with DeepSpeed/FSDP. * Documentation and Zero-3 Inference Support 1. Changes to support ZeRo Stage-3 Inference support. 2. minor bug fixes. 3. Documentation. * doc fix * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * addressing comments * update doc to address comments and bug fixes 1. update tests and add new one testing autofill functionality of `prepare` method. 2. fix bug related to zero-3 init related to HFDeepSpeedConfig 3. Update documentation addressing comments. * removing image and hosting it on `documentation-images` dataset * check for hidden_size for zero_opt heurisitics Co-authored-by: Sylvain Gugger <[email protected]>
https://github.com/huggingface/accelerate.git
def load_training_checkpoint(model, load_dir, tag=None, **kwargs):
    _, checkpoint_state_dict = model.load_checkpoint(load_dir, tag=tag, **kwargs)
    epoch = checkpoint_state_dict["epoch"]
    last_global_step = checkpoint_state_dict["last_global_step"]
    del checkpoint_state_dict
    return (epoch, last_global_step)


# New Code #
52
deepspeed_with_config_support.py
Python
examples/by_feature/deepspeed_with_config_support.py
1703b79a797dab765996764707186def7533d8fd
accelerate
1
129,422
4
7
17
20
3
0
4
11
test_actor_task_stacktrace
[Part 5] Set actor died error message in ActorDiedError (#20903) This is the second last PR to improve `ActorDiedError` exception. This propagates Actor death cause metadata to the ray error object. In this way, we can raise a better actor died error exception. After this PR is merged, I will add more metadata to each error message and write a documentation that explains when each error happens. TODO - [x] Fix test failures - [x] Add unit tests - [x] Fix Java/cpp cases Follow up PRs - Not allowing nullptr for RayErrorInfo input.
https://github.com/ray-project/ray.git
def test_actor_task_stacktrace(ray_start_regular):
    expected_output =
63
test_traceback.py
Python
python/ray/tests/test_traceback.py
5514711a3506f9e2675786b13b78c8f10a008f34
ray
2
244,143
47
14
16
217
25
0
80
249
forward_head
[Feature] Add Mask2Former to mmdet (#6938) update doc update doc format deepcopy pixel_decoder cfg move mask_pseudo_sampler cfg to config file move part of postprocess from head to detector fix bug in postprocessing move class setting from head to config file remove if else move mask2bbox to mask/util update docstring update docstring in result2json fix bug update class_weight add maskformer_fusion_head add maskformer fusion head update add cfg for filter_low_score update maskformer update class_weight update config update unit test rename param update comments in config rename variable, rm arg, update unit tests update mask2bbox add unit test for mask2bbox replace unsqueeze(1) and squeeze(1) add unit test for maskformer_fusion_head update docstrings update docstring delete \ remove modification to ce loss update docstring update docstring update docstring of ce loss update unit test update docstring update docstring update docstring rename rename add msdeformattn pixel decoder maskformer refactor add strides in config remove redundant code remove redundant code update unit test update config update
https://github.com/open-mmlab/mmdetection.git
def forward_head(self, decoder_out, mask_feature, attn_mask_target_size):
    decoder_out = self.transformer_decoder.post_norm(decoder_out)
    decoder_out = decoder_out.transpose(0, 1)
    # shape (num_queries, batch_size, c)
    cls_pred = self.cls_embed(decoder_out)
    # shape (num_queries, batch_size, c)
    mask_embed = self.mask_embed(decoder_out)
    # shape (num_queries, batch_size, h, w)
    mask_pred = torch.einsum('bqc,bchw->bqhw', mask_embed, mask_feature)
    attn_mask = F.interpolate(
        mask_pred,
        attn_mask_target_size,
        mode='bilinear',
        align_corners=False)
    # shape (num_queries, batch_size, h, w) ->
    #   (batch_size * num_head, num_queries, h, w)
    attn_mask = attn_mask.flatten(2).unsqueeze(1).repeat(
        (1, self.num_heads, 1, 1)).flatten(0, 1)
    attn_mask = attn_mask.sigmoid() < 0.5
    attn_mask = attn_mask.detach()

    return cls_pred, mask_pred, attn_mask
137
mask2former_head.py
Python
mmdet/models/dense_heads/mask2former_head.py
14f0e9585c15c28f0c31dcc3ea352449bbe5eb96
mmdetection
1
267,950
25
8
7
77
10
1
27
73
close
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
https://github.com/ansible/ansible.git
def close(self) -> None:
    if not self.process:
        return  # forwarding not in use

    self.process.terminate()

    display.info('Waiting for the session SSH port forwarding process to terminate.', verbosity=1)

    self.process.wait()


@contextlib.contextmanager
@contextlib.contextmanager
39
containers.py
Python
test/lib/ansible_test/_internal/containers.py
3eb0485dd92c88cc92152d3656d94492db44b183
ansible
2
101,421
13
9
5
57
11
0
14
42
_random_choice
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
https://github.com/deepfakes/faceswap.git
def _random_choice(self) -> List[int]:
    retval = [random.choice(indices) for indices in self._indices]
    logger.debug(retval)
    return retval
35
preview.py
Python
tools/preview/preview.py
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
faceswap
2
4,880
10
6
12
32
7
0
10
24
check_connection
python generators output `spec.yaml` files (#12245) * generators output spec.yaml files * source-singer generator also uses spec.yaml * update scaffold * update python cdk tutorials to use spec.yaml * add docs updates * consistency
https://github.com/airbytehq/airbyte.git
def check_connection(self, logger, config) -> Tuple[bool, any]:
    return True, None
21
source.py
Python
airbyte-integrations/connectors/source-scaffold-source-http/source_scaffold_source_http/source.py
0c12ad9136d992cde35e3975ad6056669188554c
airbyte
1
189,525
20
13
9
112
17
0
25
71
extract_face_coords
Upgraded typehints (#2429) * Future Annotations * Delete template_twitter_post.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed broken RTD Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
https://github.com/ManimCommunity/manim.git
def extract_face_coords(self) -> list[list[np.ndarray]]:
    new_vertex_coords = []
    for v in self.graph.vertices:
        new_vertex_coords.append(self.graph[v].get_center())
    layout = dict(enumerate(new_vertex_coords))
    return [[layout[j] for j in i] for i in self.faces_list]
72
polyhedra.py
Python
manim/mobject/polyhedra.py
daf23c9d1031b12d9c119b8f6b7e60727d7f9242
manim
4
89,805
8
6
6
22
3
0
8
22
get_integration
feat(hybrid-cloud): Create a base parser and middleware for webhooks (#42267) See [HC-468](https://getsentry.atlassian.net/browse/HC-468) Requires https://github.com/getsentry/sentry/pull/42260 This PR establishes the base parser that will be inherited from to forward webhooks to the appropriate integration. It is a slightly modified, portion of this [much larger PR](https://github.com/getsentry/sentry/pull/39169). It was split off in order to update that PR and make it more reviewable. Some background: The IntegrationControlMiddleware catches any incoming requests to the control silo with the `/extensions/` path prefix. It parses the provider out of the URL (e.g. `sentry.io/extensions/slack/something`), and passes the request along to that parser to determine how we handle the request (e.g. do we forward it to a region, multiple regions, handle it async, respond immediately from control, etc.) The BaseRequestParser provides a bunch of helpful methods to these parsers to make the actual integration-specific parsers as minimal as possible. They only need to implement a method for identifying the integration (e.g. from headers, from a signature, from a payload, etc), and how we respond to the webhook (allowing for different behaviour from different webhooks).
https://github.com/getsentry/sentry.git
def get_integration(self) -> Integration | None:
    return None
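The middleware behaviour described in the commit message - matching the `/extensions/` prefix and pulling the provider out of the URL - boils down to something like the following hypothetical sketch; the helper name and regex are illustrative, not Sentry's actual implementation:

import re

# Matches e.g. "/extensions/slack/webhook/" and captures "slack".
EXTENSION_PATH = re.compile(r"^/extensions/(?P<provider>[^/]+)/")


def provider_from_path(path: str):
    match = EXTENSION_PATH.match(path)
    return match.group("provider") if match else None


assert provider_from_path("/extensions/slack/webhook/") == "slack"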
12
base.py
Python
src/sentry/middleware/integrations/parsers/base.py
d8609112d6e2f373692b414acff6d4a2f7466750
sentry
1
106,355
92
15
33
604
44
0
121
404
get
[utils, etc] Kill child processes when yt-dl is killed * derived from PR #26592, closes #26592 Authored by: Unrud
https://github.com/ytdl-org/youtube-dl.git
def get(self, url, html=None, video_id=None, note=None, note2='Executing JS on webpage',
        headers={}, jscode='saveAndExit();'):
    if 'saveAndExit();' not in jscode:
        raise ExtractorError('`saveAndExit();` not found in `jscode`')

    if not html:
        html = self.extractor._download_webpage(url, video_id, note=note, headers=headers)
    with open(self._TMP_FILES['html'].name, 'wb') as f:
        f.write(html.encode('utf-8'))

    self._save_cookies(url)

    replaces = self.options
    replaces['url'] = url
    user_agent = headers.get('User-Agent') or std_headers['User-Agent']
    replaces['ua'] = user_agent.replace('"', '\\"')
    replaces['jscode'] = jscode

    for x in self._TMP_FILE_NAMES:
        replaces[x] = self._TMP_FILES[x].name.replace('\\', '\\\\').replace('"', '\\"')

    with open(self._TMP_FILES['script'].name, 'wb') as f:
        f.write(self._TEMPLATE.format(**replaces).encode('utf-8'))

    if video_id is None:
        self.extractor.to_screen('%s' % (note2,))
    else:
        self.extractor.to_screen('%s: %s' % (video_id, note2))

    p = subprocess.Popen([
        self.exe, '--ssl-protocol=any',
        self._TMP_FILES['script'].name
    ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = process_communicate_or_kill(p)
    if p.returncode != 0:
        raise ExtractorError(
            'Executing JS failed\n:' + encodeArgument(err))
    with open(self._TMP_FILES['html'].name, 'rb') as f:
        html = f.read().decode('utf-8')
    self._load_cookies()

    return (html, encodeArgument(out))
352
openload.py
Python
youtube_dl/extractor/openload.py
0700fde6403aa9eec1ff02bff7323696a205900c
youtube-dl
7
293,766
15
10
7
66
8
0
15
48
test_lazy_state_prefers_shared_attrs_over_attrs
Separate attrs into another table (reduces database size) (#68224)
https://github.com/home-assistant/core.git
async def test_lazy_state_prefers_shared_attrs_over_attrs(caplog):
    row = PropertyMock(
        entity_id="sensor.invalid",
        shared_attrs='{"shared":true}',
        attributes='{"shared":false}',
    )
    assert LazyState(row).attributes == {"shared": True}
36
test_models.py
Python
tests/components/recorder/test_models.py
9215702388eef03c7c3ed9f756ea0db533d5beec
core
1
213,056
57
15
25
326
18
0
88
319
_add_iam_resource_policy_for_method
fix: Py27hash fix (#2182) * Add third party py27hash code * Add Py27UniStr and unit tests * Add py27hash_fix utils and tests * Add to_py27_compatible_template and tests * Apply py27hash fix to wherever it is needed * Apply py27hash fix, all tests pass except api_with_any_method_in_swagger * apply py27hash fix in openapi + run black * remove py27 testing * remove other py27 references * black fixes * fixes/typos * remove py27 from tox.ini * refactoring * third party notice * black * Fix py27hash fix to deal with null events * Fix Py27UniStr repr for unicode literals * black reformat * Update _template_has_api_resource to check data type more defensively * Apply py27Dict in _get_authorizers * Apply Py27Dict to authorizers and gateway responses which will go into swagger * Update to_py27_compatible_template to handle parameter_values; Add Py27LongInt class * Rename _convert_to_py27_dict to _convert_to_py27_type * Apply Py27UniStr to path param name * Handle HttpApi resource under to_py27_compatible_template * Fix InvalidDocumentException to not sort different exceptions * black reformat * Remove unnecessary test files Co-authored-by: Wing Fung Lau <[email protected]>
https://github.com/aws/serverless-application-model.git
def _add_iam_resource_policy_for_method(self, policy_list, effect, resource_list):
    if not policy_list:
        return

    if effect not in ["Allow", "Deny"]:
        raise ValueError("Effect must be one of {}".format(["Allow", "Deny"]))

    if not isinstance(policy_list, (dict, list)):
        raise InvalidDocumentException(
            [InvalidTemplateException("Type of '{}' must be a list or dictionary".format(policy_list))]
        )

    if not isinstance(policy_list, list):
        policy_list = [policy_list]

    self.resource_policy["Version"] = "2012-10-17"
    policy_statement = Py27Dict()
    policy_statement["Effect"] = effect
    policy_statement["Action"] = "execute-api:Invoke"
    policy_statement["Resource"] = resource_list
    policy_statement["Principal"] = Py27Dict({"AWS": policy_list})

    if self.resource_policy.get("Statement") is None:
        self.resource_policy["Statement"] = policy_statement
    else:
        statement = self.resource_policy["Statement"]
        if not isinstance(statement, list):
            statement = [statement]
        statement.extend([policy_statement])
        self.resource_policy["Statement"] = statement
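For reference, a single statement appended by this method comes out roughly as follows; the principal ARN and resource values are purely illustrative:

# Illustrative shape of one appended statement, e.g. after calling the method
# with policy_list=["arn:aws:iam::123456789012:root"], effect="Allow",
# resource_list=["execute-api:/*/GET/pets"]:
{
    "Effect": "Allow",
    "Action": "execute-api:Invoke",
    "Resource": ["execute-api:/*/GET/pets"],
    "Principal": {"AWS": ["arn:aws:iam::123456789012:root"]},
}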
187
swagger.py
Python
samtranslator/swagger/swagger.py
a5db070f446b7cfebdaa6ad2e3dcf78f6105a272
serverless-application-model
7
163,143
94
21
48
427
49
0
115
840
write_file
DOC: improve IO & General Functions API reference (#45208)
https://github.com/pandas-dev/pandas.git
def write_file(self) -> None:
    with get_handle(
        self._fname,
        "wb",
        compression=self._compression,
        is_text=False,
        storage_options=self.storage_options,
    ) as self.handles:
        if self.handles.compression["method"] is not None:
            # ZipFile creates a file (with the same name) for each write call.
            # Write it first into a buffer and then write the buffer to the ZipFile.
            self._output_file, self.handles.handle = self.handles.handle, BytesIO()
            self.handles.created_handles.append(self.handles.handle)

        try:
            self._write_header(
                data_label=self._data_label, time_stamp=self._time_stamp
            )
            self._write_map()
            self._write_variable_types()
            self._write_varnames()
            self._write_sortlist()
            self._write_formats()
            self._write_value_label_names()
            self._write_variable_labels()
            self._write_expansion_fields()
            self._write_characteristics()
            records = self._prepare_data()
            self._write_data(records)
            self._write_strls()
            self._write_value_labels()
            self._write_file_close_tag()
            self._write_map()
            self._close()
        except Exception as exc:
            self.handles.close()
            if isinstance(self._fname, (str, os.PathLike)) and os.path.isfile(
                self._fname
            ):
                try:
                    os.unlink(self._fname)
                except OSError:
                    warnings.warn(
                        f"This save was not successful but {self._fname} could not "
                        "be deleted. This file is not valid.",
                        ResourceWarning,
                    )
            raise exc
251
stata.py
Python
pandas/io/stata.py
fa3dfdb41f0a75c937e85059a5983da5e5d5aac6
pandas
6
309,489
9
8
3
43
9
0
9
30
delete
Import frontend (#64104) Co-authored-by: epenet <[email protected]>
https://github.com/home-assistant/core.git
async def delete(self, request, addon):
    frontend.async_remove_panel(self.hass, addon)
    return web.Response()
26
addon_panel.py
Python
homeassistant/components/hassio/addon_panel.py
946238fb02bc484328603625abf106486b8e6c4a
core
1
81,148
87
15
14
193
23
0
108
249
job_stats_wrapup
Fix notification timing issue by sending in the latter of 2 events (#12110) * Track host_status_counts and use that to process notifications * Remove now unused setting * Back out changes to callback class not needed after all * Skirt the need for duck typing by leaning on the cached field * Delete tests for deleted task * Revert "Back out changes to callback class not needed after all" This reverts commit 3b8ae350d218991d42bffd65ce4baac6f41926b2. * Directly hardcode stats_event_type for callback class * Fire notifications if stats event was never sent * Remove test content for deleted methods * Add placeholder for when no hosts matched * Make field default be None, denote events processed with empty dict * Make UI process null value for host_status_counts * Fix tracking of EOF dispatch for system jobs * Reorganize EVENT_MAP into class properties * Consolidate conditional I missed from EVENT_MAP refactor * Give up on the null condition, also applies for empty hosts * Remove cls position argument not being used * Move wrapup method out of class, add tests
https://github.com/ansible/awx.git
def job_stats_wrapup(job_identifier, event=None):
    try:
        # empty dict (versus default of None) can still indicate that events have been processed
        # for job types like system jobs, and jobs with no hosts matched
        host_status_counts = {}
        if event:
            host_status_counts = event.get_host_status_counts()

        # Update host_status_counts while holding the row lock
        with transaction.atomic():
            uj = UnifiedJob.objects.select_for_update().get(pk=job_identifier)
            uj.host_status_counts = host_status_counts
            uj.save(update_fields=['host_status_counts'])

        uj.log_lifecycle("stats_wrapup_finished")

        # If the status was a finished state before this update was made, send notifications
        # If not, we will send notifications when the status changes
        if uj.status not in ACTIVE_STATES:
            uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')

    except Exception:
        logger.exception('Worker failed to save stats or emit notifications: Job {}'.format(job_identifier))
106
callback.py
Python
awx/main/dispatch/worker/callback.py
29d60844a8cee16bcd31657cecc444a692ade689
awx
5
246,128
18
12
15
127
9
0
24
161
test_avatar_constraints_file_size
Configurable limits on avatars (#11846) Only allow files which file size and content types match configured limits to be set as avatar. Most of the inspiration from the non-test code comes from matrix-org/synapse-dinsic#19
https://github.com/matrix-org/synapse.git
def test_avatar_constraints_file_size(self):
    self._setup_local_files(
        {
            "small": {"size": 40},
            "big": {"size": 60},
        }
    )

    res = self.get_success(
        self.handler.check_avatar_size_and_mime_type("mxc://test/small")
    )
    self.assertTrue(res)

    res = self.get_success(
        self.handler.check_avatar_size_and_mime_type("mxc://test/big")
    )
    self.assertFalse(res)
71
test_profile.py
Python
tests/handlers/test_profile.py
bf60da1a60096fac5fb778b732ff2214862ac808
synapse
1
104,183
29
13
13
116
18
0
30
87
from_directory
Run pyupgrade for Python 3.6+ (#3560) * Run pyupgrade for Python 3.6+ * Fix lint issues * Revert changes for the datasets code Co-authored-by: Quentin Lhoest <[email protected]>
https://github.com/huggingface/datasets.git
def from_directory(cls, metric_info_dir) -> "MetricInfo":
    logger.info(f"Loading Metric info from {metric_info_dir}")
    if not metric_info_dir:
        raise ValueError("Calling MetricInfo.from_directory() with undefined metric_info_dir.")

    with open(os.path.join(metric_info_dir, config.METRIC_INFO_FILENAME), encoding="utf-8") as f:
        metric_info_dict = json.load(f)
    return cls.from_dict(metric_info_dict)
64
info.py
Python
src/datasets/info.py
21bfd0d3f5ff3fbfd691600e2c7071a167816cdf
datasets
2
109,421
4
6
2
19
3
0
4
18
get_boxstyle
Harmonize docstrings for boxstyle/connectionstyle/arrowstyle. - Rely on `__init_subclass__` to avoid the need for the out-of-order `interpd.update`/`dedent_interpd`. - Use consistent wording for all setters, and add ACCEPTS list in all cases. - Move get_boxstyle right next to set_boxstyle (consistently with the other setters/getters). - Make the type check in the setters consistent in all cases (check for str, not for forcing inheritance from the private _Base). - Support `set_connectionstyle()` as equivalent to `set_connectionstyle(None)`, consistently with the other two setters.
https://github.com/matplotlib/matplotlib.git
def get_boxstyle(self):
    return self._bbox_transmuter
10
patches.py
Python
lib/matplotlib/patches.py
0dc472b4c7cdcc1e88228988fff17762c90f1cb9
matplotlib
1
316,274
24
14
7
122
16
0
28
53
test_key_error
Search/replace RESULT_TYPE_* by FlowResultType enum (#74642)
https://github.com/home-assistant/core.git
async def test_key_error(hass):
    flow = config_flow.SomaFlowHandler()
    flow.hass = hass
    with patch.object(SomaApi, "list_devices", return_value={}):
        result = await flow.async_step_import({"host": MOCK_HOST, "port": MOCK_PORT})
    assert result["type"] == data_entry_flow.FlowResultType.ABORT
    assert result["reason"] == "connection_error"
68
test_config_flow.py
Python
tests/components/soma/test_config_flow.py
7cd68381f1d4f58930ffd631dfbfc7159d459832
core
1
212,687
37
11
10
101
14
0
50
139
fonts_installed_list
Added Text.fonts_installed_list - returns the fonts installed as reported by tkinter.
https://github.com/PySimpleGUI/PySimpleGUI.git
def fonts_installed_list(cls):
    # if no windows have been created (there is no hidden master root to rely on)
    # then temporarily make a window so the measurement can happen
    if Window.hidden_master_root is None:
        root = tk.Tk()
    else:
        root = Window.hidden_master_root
    fonts = list(tkinter.font.families())
    fonts.sort()
    if Window.hidden_master_root is None:
        root.destroy()
    return fonts
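A quick usage sketch of the helper this commit adds, assuming the conventional `import PySimpleGUI as sg` alias:

import PySimpleGUI as sg

# Returns the font family names that tkinter reports on this machine.
fonts = sg.Text.fonts_installed_list()
print(len(fonts), fonts[:5])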
58
PySimpleGUI.py
Python
PySimpleGUI.py
81599f643bae6867c29cf2006e6110b9f2265f5e
PySimpleGUI
3
337,710
38
16
13
129
12
0
58
219
del_config_sub_tree
Migrate HFDeepSpeedConfig from trfrs to accelerate (#432) * Migrate HFDeepSpeedConfig from trfrs to accelerate * update state.py to resolve comments 1. Adds static method to have a simple API for integrating deepspeed config in transformers trainer. * reverting changes and addressing comments * Marking DepSpeed and FSDP as experimental in accelerate
https://github.com/huggingface/accelerate.git
def del_config_sub_tree(self, ds_key_long, must_exist=False):
    config = self.config

    # find the config node of interest if it exists
    nodes = ds_key_long.split(".")
    for node in nodes:
        parent_config = config
        config = config.get(node)
        if config is None:
            if must_exist:
                raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
            else:
                return

    # if found remove it
    if parent_config is not None:
        parent_config.pop(node)
70
deepspeed.py
Python
src/accelerate/utils/deepspeed.py
873dcc63a461558152eec20af991482204e8248f
accelerate
5
295,372
9
10
4
69
4
1
11
22
unauthorized_update_message_text
Refactor telegram_bot polling/webhooks platforms and add tests (#66433) Co-authored-by: Pär Berge <[email protected]>
https://github.com/home-assistant/core.git
def unauthorized_update_message_text(update_message_text):
    update_message_text["message"]["from"]["id"] = 1234
    update_message_text["message"]["chat"]["id"] = 1234
    return update_message_text


@pytest.fixture
@pytest.fixture
32
conftest.py
Python
tests/components/telegram_bot/conftest.py
d7375f1a9c4a69858a65a56bd524f5a78ecab23c
core
1
154,161
11
12
6
47
6
0
11
65
mask
FIX-#4676: drain sub-virtual-partition call queues. (#4695) Signed-off-by: mvashishtha <[email protected]> Co-authored-by: Alexey Prutskov <[email protected]>
https://github.com/modin-project/modin.git
def mask(self, row_indices, col_indices):
    return (
        self.force_materialization()
        .list_of_block_partitions[0]
        .mask(row_indices, col_indices)
    )
30
virtual_partition.py
Python
modin/core/execution/dask/implementations/pandas_on_dask/partitioning/virtual_partition.py
9bf8d57ca44e22fd69b0abc55793cf60c199ab4d
modin
1
3,835
48
12
14
310
32
0
68
166
test_stream_slices_with_state
🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805) * Facebook Marketing performance improvement * add comments and little refactoring * fix integration tests with the new config * improve job status handling, limit concurrency to 10 * fix campaign jobs, refactor manager * big refactoring of async jobs, support random order of slices * update source _read_incremental to hook new state logic * fix issues with timeout * remove debugging and clean up, improve retry logic * merge changes from #8234 * fix call super _read_increment * generalize batch execution, add use_batch flag * improve coverage, do some refactoring of spec * update test, remove overrides of source * add split by AdSet * add smaller insights * fix end_date < start_date case * add account_id to PK * add notes * fix new streams * fix reversed incremental stream * update spec.json for SAT * upgrade CDK and bump version Co-authored-by: Dmytro Rezchykov <[email protected]> Co-authored-by: Eugene Kulak <[email protected]>
https://github.com/airbytehq/airbyte.git
def test_stream_slices_with_state(self, api, async_manager_mock, start_date):
    end_date = start_date + duration(days=10)
    cursor_value = start_date + duration(days=5)
    state = {AdsInsights.cursor_field: cursor_value.date().isoformat()}
    stream = AdsInsights(api=api, start_date=start_date, end_date=end_date)
    async_manager_mock.completed_jobs.return_value = [1, 2, 3]

    slices = list(stream.stream_slices(stream_state=state, sync_mode=SyncMode.incremental))

    assert slices == [{"insight_job": 1}, {"insight_job": 2}, {"insight_job": 3}]
    async_manager_mock.assert_called_once()
    args, kwargs = async_manager_mock.call_args
    generated_jobs = list(kwargs["jobs"])
    assert len(generated_jobs) == (end_date - cursor_value).days
    assert generated_jobs[0].interval.start == cursor_value.date() + duration(days=1)
    assert generated_jobs[1].interval.start == cursor_value.date() + duration(days=2)
197
test_base_insight_streams.py
Python
airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_base_insight_streams.py
a3aae8017a0a40ff2006e2567f71dccb04c997a5
airbyte
1
127,765
29
10
15
101
13
0
34
98
sample
[RLlib] A few updates for offline DQN usage (#28593) 1. Allow users to select between huber or mse losses for num_atoms=1 case. 2. Allow users to configure temperature parameter for Categorical distribution. 3. Introduce FifoReplayBuffer to allow off-policy algorithms to be used without a real ReplayBuffer. Signed-off-by: Jun Gong <[email protected]>
https://github.com/ray-project/ray.git
def sample(self, *args, **kwargs) -> Optional[SampleBatchType]:
    if len(self._queue) <= 0:
        # Return empty SampleBatch if queue is empty.
        return MultiAgentBatch({}, 0)

    batch = self._queue.pop(0)
    # Equal weights of 1.0.
    batch["weights"] = np.ones(len(batch))
    return batch
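The FIFO semantics introduced here amount to "store sample batches in arrival order and hand them back one at a time". The following is a simplified, self-contained illustration of that idea, not RLlib's actual class:

from collections import deque


class TinyFifoBuffer:
    """Toy stand-in for the pass-through behaviour described above."""

    def __init__(self):
        self._queue = deque()

    def add(self, batch):
        self._queue.append(batch)

    def sample(self):
        # Nothing stored yet: hand back an empty placeholder, mirroring the
        # empty MultiAgentBatch returned by the real buffer.
        if not self._queue:
            return None
        return self._queue.popleft()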
61
fifo_replay_buffer.py
Python
rllib/utils/replay_buffers/fifo_replay_buffer.py
4105ca4d2be368a7893df9a0bd64e1641936c046
ray
2
260,025
7
10
3
44
9
0
7
21
_validate_targets
ENH Deprecate `class_weight_` in regression/single class models in SVM module (#22898) Co-authored-by: Thomas J. Fan <[email protected]>
https://github.com/scikit-learn/scikit-learn.git
def _validate_targets(self, y):
    return column_or_1d(y, warn=True).astype(np.float64, copy=False)
38
_base.py
Python
sklearn/svm/_base.py
f205b9579ab44988d28d890a9b4c944b37d89e73
scikit-learn
1
247,511
68
11
49
414
33
0
112
586
test_thumbnail_repeated_thumbnail
Add type hints to `tests/rest`. (#12208) Co-authored-by: Patrick Cloke <[email protected]>
https://github.com/matrix-org/synapse.git
def test_thumbnail_repeated_thumbnail(self) -> None:
    self._test_thumbnail(
        "scale", self.test_image.expected_scaled, self.test_image.expected_found
    )

    if not self.test_image.expected_found:
        return

    # Fetching again should work, without re-requesting the image from the
    # remote.
    params = "?width=32&height=32&method=scale"
    channel = make_request(
        self.reactor,
        FakeSite(self.thumbnail_resource, self.reactor),
        "GET",
        self.media_id + params,
        shorthand=False,
        await_result=False,
    )
    self.pump()

    self.assertEqual(channel.code, 200)
    if self.test_image.expected_scaled:
        self.assertEqual(
            channel.result["body"],
            self.test_image.expected_scaled,
            channel.result["body"],
        )

    # Deleting the thumbnail on disk then re-requesting it should work as
    # Synapse should regenerate missing thumbnails.
    origin, media_id = self.media_id.split("/")
    info = self.get_success(self.store.get_cached_remote_media(origin, media_id))
    file_id = info["filesystem_id"]

    thumbnail_dir = self.media_repo.filepaths.remote_media_thumbnail_dir(
        origin, file_id
    )
    shutil.rmtree(thumbnail_dir, ignore_errors=True)

    channel = make_request(
        self.reactor,
        FakeSite(self.thumbnail_resource, self.reactor),
        "GET",
        self.media_id + params,
        shorthand=False,
        await_result=False,
    )
    self.pump()

    self.assertEqual(channel.code, 200)
    if self.test_image.expected_scaled:
        self.assertEqual(
            channel.result["body"],
            self.test_image.expected_scaled,
            channel.result["body"],
        )
263
test_media_storage.py
Python
tests/rest/media/v1/test_media_storage.py
32c828d0f760492711a98b11376e229d795fd1b3
synapse
4
20,858
13
12
5
71
6
0
13
45
plain
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyproject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
https://github.com/pypa/pipenv.git
def plain(self) -> str:
    if len(self._text) != 1:
        self._text[:] = ["".join(self._text)]
    return self._text[0]
42
text.py
Python
pipenv/patched/notpip/_vendor/rich/text.py
f3166e673fe8d40277b804d35d77dcdb760fc3b3
pipenv
2