Dataset columns (name, type, value range / string length):

complexity       int64     1 – 56
n_identifiers    int64     1 – 114
code             string    lengths 19 – 12.7k
path             string    lengths 8 – 134
n_ast_nodes      int64     12 – 2.35k
ast_errors       string    lengths 0 – 4.01k
repo             string    lengths 3 – 28
documentation    dict
n_words          int64     2 – 866
language         string    1 class (Python)
vocab_size       int64     2 – 323
commit_id        string    lengths 40 – 40
file_name        string    lengths 5 – 79
id               int64     243 – 338k
nloc             int64     1 – 228
token_counts     int64     5 – 1.4k
fun_name         string    lengths 1 – 77
url              string    lengths 31 – 60
commit_message   string    lengths 3 – 15.3k
n_whitespaces    int64     1 – 3.23k
n_ast_errors     int64     0 – 20
d_id             int64     74 – 121k
ast_levels       int64     4 – 29
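Each record below is one function-level sample with its commit metadata, with field values appearing in the column order listed above. The following is a minimal sketch of how records with this schema could be loaded and filtered; it assumes the dump comes from a Hugging Face `datasets`-style table, and the dataset identifier used here is a hypothetical placeholder rather than the actual source of this export.

# Minimal sketch (not part of the original dump): iterate over records that
# follow the schema above and keep short, clean Python functions. The dataset
# identifier below is a placeholder assumption; substitute the real source.
from datasets import load_dataset

ds = load_dataset("org/code-function-dataset", split="train")  # hypothetical id

def is_small_clean_function(record):
    # Field names are exactly the columns listed in the schema above.
    return (
        record["language"] == "Python"
        and record["complexity"] <= 5
        and record["nloc"] <= 20
        and record["n_ast_errors"] == 0
    )

for record in ds.filter(is_small_clean_function).select(range(3)):
    print(record["repo"], record["fun_name"], record["url"])
    print(record["documentation"]["docstring"])
    print(record["code"])

The complexity, nloc and n_ast_errors thresholds are arbitrary illustration values; any of the integer columns in the schema can be filtered the same way.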
1
18
def test_relays_dyamic_sampling(client, call_endpoint, default_project, dyn_sampling_data): default_project.update_option("sentry:dynamic_sampling", dyn_sampling_data()) with Feature( { "organizations:server-side-sampling": True, "organizations:dynamic-sampling-deprecated": True, } ): result, status_code = call_endpoint(full_config=False) assert status_code < 400 dynamic_sampling = safe.get_path( result, "configs", str(default_project.id), "config", "dynamicSampling" ) assert dynamic_sampling == dyn_sampling_data() @pytest.mark.django_db
tests/sentry/api/endpoints/test_relay_projectconfigs.py
142
@pytest.mark.django_db
sentry
{ "docstring": "\n Tests that dynamic sampling configuration set in project details are retrieved in relay configs\n ", "language": "en", "n_whitespaces": 21, "n_words": 14, "vocab_size": 13 }
38
Python
32
c8bfd65f261769da2565ca4240f11da6e820a7e4
test_relay_projectconfigs.py
87,295
14
78
test_relays_dyamic_sampling
https://github.com/getsentry/sentry.git
feat(dyn-sampling): Switch to new feature flag multiplexer in projectconfig (#40498) This PR switch to new feature flag multiplexer in projectconfig.
131
1
18,274
13
1
2
def below(self): return self["below"]
packages/python/plotly/plotly/graph_objs/_choroplethmapbox.py
22
plotly.py
{ "docstring": "\n Determines if the choropleth polygons will be inserted before\n the layer with the specified ID. By default, choroplethmapbox\n traces are placed above the water layers. If set to '', the\n layer will be inserted above every existing layer.\n\n The 'below' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n\n Returns\n -------\n str\n ", "language": "en", "n_whitespaces": 147, "n_words": 65, "vocab_size": 46 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_choroplethmapbox.py
226,503
2
11
below
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
58,176
7
10
21
def execute(): if not frappe.db.table_exists("Additional Salary"): return for doctype in ("Additional Salary", "Employee Incentive", "Salary Detail"): frappe.reload_doc("Payroll", "doctype", doctype) frappe.reload_doc("hr", "doctype", "Leave Encashment") additional_salaries = frappe.get_all( "Additional Salary", fields=["name", "salary_slip", "type", "salary_component"], filters={"salary_slip": ["!=", ""]}, group_by="salary_slip", ) leave_encashments = frappe.get_all( "Leave Encashment", fields=["name", "additional_salary"], filters={"additional_salary": ["!=", ""]}, ) employee_incentives = frappe.get_all( "Employee Incentive", fields=["name", "additional_salary"], filters={"additional_salary": ["!=", ""]}, ) for incentive in employee_incentives: frappe.db.sql( , (incentive["name"], incentive["additional_salary"]), ) for leave_encashment in leave_encashments: frappe.db.sql( , (leave_encashment["name"], leave_encashment["additional_salary"]), ) salary_slips = [sal["salary_slip"] for sal in additional_salaries] for salary in additional_salaries: comp_type = "earnings" if salary["type"] == "Earning" else "deductions" if salary["salary_slip"] and salary_slips.count(salary["salary_slip"]) == 1: frappe.db.sql( , (salary["name"], comp_type, salary["salary_slip"], salary["salary_component"]), )
erpnext/patches/v13_0/patch_to_fix_reverse_linking_in_additional_salary_encashment_and_incentive.py
470
erpnext
{ "docstring": " UPDATE `tabAdditional Salary`\n\t\t\tSET ref_doctype = 'Employee Incentive', ref_docname = %s\n\t\t\tWHERE name = %s\n\t\t UPDATE `tabAdditional Salary`\n\t\t\tSET ref_doctype = 'Leave Encashment', ref_docname = %s\n\t\t\tWHERE name = %s\n\t\t\n\t\t\t\tUPDATE `tabSalary Detail`\n\t\t\t\tSET additional_salary = %s\n\t\t\t\tWHERE parenttype = 'Salary Slip'\n\t\t\t\t\tand parentfield = %s\n\t\t\t\t\tand parent = %s\n\t\t\t\t\tand salary_component = %s\n\t\t\t", "language": "en", "n_whitespaces": 44, "n_words": 54, "vocab_size": 24 }
110
Python
71
494bd9ef78313436f0424b918f200dab8fc7c20b
patch_to_fix_reverse_linking_in_additional_salary_encashment_and_incentive.py
66,773
53
269
execute
https://github.com/frappe/erpnext.git
style: format code with black
70
0
14,329
14
1
15
def test_get_permissions(self): self.assertTrue( win_dacl.set_permissions( obj_name=self.obj_name, principal="Backup Operators", permissions="full_control", access_mode="grant", obj_type=self.obj_type, reset_perms=False, protected=None, ) ) expected = {'Not Inherited': {'Backup Operators': {'grant': {'applies to': 'This key and subkeys', 'permissions': 'Full Control'}}}} self.assertEqual( win_dacl.get_permissions( obj_name=self.obj_name, principal="Backup Operators", obj_type=self.obj_type, ), expected, )
tests/unit/utils/test_win_dacl.py
168
salt
{ "docstring": "\n Test the get_permissions function\n ", "language": "en", "n_whitespaces": 19, "n_words": 4, "vocab_size": 4 }
39
Python
33
55a7519dd5dab2bdfcac2e7e6e77a3d1358538f9
test_win_dacl.py
216,350
21
100
test_get_permissions
https://github.com/saltstack/salt.git
fix tests
286
0
54,555
15
5
42
def run_api_experiment_separated_datasets(input_features, output_features, data_csv): config = { "input_features": input_features, "output_features": output_features, "combiner": {"type": "concat", "output_size": 14}, "training": {"epochs": 2}, } model = LudwigModel(config) # Training with dataframe data_df = read_csv(data_csv) train_df = data_df.sample(frac=0.8) test_df = data_df.drop(train_df.index).sample(frac=0.5) validation_df = data_df.drop(train_df.index).drop(test_df.index) basename, ext = os.path.splitext(data_csv) train_fname = basename + ".train" + ext val_fname = basename + ".validation" + ext test_fname = basename + ".test" + ext output_dirs = [] try: train_df.to_csv(train_fname) validation_df.to_csv(val_fname) test_df.to_csv(test_fname) # Training with csv _, _, output_dir = model.train( training_set=train_fname, skip_save_processed_input=True, skip_save_progress=True, skip_save_unprocessed_output=True, ) output_dirs.append(output_dir) _, _, output_dir = model.train( training_set=train_fname, validation_set=val_fname, skip_save_processed_input=True, skip_save_progress=True, skip_save_unprocessed_output=True, ) output_dirs.append(output_dir) _, _, output_dir = model.train( training_set=train_fname, validation_set=val_fname, test_set=test_fname, skip_save_processed_input=True, skip_save_progress=True, skip_save_unprocessed_output=True, ) output_dirs.append(output_dir) _, output_dir = model.predict(dataset=test_fname) output_dirs.append(output_dir) finally: # Remove results/intermediate data saved to disk os.remove(train_fname) os.remove(val_fname) os.remove(test_fname) for output_dir in output_dirs: shutil.rmtree(output_dir, ignore_errors=True) output_dirs = [] try: _, _, output_dir = model.train( training_set=train_df, skip_save_processed_input=True, skip_save_progress=True, skip_save_unprocessed_output=True, ) output_dirs.append(output_dir) _, _, output_dir = model.train( training_set=train_df, validation_set=validation_df, skip_save_processed_input=True, skip_save_progress=True, skip_save_unprocessed_output=True, ) output_dirs.append(output_dir) _, _, output_dir = model.train( training_set=train_df, validation_set=validation_df, test_set=test_df, skip_save_processed_input=True, skip_save_progress=True, skip_save_unprocessed_output=True, ) output_dirs.append(output_dir) _, output_dir = model.predict(dataset=data_df) output_dirs.append(output_dir) finally: for output_dir in output_dirs: shutil.rmtree(output_dir, ignore_errors=True)
tests/integration_tests/test_api.py
732
ludwig
{ "docstring": "Helper method to avoid code repetition in running an experiment.\n\n :param input_features: input schema\n :param output_features: output schema\n :param data_csv: path to data\n :return: None\n ", "language": "en", "n_whitespaces": 40, "n_words": 25, "vocab_size": 21 }
185
Python
82
69604268c2ddc06a4ee0b3dce0e05a8fb73b5d16
test_api.py
5,907
84
475
run_api_experiment_separated_datasets
https://github.com/ludwig-ai/ludwig.git
Rename fc_size to output_size (#1641) * Rename fc_size to output_size * Responding to comments
846
0
890
13
3
11
def is_same_object(instance, webhook_data, request_id): return ( ContentType.objects.get_for_model(instance) == webhook_data['content_type'] and instance.pk == webhook_data['object_id'] and request_id == webhook_data['request_id'] ) @receiver((post_save, m2m_changed))
netbox/extras/signals.py
84
@receiver((post_save, m2m_changed))
netbox
{ "docstring": "\n Compare the given instance to the most recent queued webhook object, returning True\n if they match. This check is used to avoid creating duplicate webhook entries.\n ", "language": "en", "n_whitespaces": 36, "n_words": 26, "vocab_size": 23 }
20
Python
17
4a95cfd1c4435e6eda01745fe06d902c25d2493e
signals.py
266,093
6
42
is_same_object
https://github.com/netbox-community/netbox.git
Permanently connect change logging & webhook receivers
49
1
78,288
12
1
4
def with_loss(self) -> bool: return self.loss_panoptic is not None
mmdet/models/seg_heads/panoptic_fusion_heads/base_panoptic_fusion_head.py
26
mmdetection
{ "docstring": "bool: whether the panoptic head contains loss function.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
9
Python
9
c08b81510fbfc1199eab6ccc7af07fc3d3f89d12
base_panoptic_fusion_head.py
244,992
3
15
with_loss
https://github.com/open-mmlab/mmdetection.git
Two stage segmentor + Panpotic FPN
23
0
70,624
7
5
26
def l1_min_c(X, y, *, loss="squared_hinge", fit_intercept=True, intercept_scaling=1.0): if loss not in ("squared_hinge", "log"): raise ValueError('loss type not in ("squared_hinge", "log")') X = check_array(X, accept_sparse="csc") check_consistent_length(X, y) Y = LabelBinarizer(neg_label=-1).fit_transform(y).T # maximum absolute value over classes and features den = np.max(np.abs(safe_sparse_dot(Y, X))) if fit_intercept: bias = np.full( (np.size(y), 1), intercept_scaling, dtype=np.array(intercept_scaling).dtype ) den = max(den, abs(np.dot(Y, bias)).max()) if den == 0.0: raise ValueError( "Ill-posed l1_min_c calculation: l1 will always " "select zero coefficients for this data" ) if loss == "squared_hinge": return 0.5 / den else: # loss == 'log': return 2.0 / den
sklearn/svm/_bounds.py
276
scikit-learn
{ "docstring": "Return the lowest bound for C.\n\n The lower bound for C is computed such that for C in (l1_min_C, infinity)\n the model is guaranteed not to be empty. This applies to l1 penalized\n classifiers, such as LinearSVC with penalty='l1' and\n linear_model.LogisticRegression with penalty='l1'.\n\n This value is valid if class_weight parameter in fit() is not set.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Target vector relative to X.\n\n loss : {'squared_hinge', 'log'}, default='squared_hinge'\n Specifies the loss function.\n With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss).\n With 'log' it is the loss of logistic regression models.\n\n fit_intercept : bool, default=True\n Specifies if the intercept should be fitted by the model.\n It must match the fit() method parameter.\n\n intercept_scaling : float, default=1.0\n When fit_intercept is True, instance vector x becomes\n [x, intercept_scaling],\n i.e. a \"synthetic\" feature with constant value equals to\n intercept_scaling is appended to the instance vector.\n It must match the fit() method parameter.\n\n Returns\n -------\n l1_min_c : float\n Minimum value for C.\n ", "language": "en", "n_whitespaces": 336, "n_words": 190, "vocab_size": 121 }
93
Python
70
6d16698dd8ba4407e5c3c588d7b5e6a5257eddc9
_bounds.py
260,816
21
176
l1_min_c
https://github.com/scikit-learn/scikit-learn.git
DOC Ensures that l1_min_c passes numpydoc validation (#24134)
216
0
76,515
16
2
6
def screen(self) -> Screen: try: return self._screen_stack[-1] except IndexError: raise ScreenStackError("No screens on stack") from None
src/textual/app.py
49
textual
{ "docstring": "Get the current screen.\n\n Raises:\n ScreenStackError: If there are no screens on the stack.\n\n Returns:\n Screen: The currently active screen.\n ", "language": "en", "n_whitespaces": 63, "n_words": 20, "vocab_size": 18 }
16
Python
16
b22436933acc0d7440ec300f971a249bd6105a5b
app.py
184,614
13
28
screen
https://github.com/Textualize/textual.git
lots of docstrings
59
0
44,714
11
1
4
async def test_timeout_stops_execution_in_sync_subflows(self, tmp_path): canary_file = tmp_path / "canary"
tests/test_flows.py
26
prefect
{ "docstring": "\n Sync flow runs can be cancelled after a timeout once a task is called\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 13 }
9
Python
9
336eca7839fccbcbdb77179f352f926da8b1fa15
test_flows.py
56,927
14
72
test_timeout_stops_execution_in_sync_subflows
https://github.com/PrefectHQ/prefect.git
Ensure flows are called in an interruptible thread (PrefectHQ/orion#2174) * Ensure flows are called in an interruptible thread * Set higher runtime limit in `test_timeout_stops_execution_in_sync_subflows`
23
0
11,587
8
3
32
def test_mark_checked_if_not_deleted(self, mock_patch_already_checked, mock_delete_pod, should_fail): dag = DAG('hello2', start_date=pendulum.now()) k = KubernetesPodOperator( namespace="default", image="ubuntu:16.04", name="test", task_id="task", is_delete_operator_pod=False, dag=dag, ) remote_pod_mock = MagicMock() remote_pod_mock.status.phase = 'Failed' if should_fail else 'Succeeded' self.await_pod_mock.return_value = remote_pod_mock context = create_context(k, persist_to_db=True) if should_fail: with pytest.raises(AirflowException): k.execute(context=context) else: k.execute(context=context) mock_patch_already_checked.assert_called_once() mock_delete_pod.assert_not_called()
tests/providers/cncf/kubernetes/operators/test_kubernetes_pod.py
213
airflow
{ "docstring": "If we aren't deleting pods mark \"checked\" if the task completes (successful or otherwise)", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
45
Python
37
c3d883a971a8e4e65ccc774891928daaaa0f4442
test_kubernetes_pod.py
47,753
21
127
test_mark_checked_if_not_deleted
https://github.com/apache/airflow.git
KubernetesPodOperator should patch "already checked" always (#22734) When not configured to delete pods, at end of task execution the current behavior is to patch the pod as "already checked", but only if pod not successful. We should also patch when successful so it isn't "reattached" to after a task clear.
232
0
9,244
12
31
23
def check_dependency(self, operation, dependency): # Created model if dependency[2] is None and dependency[3] is True: return ( isinstance(operation, operations.CreateModel) and operation.name_lower == dependency[1].lower() ) # Created field elif dependency[2] is not None and dependency[3] is True: return ( isinstance(operation, operations.CreateModel) and operation.name_lower == dependency[1].lower() and any(dependency[2] == x for x, y in operation.fields) ) or ( isinstance(operation, operations.AddField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) # Removed field elif dependency[2] is not None and dependency[3] is False: return ( isinstance(operation, operations.RemoveField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) # Removed model elif dependency[2] is None and dependency[3] is False: return ( isinstance(operation, operations.DeleteModel) and operation.name_lower == dependency[1].lower() ) # Field being altered elif dependency[2] is not None and dependency[3] == "alter": return ( isinstance(operation, operations.AlterField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) # order_with_respect_to being unset for a field elif dependency[2] is not None and dependency[3] == "order_wrt_unset": return ( isinstance(operation, operations.AlterOrderWithRespectTo) and operation.name_lower == dependency[1].lower() and (operation.order_with_respect_to or "").lower() != dependency[2].lower() ) # Field is removed and part of an index/unique_together elif dependency[2] is not None and dependency[3] == "foo_together_change": return ( isinstance( operation, (operations.AlterUniqueTogether, operations.AlterIndexTogether), ) and operation.name_lower == dependency[1].lower() ) # Unknown dependency. Raise an error. else: raise ValueError("Can't handle dependency %r" % (dependency,))
django/db/migrations/autodetector.py
629
django
{ "docstring": "\n Return True if the given operation depends on the given dependency,\n False otherwise.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 11 }
213
Python
74
9c19aff7c7561e3a82978a272ecdaad40dda5c00
autodetector.py
205,277
50
409
check_dependency
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
891
0
51,060
16
2
19
def load_version_info_from_text_file(filename): # Read and parse the version file. It may have a byte order marker or encoding cookie - respect it if it does. import PyInstaller.utils.misc as miscutils with open(filename, 'rb') as fp: text = miscutils.decode(fp.read()) # Deserialize via eval() try: info = eval(text) except Exception as e: raise ValueError("Failed to deserialize VSVersionInfo from text-based representation!") from e # Sanity check assert isinstance(info, VSVersionInfo), \ f"Loaded incompatible structure type! Expected VSVersionInfo, got: {type(info)!r}" return info
PyInstaller/utils/win32/versioninfo.py
129
pyinstaller
{ "docstring": "\n Load the `VSVersionInfo` structure from its string-based (`VSVersionInfo.__str__`) serialization by reading the\n text from the file and running it through `eval()`.\n ", "language": "en", "n_whitespaces": 31, "n_words": 21, "vocab_size": 18 }
76
Python
68
f57e15ae14d2370cba7a14cfae97d2c29b5c8154
versioninfo.py
264,108
11
69
load_version_info_from_text_file
https://github.com/pyinstaller/pyinstaller.git
building: EXE: load version info structure before comparing guts Load the version information structure in `EXE` constructor, so that the comparison against cached state is done with the structure instead of the filen name. This way, changing the contents of the version information file triggers rebuild of the EXE. Split and clean-up related functions in the `utils.win32.versioninfo` module as well as in `pyi-grab_version` and `pyi-set_version` utility scripts.
134
0
77,609
12
1
3
def rttopo_version(self): return self._get_spatialite_func("rttopo_version()")
django/contrib/gis/db/backends/spatialite/operations.py
26
django
{ "docstring": "Return the version of RTTOPO library used by SpatiaLite.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
4
Python
4
9c19aff7c7561e3a82978a272ecdaad40dda5c00
operations.py
203,873
2
13
rttopo_version
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
18
0
50,568
8
12
13
def filter(example, uniques, args): if not check_uniques(example, uniques): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True
examples/research_projects/codeparrot/scripts/preprocessing.py
215
transformers
{ "docstring": "Filter dataset with heuristics. Config, test and has_no_keywords files are removed with a given probability.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 14 }
61
Python
31
e730e1256732b5dfeae2bdd427beacc3fbc20e2a
preprocessing.py
38,436
21
129
filter
https://github.com/huggingface/transformers.git
Update codeparrot data preprocessing (#16944) * add new preprocessing arguments * add new filters * add new filters to readme * fix config and test count, update function names and docstrings * reformat code * update readme * Update readme * rename config_test filter Co-authored-by: Leandro von Werra <[email protected]> * rename few_assignments filter Co-authored-by: Leandro von Werra <[email protected]> * rename tokenizer in arguments Co-authored-by: Leandro von Werra <[email protected]> * rename functions and add limit_line argument for config_test filter * update threshold for config_test filter Co-authored-by: Leandro von Werra <[email protected]> Co-authored-by: Loubna ben allal <[email protected]>
164
0
6,974
11
3
42
def test_stream_concurrency(tctx): playbook, cff = start_h2_client(tctx) flow1 = Placeholder(HTTPFlow) flow2 = Placeholder(HTTPFlow) reqheadershook1 = http.HttpRequestHeadersHook(flow1) reqheadershook2 = http.HttpRequestHeadersHook(flow2) reqhook1 = http.HttpRequestHook(flow1) reqhook2 = http.HttpRequestHook(flow2) server = Placeholder(Server) data_req1 = Placeholder(bytes) data_req2 = Placeholder(bytes) assert ( playbook >> DataReceived( tctx.client, cff.build_headers_frame( example_request_headers, flags=["END_STREAM"], stream_id=1 ).serialize() + cff.build_headers_frame( example_request_headers, flags=["END_STREAM"], stream_id=3 ).serialize(), ) << reqheadershook1 << reqheadershook2 >> reply(to=reqheadershook1) << reqhook1 >> reply(to=reqheadershook2) << reqhook2 # req 2 overtakes 1 and we already have a reply: >> reply(to=reqhook2) << OpenConnection(server) >> reply(None, side_effect=make_h2) << SendData(server, data_req2) >> reply(to=reqhook1) << SendData(server, data_req1) ) frames = decode_frames(data_req2()) assert [type(x) for x in frames] == [ hyperframe.frame.SettingsFrame, hyperframe.frame.HeadersFrame, ] frames = decode_frames(data_req1()) assert [type(x) for x in frames] == [ hyperframe.frame.HeadersFrame, ]
test/mitmproxy/proxy/layers/http/test_http2.py
403
mitmproxy
{ "docstring": "Test that we can send an intercepted request with a lower stream id than one that has already been sent.", "language": "en", "n_whitespaces": 19, "n_words": 20, "vocab_size": 19 }
117
Python
72
b3587b52b25077f68116b9852b041d33e7fc6601
test_http2.py
251,881
44
269
test_stream_concurrency
https://github.com/mitmproxy/mitmproxy.git
make it black!
392
0
73,875
29
6
22
def handle_template(self, template, subdir): if template is None: return os.path.join(django.__path__[0], "conf", subdir) else: if template.startswith("file://"): template = template[7:] expanded_template = os.path.expanduser(template) expanded_template = os.path.normpath(expanded_template) if os.path.isdir(expanded_template): return expanded_template if self.is_url(template): # downloads the file and returns the path absolute_path = self.download(template) else: absolute_path = os.path.abspath(expanded_template) if os.path.exists(absolute_path): return self.extract(absolute_path) raise CommandError( "couldn't handle %s template %s." % (self.app_or_project, template) )
django/core/management/templates.py
228
django
{ "docstring": "\n Determine where the app or project templates are.\n Use django.__path__[0] as the default because the Django install\n directory isn't known.\n ", "language": "en", "n_whitespaces": 49, "n_words": 20, "vocab_size": 18 }
60
Python
43
9c19aff7c7561e3a82978a272ecdaad40dda5c00
templates.py
204,714
19
140
handle_template
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
284
0
50,848
15
1
19
def _load_model_instance(self) -> None: from rasa.nlu.utils.hugging_face.registry import ( model_class_dict, model_tokenizer_dict, ) logger.debug(f"Loading Tokenizer and Model for {self.model_name}") self.tokenizer = model_tokenizer_dict[self.model_name].from_pretrained( self.model_weights, cache_dir=self.cache_dir ) self.model = model_class_dict[self.model_name].from_pretrained( # type: ignore[no-untyped-call] # noqa: E501 self.model_weights, cache_dir=self.cache_dir ) # Use a universal pad token since all transformer architectures do not have a # consistent token. Instead of pad_token_id we use unk_token_id because # pad_token_id is not set for all architectures. We can't add a new token as # well since vocabulary resizing is not yet supported for TF classes. # Also, this does not hurt the model predictions since we use an attention mask # while feeding input. self.pad_token_id = self.tokenizer.unk_token_id
rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py
145
rasa
{ "docstring": "Tries to load the model instance.\n\n Model loading should be skipped in unit tests.\n See unit tests for examples.\n ", "language": "en", "n_whitespaces": 40, "n_words": 19, "vocab_size": 18 }
108
Python
80
a2cb6b72bb72fb9e5598808d564749503ee08784
lm_featurizer.py
159,466
18
87
_load_model_instance
https://github.com/RasaHQ/rasa.git
fix transformers typing issues
258
0
38,282
10
2
7
def all_pairs_dijkstra_path(G, cutoff=None, weight="weight"): path = single_source_dijkstra_path # TODO This can be trivially parallelized. for n in G: yield (n, path(G, n, cutoff=cutoff, weight=weight))
networkx/algorithms/shortest_paths/weighted.py
64
networkx
{ "docstring": "Compute shortest paths between all nodes in a weighted graph.\n\n Parameters\n ----------\n G : NetworkX graph\n\n cutoff : integer or float, optional\n Length (sum of edge weights) at which the search is stopped.\n If cutoff is provided, only return paths with summed weight <= cutoff.\n\n weight : string or function\n If this is a string, then edge weights will be accessed via the\n edge attribute with this key (that is, the weight of the edge\n joining `u` to `v` will be ``G.edges[u, v][weight]``). If no\n such edge attribute exists, the weight of the edge is assumed to\n be one.\n\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly three\n positional arguments: the two endpoints of an edge and the\n dictionary of edge attributes for that edge. The function must\n return a number or None to indicate a hidden edge.\n\n Returns\n -------\n distance : dictionary\n Dictionary, keyed by source and target, of shortest paths.\n\n Examples\n --------\n >>> G = nx.path_graph(5)\n >>> path = dict(nx.all_pairs_dijkstra_path(G))\n >>> path[0][4]\n [0, 1, 2, 3, 4]\n\n Notes\n -----\n Edge weight attributes must be numerical.\n Distances are calculated as sums of weighted edges traversed.\n\n See Also\n --------\n floyd_warshall, all_pairs_bellman_ford_path\n\n ", "language": "en", "n_whitespaces": 362, "n_words": 205, "vocab_size": 127 }
24
Python
24
d82815dba6c8ddce19cd49f700298dc82a58f066
weighted.py
177,500
4
41
all_pairs_dijkstra_path
https://github.com/networkx/networkx.git
Hide edges with a weight of None in A*. (#5945) * Hide edges with a weight of None in A*. This matches the Dijkstra's weight interface. * Update Dijkstra's and A* docs for weights of None. * Add tests for A* with weight of None. * Add another test for A* with a weight function. * Document that None indicates a hidden edge.
43
0
42,404
12
1
5
def preset_modes(self) -> list[str] | None: return self._attr_preset_modes
homeassistant/components/climate/__init__.py
29
core
{ "docstring": "Return a list of available preset modes.\n\n Requires ClimateEntityFeature.PRESET_MODE.\n ", "language": "en", "n_whitespaces": 23, "n_words": 9, "vocab_size": 9 }
8
Python
8
8fc55b71c5153580508446d478adfb450c76ea41
__init__.py
295,294
6
17
preset_modes
https://github.com/home-assistant/core.git
Add EntityFeature enum to Climate (#69077)
22
0
94,318
6
1
5
def call(cls, func, join_type="outer", labels="replace"):
modin/core/dataframe/algebra/binary.py
29
modin
{ "docstring": "\n Build template binary operator.\n\n Parameters\n ----------\n func : callable(pandas.DataFrame, [pandas.DataFrame, list-like, scalar]) -> pandas.DataFrame\n Binary function to execute. Have to be able to accept at least two arguments.\n join_type : {'left', 'right', 'outer', 'inner', None}, default: 'outer'\n Type of join that will be used if indices of operands are not aligned.\n labels : {\"keep\", \"replace\", \"drop\"}, default: \"replace\"\n Whether keep labels from left Modin DataFrame, replace them with labels\n from joined DataFrame or drop altogether to make them be computed lazily later.\n\n Returns\n -------\n callable\n Function that takes query compiler and executes binary operation.\n ", "language": "en", "n_whitespaces": 220, "n_words": 94, "vocab_size": 79 }
5
Python
5
bd326f1c4175102489f08d271a53cf374bd9125e
binary.py
154,285
3
20
call
https://github.com/modin-project/modin.git
PERF-#4268: Implement partition-parallel __getitem__ for bool Series masks (#4753) Signed-off-by: Vasily Litvinov <[email protected]>
12
0
35,897
6
1
4
def recalc_open_trade_value(self) -> None: self.open_trade_value = self._calc_open_trade_value()
freqtrade/persistence/models.py
31
freqtrade
{ "docstring": "\n Recalculate open_trade_value.\n Must be called whenever open_rate, fee_open or is_short is changed.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 12 }
7
Python
7
1c0946833da746b480f6ef88d4866d6a87824e17
models.py
149,271
7
17
recalc_open_trade_value
https://github.com/freqtrade/freqtrade.git
Fix bug in exit-count detection
21
0
34,386
8
1
6
def dist_in_site_packages(dist): # type: (Distribution) -> bool return dist_location(dist).startswith(normalize_path(site_packages))
.venv/lib/python3.8/site-packages/pip/_internal/utils/misc.py
35
transferlearning
{ "docstring": "\n Return True if given Distribution is installed in\n sysconfig.get_python_lib().\n ", "language": "en", "n_whitespaces": 19, "n_words": 9, "vocab_size": 9 }
9
Python
9
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
misc.py
61,230
2
19
dist_in_site_packages
https://github.com/jindongwang/transferlearning.git
upd; format
18
0
12,452
9
5
19
def get_account_type_based_gl_data(company, start_date, end_date, account_type, filters=None): cond = "" filters = frappe._dict(filters or {}) if filters.include_default_book_entries: company_fb = frappe.db.get_value("Company", company, "default_finance_book") cond = % ( frappe.db.escape(filters.finance_book), frappe.db.escape(company_fb), ) else: cond = " AND (finance_book in (%s, '') OR finance_book IS NULL)" % ( frappe.db.escape(cstr(filters.finance_book)) ) gl_sum = frappe.db.sql_list( .format( cond=cond ), (company, start_date, end_date, account_type), ) return gl_sum[0] if gl_sum and gl_sum[0] else 0
erpnext/accounts/report/cash_flow/cash_flow.py
214
erpnext
{ "docstring": " AND (finance_book in (%s, %s, '') OR finance_book IS NULL)\n\t\t\t\n\t\tselect sum(credit) - sum(debit)\n\t\tfrom `tabGL Entry`\n\t\twhere company=%s and posting_date >= %s and posting_date <= %s\n\t\t\tand voucher_type != 'Period Closing Voucher'\n\t\t\tand account in ( SELECT name FROM tabAccount WHERE account_type = %s) {cond}\n\t", "language": "en", "n_whitespaces": 41, "n_words": 46, "vocab_size": 40 }
64
Python
48
494bd9ef78313436f0424b918f200dab8fc7c20b
cash_flow.py
65,186
27
137
get_account_type_based_gl_data
https://github.com/frappe/erpnext.git
style: format code with black
45
0
13,820
16
8
21
def get_docstring(node, clean=True): if not isinstance(node, (AsyncFunctionDef, FunctionDef, ClassDef, Module)): raise TypeError("%r can't have docstrings" % node.__class__.__name__) if not(node.body and isinstance(node.body[0], Expr)): return None node = node.body[0].value if isinstance(node, Str): text = node.s elif isinstance(node, Constant) and isinstance(node.value, str): text = node.value else: return None if clean: import inspect text = inspect.cleandoc(text) return text
python3.10.4/Lib/ast.py
193
XX-Net
{ "docstring": "\n Return the docstring for the given node or None if no docstring can\n be found. If the node provided does not have docstrings a TypeError\n will be raised.\n\n If *clean* is `True`, all tabs are expanded to spaces and any whitespace\n that can be uniformly removed from the second line onwards is removed.\n ", "language": "en", "n_whitespaces": 73, "n_words": 53, "vocab_size": 43 }
54
Python
39
8198943edd73a363c266633e1aa5b2a9e9c9f526
ast.py
220,220
16
124
get_docstring
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
130
0
55,940
12
1
3
def get_success_url(self): return self.success_url
wagtail/contrib/forms/views.py
19
wagtail
{ "docstring": "Returns the success URL to redirect to after a successful deletion", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
4
Python
4
d10f15e55806c6944827d801cd9c2d53f5da4186
views.py
73,081
2
10
get_success_url
https://github.com/wagtail/wagtail.git
Reformat with black
18
0
15,949
6
1
3
def expand_basedirs(self): self._expand_attrs(['install_base', 'install_platbase', 'root'])
python3.10.4/Lib/distutils/command/install.py
36
XX-Net
{ "docstring": "Calls `os.path.expanduser` on install_base, install_platbase and\n root.", "language": "en", "n_whitespaces": 13, "n_words": 7, "vocab_size": 7 }
5
Python
5
8198943edd73a363c266633e1aa5b2a9e9c9f526
install.py
222,753
2
18
expand_basedirs
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
19
0
56,733
9
1
9
async def test_delete_nonsense_block_document(self, client, block_schemas): response = await client.get("/block_documents/not-even") assert response.status_code == status.HTTP_404_NOT_FOUND
tests/orion/api/test_block_documents.py
47
prefect
{ "docstring": "Regression test for an issue we observed in Cloud where a client made\n requests for /block_documents/null", "language": "en", "n_whitespaces": 22, "n_words": 16, "vocab_size": 15 }
13
Python
13
74b49c72657da5e18fc00c4b1da3012b575210cd
test_block_documents.py
58,678
3
27
test_delete_nonsense_block_document
https://github.com/PrefectHQ/prefect.git
Prevent non-UUID slugs from raising errors on the BlockDocuments APIs. (#6541) In Prefect Cloud, we observed some errors when clients would send requests for `.../block_documents/null`, which should really be handled at the routing layer with 404s when the path UUIDs can't be parsed. Note that this is just correcting the server-side issue, but does not attempt to diagnose the client-side issue at this time. Also, this does not attempt to go through every route in Orion that includes UUIDs in its path.
34
0
11,797
10
1
31
def test_cli_backfill_depends_on_past_backwards(self, mock_run): dag_id = 'test_depends_on_past' start_date = DEFAULT_DATE + timedelta(days=1) end_date = start_date + timedelta(days=1) args = [ 'dags', 'backfill', dag_id, '--local', '--start-date', start_date.isoformat(), '--end-date', end_date.isoformat(), '--ignore-first-depends-on-past', '--run-backwards', ] dag = self.dagbag.get_dag(dag_id) dag_command.dag_backfill(self.parser.parse_args(args), dag=dag) mock_run.assert_called_once_with( start_date=start_date, end_date=end_date, conf=None, delay_on_limit_secs=1.0, donot_pickle=False, ignore_first_depends_on_past=True, ignore_task_deps=False, local=True, mark_success=False, pool=None, rerun_failed_tasks=False, run_backwards=True, verbose=False, continue_on_failures=False, )
tests/cli/commands/test_dag_command.py
230
airflow
{ "docstring": "\n Test that CLI respects -B argument and raises on interaction with depends_on_past\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
51
Python
44
3849b4e709acfc9e85496aa2dededb2dae117fc7
test_dag_command.py
46,734
34
153
test_cli_backfill_depends_on_past_backwards
https://github.com/apache/airflow.git
support for continue backfill on failures (#22697)
385
0
8,976
10
1
4
def _api_status(self): response.status = 200 return None
glances/outputs/glances_bottle.py
24
glances
{ "docstring": "Glances API RESTful implementation.\n\n Return a 200 status code.\n This entry point should be used to check the API health.\n\n See related issue: Web server health check endpoint #1988\n ", "language": "en", "n_whitespaces": 58, "n_words": 29, "vocab_size": 27 }
7
Python
7
8d4a20a6a843e1e35b5324bc83be422fbed04b87
glances_bottle.py
69,841
3
13
_api_status
https://github.com/nicolargo/glances.git
Web server status check endpoint #1988
28
0
15,108
7
4
20
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None): assert len(padding) == 2 assert len(padding[0]) == 2 assert len(padding[1]) == 2 if data_format is None: data_format = image_data_format() if data_format not in {"channels_first", "channels_last"}: raise ValueError("Unknown data_format: " + str(data_format)) if data_format == "channels_first": pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])] else: pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]] return tf.compat.v1.pad(x, pattern) @keras_export("keras.backend.spatial_3d_padding") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
keras/backend.py
278
@keras_export("keras.backend.spatial_3d_padding") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
keras
{ "docstring": "Pads the 2nd and 3rd dimensions of a 4D tensor.\n\n Args:\n x: Tensor or variable.\n padding: Tuple of 2 tuples, padding pattern.\n data_format: One of `channels_last` or `channels_first`.\n\n Returns:\n A padded 4D tensor.\n\n Raises:\n ValueError: if `data_format` is neither\n `channels_last` or `channels_first`.\n ", "language": "en", "n_whitespaces": 100, "n_words": 42, "vocab_size": 34 }
65
Python
45
84afc5193d38057e2e2badf9c889ea87d80d8fbf
backend.py
269,439
27
165
spatial_2d_padding
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
117
1
80,078
13
1
5
def is_container(self) -> bool: return self.styles.layout is not None
src/textual/widget.py
29
textual
{ "docstring": "Check if this widget is a container (contains other widgets)\n\n Returns:\n bool: True if this widget is a container.\n ", "language": "en", "n_whitespaces": 44, "n_words": 19, "vocab_size": 14 }
9
Python
9
025a0e348d3d3c360498f4f2035451d50f79b40e
widget.py
182,594
7
17
is_container
https://github.com/Textualize/textual.git
Scrolling working
23
0
43,875
8
2
5
def available(self) -> bool: return self._device is not None and self._device.profile_device.available
homeassistant/components/dlna_dms/dms.py
38
core
{ "docstring": "Device is available when we have a connection to it.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
11
Python
11
b19bf9b147f4321e89d1f7f01e68337f2102f460
dms.py
292,448
3
23
available
https://github.com/home-assistant/core.git
Add dlna_dms integration to support DLNA Digital Media Servers (#66437)
25
0
91,534
9
4
28
def test_co_positions_artificial_instructions(self): import dis namespace = {} exec(textwrap.dedent(), namespace) exc = namespace['exc'] traceback = exc.__traceback__ code = traceback.tb_frame.f_code artificial_instructions = [] for instr, positions in zip( dis.get_instructions(code), code.co_positions(), strict=True ): # If any of the positions is None, then all have to # be None as well for the case above. There are still # some places in the compiler, where the artificial instructions # get assigned the first_lineno but they don't have other positions. # There is no easy way of inferring them at that stage, so for now # we don't support it. self.assertTrue(positions.count(None) in [0, 4]) if not any(positions): artificial_instructions.append(instr) self.assertEqual( [ (instruction.opname, instruction.argval) for instruction in artificial_instructions ], [ ("PUSH_EXC_INFO", None), ("LOAD_CONST", None), # artificial 'None' ("STORE_NAME", "e"), # XX: we know the location for this ("DELETE_NAME", "e"), ("RERAISE", 1), ("COPY", 3), ("POP_EXCEPT", None), ("RERAISE", 1) ] )
Lib/test/test_code.py
278
cpython
{ "docstring": "\\\n try:\n 1/0\n except Exception as e:\n exc = e\n ", "language": "en", "n_whitespaces": 53, "n_words": 10, "vocab_size": 10 }
142
Python
105
a94461d7189d7f1147ab304a332c8684263dc17e
test_code.py
175,178
37
169
test_co_positions_artificial_instructions
https://github.com/python/cpython.git
bpo-46202: Remove opcode POP_EXCEPT_AND_RERAISE (GH-30302) * bpo-46202: remove opcode POP_EXCEPT_AND_RERAISE * do not assume that an exception group is truthy
549
0
41,563
12
1
8
def get_assessment_criteria(course): return frappe.get_all( "Course Assessment Criteria", fields=["assessment_criteria", "weightage"], filters={"parent": course}, order_by="idx", ) @frappe.whitelist()
erpnext/education/api.py
72
@frappe.whitelist()
erpnext
{ "docstring": "Returns Assessmemt Criteria and their Weightage from Course Master.\n\n\t:param Course: Course\n\t", "language": "en", "n_whitespaces": 10, "n_words": 12, "vocab_size": 11 }
14
Python
14
494bd9ef78313436f0424b918f200dab8fc7c20b
api.py
65,846
7
34
get_assessment_criteria
https://github.com/frappe/erpnext.git
style: format code with black
6
1
14,035
11
5
12
def get_cloud_syncer(local_dir, remote_dir=None, sync_function=None) -> CloudSyncer: key = (local_dir, remote_dir) if key in _syncers: return _syncers[key] if not remote_dir: _syncers[key] = CloudSyncer(local_dir, remote_dir, NOOP) return _syncers[key] if sync_function == "auto": sync_function = None # Auto-detect # Maybe get user-provided sync client here client = get_sync_client(sync_function) if client: # If the user provided a sync template or function _syncers[key] = CloudSyncer(local_dir, remote_dir, client) else: # Else, get default cloud sync client (e.g. S3 syncer) sync_client = get_cloud_sync_client(remote_dir) _syncers[key] = CloudSyncer(local_dir, remote_dir, sync_client) return _syncers[key]
python/ray/tune/syncer.py
174
ray
{ "docstring": "Returns a Syncer.\n\n This syncer is in charge of syncing the local_dir with upload_dir.\n\n If no ``remote_dir`` is provided, it will return a no-op syncer.\n\n If a ``sync_function`` is provided, it will return a CloudSyncer using\n a custom SyncClient initialized by the sync function. Otherwise it will\n return a CloudSyncer with default templates for s3/gs/hdfs.\n\n Args:\n local_dir (str): Source directory for syncing.\n remote_dir (str): Target directory for syncing. If not provided, a\n no-op Syncer is returned.\n sync_function (func | str): Function for syncing the local_dir to\n remote_dir. If string, then it must be a string template for\n syncer to run. If not provided, it defaults\n to standard S3, gsutil or HDFS sync commands.\n\n Raises:\n ValueError if malformed remote_dir.\n ", "language": "en", "n_whitespaces": 214, "n_words": 118, "vocab_size": 72 }
83
Python
53
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
syncer.py
132,338
38
111
get_cloud_syncer
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
177
0
29,744
11
8
19
def assemble_files_to_ship(complete_file_list): # All files which are in the repository except these: ignore_patterns = ( # Developer-only tools '.azure-pipelines/*', '.github/*', '.github/*/*', 'changelogs/fragments/*', 'hacking/backport/*', 'hacking/azp/*', 'hacking/tests/*', 'hacking/ticket_stubs/*', 'test/sanity/code-smell/botmeta.*', 'test/sanity/code-smell/release-names.*', 'test/utils/*', 'test/utils/*/*', 'test/utils/*/*/*', 'test/results/.tmp/*', 'test/results/.tmp/*/*', 'test/results/.tmp/*/*/*', 'test/results/.tmp/*/*/*/*', 'test/results/.tmp/*/*/*/*/*', '.git*', ) ignore_files = frozenset(( # Developer-only tools 'changelogs/config.yaml', 'hacking/README.md', 'hacking/ansible-profile', 'hacking/cgroup_perf_recap_graph.py', 'hacking/create_deprecated_issues.py', 'hacking/deprecated_issue_template.md', 'hacking/create_deprecation_bug_reports.py', 'hacking/fix_test_syntax.py', 'hacking/get_library.py', 'hacking/metadata-tool.py', 'hacking/report.py', 'hacking/return_skeleton_generator.py', 'hacking/test-module', 'test/support/README.md', 'test/lib/ansible_test/_internal/commands/sanity/bin_symlinks.py', 'test/lib/ansible_test/_internal/commands/sanity/integration_aliases.py', '.cherry_picker.toml', '.mailmap', # Generated as part of a build step 'docs/docsite/rst/conf.py', 'docs/docsite/rst/index.rst', # Possibly should be included 'examples/scripts/uptime.py', 'examples/scripts/my_test.py', 'examples/scripts/my_test_info.py', 'examples/scripts/my_test_facts.py', 'examples/DOCUMENTATION.yml', 'examples/play.yml', 'examples/hosts.yaml', 'examples/hosts.yml', 'examples/inventory_script_schema.json', 'examples/plugin_filters.yml', 'hacking/env-setup', 'hacking/env-setup.fish', 'MANIFEST', 'setup.cfg', # docs for test files not included in sdist 'docs/docsite/rst/dev_guide/testing/sanity/bin-symlinks.rst', 'docs/docsite/rst/dev_guide/testing/sanity/botmeta.rst', 'docs/docsite/rst/dev_guide/testing/sanity/integration-aliases.rst', 'docs/docsite/rst/dev_guide/testing/sanity/release-names.rst', )) # These files are generated and then intentionally added to the sdist # Manpages ignore_script = ('ansible-connection', 'ansible-test') manpages = ['docs/man/man1/ansible.1'] for dirname, dummy, files in os.walk('bin'): for filename in files: if filename in ignore_script: continue manpages.append('docs/man/man1/%s.1' % filename) # Misc misc_generated_files = [ 'PKG-INFO', ] shipped_files = manpages + misc_generated_files for path in complete_file_list: if path not in ignore_files: for ignore in ignore_patterns: if fnmatch.fnmatch(path, ignore): break else: shipped_files.append(path) return shipped_files
test/sanity/code-smell/package-data.py
423
ansible
{ "docstring": "\n This looks for all files which should be shipped in the sdist\n ", "language": "en", "n_whitespaces": 19, "n_words": 12, "vocab_size": 12 }
177
Python
136
353511a900f6216a25a25d8a36528f636428b57b
package-data.py
266,979
81
224
assemble_files_to_ship
https://github.com/ansible/ansible.git
Add script to handle more deprecations. (#77400) * Add script to handle more deprecations. This script currently supports deprecations from the following sanity tests: * deprecated-config * update-bundled * Ignore script in package-data test.
791
0
78,675
14
3
25
def _solve_svd_design_matrix(self, alpha, y, sqrt_sw, X_mean, singvals_sq, U, UT_y): w = ((singvals_sq + alpha) ** -1) - (alpha**-1) if self.fit_intercept: # detect intercept column normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw) intercept_dim = _find_smallest_angle(normalized_sw, U) # cancel the regularization for the intercept w[intercept_dim] = -(alpha**-1) c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha**-1) * y G_inverse_diag = self._decomp_diag(w, U) + (alpha**-1) if len(y.shape) != 1: # handle case where y is 2-d G_inverse_diag = G_inverse_diag[:, np.newaxis] return G_inverse_diag, c
sklearn/linear_model/_ridge.py
220
scikit-learn
{ "docstring": "Compute dual coefficients and diagonal of G^-1.\n\n Used when we have an SVD decomposition of X\n (n_samples > n_features and X is dense).\n ", "language": "en", "n_whitespaces": 44, "n_words": 23, "vocab_size": 20 }
76
Python
57
1fc86b6aacd89da44a3b4e8abf7c3e2ba4336ffe
_ridge.py
258,898
11
143
_solve_svd_design_matrix
https://github.com/scikit-learn/scikit-learn.git
MNT Update black to stable version (#22474)
202
0
75,474
12
2
5
async def _send_server_info_to_all(self) -> None: for client_handler in self.clients: await self.send_server_info(client_handler)
src/textual/devtools/service.py
39
textual
{ "docstring": "Add `server_info` message to the queues of every client", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
11
Python
11
a72e347ed99333a090377ee438eaf63477cbf98b
service.py
182,902
4
22
_send_server_info_to_all
https://github.com/Textualize/textual.git
Seperate server and client handling logic into classes for devtools
36
0
44,001
10
1
16
def test_basic_discovery(self): with os_helper.temp_cwd(): os.mkdir('foo') file1 = os.path.join('foo', 'file1.txt') os_helper.create_empty_file(file1) os.mkdir('bar') file2 = os.path.join('bar', 'file2.txt') os_helper.create_empty_file(file2) expected = [file2, file1] self.assertEqual(sorted(filelist.findall()), expected)
python3.10.4/Lib/distutils/tests/test_filelist.py
149
XX-Net
{ "docstring": "\n When findall is called with no parameters or with\n '.' as the parameter, the dot should be omitted from\n the results.\n ", "language": "en", "n_whitespaces": 50, "n_words": 21, "vocab_size": 18 }
22
Python
20
8198943edd73a363c266633e1aa5b2a9e9c9f526
test_filelist.py
223,221
10
83
test_basic_discovery
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
124
0
56,865
13
6
27
def _total_stats(self) -> Dict[str, Union[str, int, float]]: logger.debug("Compiling Totals") elapsed = 0 examples = 0 iterations = 0 batchset = set() total_summaries = len(self._per_session_stats) for idx, summary in enumerate(self._per_session_stats): if idx == 0: starttime = summary["start"] if idx == total_summaries - 1: endtime = summary["end"] elapsed += summary["elapsed"] examples += ((summary["batch"] * 2) * summary["iterations"]) batchset.add(summary["batch"]) iterations += summary["iterations"] batch = ",".join(str(bs) for bs in batchset) totals = {"session": "Total", "start": starttime, "end": endtime, "elapsed": elapsed, "rate": examples / elapsed if elapsed != 0 else 0, "batch": batch, "iterations": iterations} logger.debug(totals) return totals
lib/gui/analysis/stats.py
311
faceswap
{ "docstring": " Compile the Totals stats.\n Totals are fully calculated each time as they will change on the basis of the training\n session.\n\n Returns\n -------\n dict\n The Session name, start time, end time, elapsed time, rate, batch size and number of\n iterations for all session ids within the loaded data.\n ", "language": "en", "n_whitespaces": 113, "n_words": 48, "vocab_size": 41 }
93
Python
65
47867a0dd424b3e31d7beead0ffdb8b37c970a9e
stats.py
101,819
36
184
_total_stats
https://github.com/deepfakes/faceswap.git
typing: lib.gui.analysis.stats
375
0
21,206
14
1
2
def json_deserialize(message):
scripts/ws_client.py
13
freqtrade
{ "docstring": "\n Deserialize JSON to a dict\n :param message: The message to deserialize\n ", "language": "en", "n_whitespaces": 21, "n_words": 11, "vocab_size": 10 }
2
Python
2
3e08c6e5409d3e1b9c6f787415869e3e49289a00
ws_client.py
151,437
4
21
json_deserialize
https://github.com/freqtrade/freqtrade.git
testing/debugging ws client script
5
0
35,014
6
2
4
def apply(func, args, kwargs=None): if kwargs: return func(*args, **kwargs) else: return func(*args)
dask/utils.py
53
dask
{ "docstring": "Apply a function given its positional and keyword arguments.\n\n Equivalent to ``func(*args, **kwargs)``\n Most Dask users will never need to use the ``apply`` function.\n It is typically only used by people who need to inject\n keyword argument values into a low level Dask task graph.\n\n Parameters\n ----------\n func : callable\n The function you want to apply.\n args : tuple\n A tuple containing all the positional arguments needed for ``func``\n (eg: ``(arg_1, arg_2, arg_3)``)\n kwargs : dict, optional\n A dictionary mapping the keyword arguments\n (eg: ``{\"kwarg_1\": value, \"kwarg_2\": value}``\n\n Examples\n --------\n >>> from dask.utils import apply\n >>> def add(number, second_number=5):\n ... return number + second_number\n ...\n >>> apply(add, (10,), {\"second_number\": 2}) # equivalent to add(*args, **kwargs)\n 12\n\n >>> task = apply(add, (10,), {\"second_number\": 2})\n >>> dsk = {'task-name': task} # adds the task to a low level Dask task graph\n ", "language": "en", "n_whitespaces": 240, "n_words": 139, "vocab_size": 100 }
12
Python
11
e61405cb5d345e73f1952ee3d50708566b5263d1
utils.py
156,832
5
32
apply
https://github.com/dask/dask.git
Docs: how to use kwargs with custom task graphs (#9322)
35
0
36,780
11
2
4
def preferred_index(self): if self._get_preferred_index(): return self.args[1] else: return self.args[0]
sympy/functions/special/tensor_functions.py
49
sympy
{ "docstring": "\n Returns the index which is preferred to keep in the final expression.\n\n Explanation\n ===========\n\n The preferred index is the index with more information regarding fermi\n level. If indices contain the same information, 'a' is preferred before\n 'b'.\n\n Examples\n ========\n\n >>> from sympy import KroneckerDelta, Symbol\n >>> a = Symbol('a', above_fermi=True)\n >>> i = Symbol('i', below_fermi=True)\n >>> j = Symbol('j', below_fermi=True)\n >>> p = Symbol('p')\n >>> KroneckerDelta(p, i).preferred_index\n i\n >>> KroneckerDelta(p, a).preferred_index\n a\n >>> KroneckerDelta(i, j).preferred_index\n i\n\n See Also\n ========\n\n killable_index\n\n ", "language": "en", "n_whitespaces": 242, "n_words": 80, "vocab_size": 55 }
9
Python
8
498015021131af4dbb07eb110e5badaba8250c7b
tensor_functions.py
196,258
5
29
preferred_index
https://github.com/sympy/sympy.git
Updated import locations
52
0
47,758
10
2
17
def isoformat(self, sep='T', timespec='auto'): s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) + _format_time(self._hour, self._minute, self._second, self._microsecond, timespec)) off = self.utcoffset() tz = _format_offset(off) if tz: s += tz return s
python3.10.4/Lib/datetime.py
121
XX-Net
{ "docstring": "Return the time formatted according to ISO.\n\n The full format looks like 'YYYY-MM-DD HH:MM:SS.mmmmmm'.\n By default, the fractional part is omitted if self.microsecond == 0.\n\n If self.tzinfo is not None, the UTC offset is also attached, giving\n giving a full format of 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM'.\n\n Optional argument sep specifies the separator between date and\n time, default 'T'.\n\n The optional argument timespec specifies the number of additional\n terms of the time to include. Valid options are 'auto', 'hours',\n 'minutes', 'seconds', 'milliseconds' and 'microseconds'.\n ", "language": "en", "n_whitespaces": 151, "n_words": 81, "vocab_size": 62 }
31
Python
26
8198943edd73a363c266633e1aa5b2a9e9c9f526
datetime.py
222,344
9
77
isoformat
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
121
0
56,543
11
1
13
async def test_cuda_visible_devices(self, job_manager): run_cmd = f"python {_driver_script_path('check_cuda_devices.py')}" job_id = job_manager.submit_job(entrypoint=run_cmd) await async_wait_for_condition( check_job_succeeded, job_manager=job_manager, job_id=job_id ) @pytest.mark.asyncio
dashboard/modules/job/tests/test_job_manager.py
79
@pytest.mark.asyncio
ray
{ "docstring": "Check CUDA_VISIBLE_DEVICES behavior.\n\n Should not be set in the driver, but should be set in tasks.\n ", "language": "en", "n_whitespaces": 30, "n_words": 16, "vocab_size": 13 }
18
Python
17
4c1f27118a3af246006ab63325cdff53321bf68b
test_job_manager.py
139,339
6
35
test_cuda_visible_devices
https://github.com/ray-project/ray.git
[job submission] Don't set CUDA_VISIBLE_DEVICES in job driver (#24546) Currently job drivers cannot use GPUs due to `CUDA_VISIBLE_DEVICES` being set (no resource request for job driver's supervisor actor). This is a regression from `ray submit`. This is a temporary workaround -- in the future we should support a resource request for the job supervisor actor.
63
1
31,668
11
1
8
def query_put_bounders(query, partition_column, start, end): where = " WHERE TMP_TABLE.{0} >= {1} AND TMP_TABLE.{0} <= {2}".format( partition_column, start, end ) query_with_bounders = "SELECT * FROM ({0}) AS TMP_TABLE {1}".format(query, where) return query_with_bounders
modin/experimental/core/execution/unidist/implementations/pandas_on_unidist/io/sql.py
58
modin
{ "docstring": "\n Put partition boundaries into the query.\n\n Parameters\n ----------\n query : str\n SQL query string.\n partition_column : str\n Column name used for data partitioning between the workers.\n start : int\n Lowest value to request from the `partition_column`.\n end : int\n Highest value to request from the `partition_column`.\n\n Returns\n -------\n str\n Query string with boundaries.\n ", "language": "en", "n_whitespaces": 122, "n_words": 53, "vocab_size": 38 }
32
Python
27
193505fdf0c984743397ba3df56262f30aee13a8
sql.py
155,212
6
36
query_put_bounders
https://github.com/modin-project/modin.git
FEAT-#5053: Add pandas on unidist execution with MPI backend (#5059) Signed-off-by: Igoshev, Iaroslav <[email protected]>
54
0
36,303
9
4
14
def resize_feats(self, feats):
    out = []
    for i in range(len(feats)):
        if i == 0:
            out.append(
                F.interpolate(
                    feats[0],
                    size=feats[i + 1].shape[-2:],
                    mode='bilinear',
                    align_corners=False))
        elif i == len(feats) - 1:
            out.append(
                F.interpolate(
                    feats[i],
                    size=feats[i - 1].shape[-2:],
                    mode='bilinear',
                    align_corners=False))
        else:
            out.append(feats[i])
    return out
mmdet/models/dense_heads/solo_head.py
198
mmdetection
{ "docstring": "Downsample the first feat and upsample last feat in feats.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
40
Python
29
d18cdb140ef3cb9ed5fdef6f1a815f5836f1b1ab
solo_head.py
244,278
20
127
resize_feats
https://github.com/open-mmlab/mmdetection.git
[Feature] Support SOLOv2 (#7441) * solov2 init * solov2 r18 lightweight * add model docstrings and reformat the code * add docstrings to model method * add solov2 big model config and correct some errors in the docstring * fix linting issues * refactor code and configs * rename variables according to the convention * add and enhance solov2 logic * add doc strings * update solov2 config files * fix norm_cfg in mask head * minor fix * update configs Co-authored-by: BIGWangYuDong <[email protected]>
368
0
70,305
19
2
4
def top_widget(self):
    if self.overlay:
        return self.overlay
    return self.top_window()
mitmproxy/tools/console/window.py
37
mitmproxy
{ "docstring": "\n The current top widget - either a window or the active overlay.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
8
Python
7
b3587b52b25077f68116b9852b041d33e7fc6601
window.py
251,580
4
21
top_widget
https://github.com/mitmproxy/mitmproxy.git
make it black!
40
0
73,795
8
1
21
def get_config(self):
    json_word_counts = json.dumps(self.word_counts)
    json_word_docs = json.dumps(self.word_docs)
    json_index_docs = json.dumps(self.index_docs)
    json_word_index = json.dumps(self.word_index)
    json_index_word = json.dumps(self.index_word)
    return {
        "num_words": self.num_words,
        "filters": self.filters,
        "lower": self.lower,
        "split": self.split,
        "char_level": self.char_level,
        "oov_token": self.oov_token,
        "document_count": self.document_count,
        "word_counts": json_word_counts,
        "word_docs": json_word_docs,
        "index_docs": json_index_docs,
        "index_word": json_index_word,
        "word_index": json_word_index,
    }
keras/preprocessing/text.py
203
keras
{ "docstring": "Returns the tokenizer configuration as Python dictionary.\n\n The word count dictionaries used by the tokenizer get serialized\n into plain JSON, so that the configuration can be read by other\n projects.\n\n Returns:\n A Python dictionary with the tokenizer configuration.\n ", "language": "en", "n_whitespaces": 84, "n_words": 38, "vocab_size": 30 }
44
Python
40
84afc5193d38057e2e2badf9c889ea87d80d8fbf
text.py
275,783
20
121
get_config
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
232
0
81,462
9
6
24
def write_file(masvs_file, input_file, output_file):
    # enhanced_masvs_dict = {}
    # for file in Path('masvs_yaml').glob('*.yaml'):
    #     masvs_dict = yaml.load(open(file))
    #     enhanced_masvs_dict[MASVS_TITLES[file.stem]] = masvs_dict
    masvs = yaml.safe_load(open(masvs_file))

    testcases_info = []

    for file in Path(input_file).glob("*.html"):
        contents = file.read_text()
        chapter = BeautifulSoup(contents, "lxml")
        # print(get_links_to_other_chapters(chapter))
        # print(get_all_links_to_tools(chapter))
        # print(get_links_to_tools_per_section(chapter))
        testcases_info += get_testcases_info(f"{file.stem}.md", chapter)
        # print_yaml(testcases_info)
        # print(get_sections_plain_text(chapter, "overview"))
        # print(get_sections_innerHtml(chapter, "overview"))

    for tc in testcases_info:
        for id in tc["mstg_ids"]:
            if masvs.get(id):
                # masvs[id].update(tc)
                masvs_req = masvs[id]
                if not masvs_req.get("links"):
                    masvs_req["links"] = []
                masvs_req["links"].append(tc["link"])
                # masvs_dict[id]['solution'].append(tc['overview'])  # todo

    # print_yaml(masvs)
    write_yaml_file(output_file, masvs)
tools/scripts/parse_html.py
227
owasp-mastg
{ "docstring": "\n Parses the MSTG and completes the MASVS file with information from the MSTG.\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 11 }
86
Python
53
a4c1ff1391bfa45b78db5473d1df4a1ace6651f5
parse_html.py
191,744
15
123
write_file
https://github.com/OWASP/owasp-mastg.git
Generate MSTG Checklists automatically and machine-readable YAML (#2010) * port masvs checklist generation to the mstg * add recursive ls * fix Tools -> tools * generate MSTG html * checkout latest masvs tag * use GITHUB_ENV instead of steps.output * add MASVS and MSTG link including versions and commit IDs * add new logo * set avenir as main font * add column status with validation * add conditional formatting for pass, fail and n/a * add step Show openpyxl Version * try format only relevant status cells * create new About sheet with the same header * add intro to project * black and flake8 fixes
298
0
46,842
16
4
6
def _signature_get_bound_param(spec):
    assert spec.startswith('($')

    pos = spec.find(',')
    if pos == -1:
        pos = spec.find(')')

    cpos = spec.find(':')
    assert cpos == -1 or cpos > pos

    cpos = spec.find('=')
    assert cpos == -1 or cpos > pos

    return spec[2:pos]
python3.10.4/Lib/inspect.py
134
XX-Net
{ "docstring": " Private helper to get first parameter name from a\n __text_signature__ of a builtin method, which should\n be in the following format: '($param1, ...)'.\n Assumptions are that the first argument won't have\n a default value or an annotation.\n ", "language": "en", "n_whitespaces": 53, "n_words": 37, "vocab_size": 33 }
38
Python
19
8198943edd73a363c266633e1aa5b2a9e9c9f526
inspect.py
218,383
10
76
_signature_get_bound_param
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
72
0
55,271
11
1
5
def upgrade():
    with op.get_context().autocommit_block():
        op.execute(
        )
        op.execute(
        )
        op.execute(
        )
        op.execute(
        )
src/prefect/orion/database/migrations/versions/postgresql/2022_06_04_133535_d60c18774a5d_add_indexes_for_partial_name_matches.py
77
prefect
{ "docstring": "\n CREATE INDEX CONCURRENTLY \n trgm_ix_flow_name \n ON flow USING gin (name gin_trgm_ops);\n \n CREATE INDEX CONCURRENTLY \n trgm_ix_flow_run_name \n ON flow_run USING gin (name gin_trgm_ops);\n \n CREATE INDEX CONCURRENTLY \n trgm_ix_task_run_name \n ON task_run USING gin (name gin_trgm_ops);\n \n CREATE INDEX CONCURRENTLY \n trgm_ix_deployment_name \n ON deployment USING gin (name gin_trgm_ops);\n ", "language": "en", "n_whitespaces": 228, "n_words": 40, "vocab_size": 16 }
12
Python
6
b5b3d808bf059294a7adf17156e4ccdb5a3799da
2022_06_04_133535_d60c18774a5d_add_indexes_for_partial_name_matches.py
56,199
30
39
upgrade
https://github.com/PrefectHQ/prefect.git
Add index migrations
118
0
11,462
11
1
8
def write_exports(self, exports):
    rf = self.get_distinfo_file(EXPORTS_FILENAME)
    with open(rf, 'w') as f:
        write_exports(exports, f)
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py
57
transferlearning
{ "docstring": "\n Write a dictionary of exports to a file in .ini format.\n :param exports: A dictionary of exports, mapping an export category to\n a list of :class:`ExportEntry` instances describing the\n individual export entries.\n ", "language": "en", "n_whitespaces": 100, "n_words": 32, "vocab_size": 25 }
13
Python
13
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
database.py
61,961
4
32
write_exports
https://github.com/jindongwang/transferlearning.git
upd; format
45
0
12,781
11
1
3
def pause_writing(self):
    self._app_protocol.pause_writing()
python3.10.4/Lib/asyncio/sslproto.py
25
XX-Net
{ "docstring": "Called when the low-level transport's buffer goes over\n the high-water mark.\n ", "language": "en", "n_whitespaces": 25, "n_words": 11, "vocab_size": 10 }
3
Python
3
8198943edd73a363c266633e1aa5b2a9e9c9f526
sslproto.py
220,708
2
13
pause_writing
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
17
0
56,095
8
5
11
def local_process_index(self):
    if is_torch_tpu_available():
        return xm.get_local_ordinal()
    elif is_sagemaker_mp_enabled():
        return smp.local_rank()
    elif is_sagemaker_dp_enabled():
        return dist.get_rank()
    elif self.local_rank != -1:
        return self.local_rank
    return 0
src/transformers/training_args.py
92
transformers
{ "docstring": "\n The index of the local process used.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
22
Python
15
81ac45f85c35244831f11f73c09ea10eee4f953a
training_args.py
36,752
10
53
local_process_index
https://github.com/huggingface/transformers.git
update smddp api to v1.4.0 (#16371) * update smddp api to v1.4.0 * Update src/transformers/trainer.py Co-authored-by: Sylvain Gugger <[email protected]> * Update src/transformers/trainer.py Co-authored-by: Sylvain Gugger <[email protected]> * address comments * fix style * remove unused import * fix indent * disable style check for import * fix space Co-authored-by: Sylvain Gugger <[email protected]>
108
0
6,671
10
1
8
async def test_only_one_lock(hass, client, lock_home_connect_620, integration):
    assert len(hass.states.async_entity_ids("lock")) == 1
tests/components/zwave_js/test_lock.py
44
core
{ "docstring": "Test node with both Door Lock and Lock CC values only gets one lock entity.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 14 }
10
Python
10
9d14201b13be4f5a5cc5e5f52bba56bfd8fa9694
test_lock.py
294,590
2
26
test_only_one_lock
https://github.com/home-assistant/core.git
Don't create two zwave_js.lock entities for a single device (#68651)
16
0
93,624
11
1
6
def axis_1(request):
    return request.param


@pytest.fixture(params=[True, False, None])
pandas/conftest.py
41
@pytest.fixture(params=[True, False, None])
pandas
{ "docstring": "\n Fixture for returning aliases of axis 1 of a DataFrame.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 9 }
7
Python
7
15a06d3d9e7656afff239da7a295a7b684456680
conftest.py
164,912
2
10
axis_1
https://github.com/pandas-dev/pandas.git
BUG: groupby.size and groupby.transform('size') incorrect for axis=1 (#45987)
12
1
39,619
8
12
44
def list_summaries(logdir):
    result = _SummaryFile()
    for (dirpath, _, filenames) in os.walk(logdir):
        for filename in filenames:
            if not filename.startswith("events.out."):
                continue
            path = os.path.join(dirpath, filename)
            for event in tf.compat.v1.train.summary_iterator(path):
                if event.graph_def:
                    result.graph_defs.append(event.graph_def)
                if not event.summary:  # (e.g., it's a `graph_def` event)
                    continue
                for value in event.summary.value:
                    tag = value.tag
                    # Case on the `value` rather than the summary metadata because
                    # the Keras callback uses `summary_ops_v2` to emit old-style
                    # summaries. See b/124535134.
                    kind = value.WhichOneof("value")
                    container = {
                        "simple_value": result.scalars,
                        "image": result.images,
                        "histo": result.histograms,
                        "tensor": result.tensors,
                    }.get(kind)
                    if container is None:
                        raise ValueError(
                            "Unexpected summary kind %r in event file %s:\n%r"
                            % (kind, path, event)
                        )
                    elif kind == "tensor" and tag != "keras":
                        # Convert the tf2 summary proto to old style for type checking.
                        plugin_name = value.metadata.plugin_data.plugin_name
                        container = {
                            "images": result.images,
                            "histograms": result.histograms,
                            "scalars": result.scalars,
                        }.get(plugin_name)
                        if container is not None:
                            result.convert_from_v2_summary_proto = True
                        else:
                            container = result.tensors
                    container.add(_ObservedSummary(logdir=dirpath, tag=tag))
    return result


@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes(always_skip_v1=True)
keras/callbacks_test.py
426
@test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes(always_skip_v1=True)
keras
{ "docstring": "Read all summaries under the logdir into a `_SummaryFile`.\n\n Args:\n logdir: A path to a directory that contains zero or more event\n files, either as direct children or in transitive subdirectories.\n Summaries in these events must only contain old-style scalars,\n images, and histograms. Non-summary events, like `graph_def`s, are\n ignored.\n\n Returns:\n A `_SummaryFile` object reflecting all summaries written to any\n event files in the logdir or any of its descendant directories.\n\n Raises:\n ValueError: If an event file contains an summary of unexpected kind.\n ", "language": "en", "n_whitespaces": 142, "n_words": 82, "vocab_size": 65 }
156
Python
107
84afc5193d38057e2e2badf9c889ea87d80d8fbf
callbacks_test.py
269,998
39
245
list_summaries
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
960
1
80,375
22
1
2
def dash(self):
    return self["dash"]
packages/python/plotly/plotly/graph_objs/contour/_line.py
22
plotly.py
{ "docstring": "\n Sets the dash style of lines. Set to a dash type string\n (\"solid\", \"dot\", \"dash\", \"longdash\", \"dashdot\", or\n \"longdashdot\") or a dash length list in px (eg\n \"5px,10px,2px,2px\").\n\n The 'dash' property is an enumeration that may be specified as:\n - One of the following dash styles:\n ['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']\n - A string containing a dash length list in pixels or percentages\n (e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)\n\n Returns\n -------\n str\n ", "language": "en", "n_whitespaces": 192, "n_words": 80, "vocab_size": 65 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_line.py
229,539
2
11
dash
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
61,212
7
8
17
def _get_state(self):
    result = {}
    for child_attr, child_obj in self.__dict__.items():
        # TODO(rchao): Store non-variable states in the dict as well.
        if isinstance(child_obj, tf.Variable):
            result[child_attr] = child_obj.numpy()
        elif saving_lib.is_container(child_obj):
            for k, contained_obj in enumerate(child_obj):
                if isinstance(contained_obj, tf.Variable):
                    # Handling the case where `child_obj` is a list/tuple.
                    result[f"{child_attr}-{k}"] = contained_obj.numpy()
                elif isinstance(child_obj, dict) and isinstance(
                    child_obj[contained_obj], tf.Variable
                ):
                    # Handling the case where `child_obj` is a dict.
                    result[f"{child_attr}-{contained_obj}"] = child_obj[
                        contained_obj
                    ].numpy()
    return result
keras/engine/base_layer.py
205
keras
{ "docstring": "Experimental method for getting the state of this layer object.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
72
Python
50
ba5086fa31d24a9f61b46d4a844311b58dea7ff1
base_layer.py
279,672
16
118
_get_state
https://github.com/keras-team/keras.git
Keras saving: A prototype of config-based (idempotent) saving and loading, with simple model state restoration added. It's done via the archive provided by `zipfile` package. Preliminary for review and the APIs and implementation are subject to changes. PiperOrigin-RevId: 470784761
385
0
83,087
19
19
19
def radius_of_convergence(self):
    if any(a.is_integer and (a <= 0) == True for a in self.ap + self.bq):
        aints = [a for a in self.ap if a.is_Integer and (a <= 0) == True]
        bints = [a for a in self.bq if a.is_Integer and (a <= 0) == True]
        if len(aints) < len(bints):
            return S.Zero
        popped = False
        for b in bints:
            cancelled = False
            while aints:
                a = aints.pop()
                if a >= b:
                    cancelled = True
                    break
                popped = True
            if not cancelled:
                return S.Zero
        if aints or popped:
            # There are still non-positive numerator parameters.
            # This is a polynomial.
            return oo
    if len(self.ap) == len(self.bq) + 1:
        return S.One
    elif len(self.ap) <= len(self.bq):
        return oo
    else:
        return S.Zero
sympy/functions/special/hyper.py
294
sympy
{ "docstring": "\n Compute the radius of convergence of the defining series.\n\n Explanation\n ===========\n\n Note that even if this is not ``oo``, the function may still be\n evaluated outside of the radius of convergence by analytic\n continuation. But if this is zero, then the function is not actually\n defined anywhere else.\n\n Examples\n ========\n\n >>> from sympy import hyper\n >>> from sympy.abc import z\n >>> hyper((1, 2), [3], z).radius_of_convergence\n 1\n >>> hyper((1, 2, 3), [4], z).radius_of_convergence\n 0\n >>> hyper((1, 2), (3, 4), z).radius_of_convergence\n oo\n\n ", "language": "en", "n_whitespaces": 207, "n_words": 80, "vocab_size": 54 }
118
Python
61
498015021131af4dbb07eb110e5badaba8250c7b
hyper.py
196,251
25
185
radius_of_convergence
https://github.com/sympy/sympy.git
Updated import locations
479
0
47,751
14
2
13
def get_ordered_amount(args, budget):
	item_code = args.get("item_code")
	condition = get_other_condition(args, budget, "Purchase Order")

	data = frappe.db.sql(
		.format(
			condition
		),
		item_code,
		as_list=1,
	)

	return data[0][0] if data else 0
erpnext/accounts/doctype/budget/budget.py
92
erpnext
{ "docstring": " select ifnull(sum(child.amount - child.billed_amt), 0) as amount\n\t\tfrom `tabPurchase Order Item` child, `tabPurchase Order` parent where\n\t\tparent.name = child.parent and child.item_code = %s and parent.docstatus = 1 and child.amount > child.billed_amt\n\t\tand parent.status != 'Closed' and {0}", "language": "en", "n_whitespaces": 34, "n_words": 37, "vocab_size": 30 }
27
Python
23
494bd9ef78313436f0424b918f200dab8fc7c20b
budget.py
64,812
14
59
get_ordered_amount
https://github.com/frappe/erpnext.git
style: format code with black
16
0
13,728
11
4
13
def set_color_by_t2c(self, t2c=None):
    t2c = t2c if t2c else self.t2c
    for word, color in list(t2c.items()):
        for start, end in self.find_indexes(word, self.text):
            self.chars[start:end].set_color(color)
manim/mobject/svg/text_mobject.py
96
manim
{ "docstring": "Internally used function. Sets color for specified strings.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
22
Python
18
540dc70d2fd7a2f759a6da158303ef81a1ae53f8
text_mobject.py
189,392
5
62
set_color_by_t2c
https://github.com/ManimCommunity/manim.git
Update `Text` to use new ManimPango color setting (#2341) * Find indexes in stripped text, not original text * Add regression test * Only run the test in linux environement * Rewrite text2settings in Text to set text color via pango * Make gradient in Text use pango coloring * Bump manimpango to newest version * Update test to use new frames_comparison * Don't remove svg file on exception * Bump manimpango * Fix pre-commit errors * Fix index bug * Deprecate no longer used functions set_color_by_t2x * Remove old commented out code * Update poetry.lock
69
0
46,033
13
2
7
def sleeper(self, duration):
    s = time()
    yield
    time_to_sleep = duration - (time() - s)
    if time_to_sleep > 0:
        self.wait(time_to_sleep)
src/streamlink/stream/dash.py
63
streamlink
{ "docstring": "\n Do something and then wait for a given duration minus the time it took doing something\n ", "language": "en", "n_whitespaces": 31, "n_words": 16, "vocab_size": 15 }
19
Python
16
d1a8d1597d4fe9f129a72fe94c1508304b7eae0f
dash.py
187,407
6
36
sleeper
https://github.com/streamlink/streamlink.git
stream.dash: update DASHStreamWorker.iter_segments - Refactor DASHStreamWorker.iter_segments() - Replace dash_manifest.sleeper() with SegmentedStreamWorker.wait(), and make the worker thread shut down immediately on close(). - Prevent unnecessary wait times for static manifest types by calling close() after all segments were put into the writer's queue.
65
0
45,770
11
5
9
def _group_lengths(grouping):
    # The result from localeconv()['grouping'], and the input to this
    # function, should be a list of integers in one of the
    # following three forms:
    #
    #   (1) an empty list, or
    #   (2) nonempty list of positive integers + [0]
    #   (3) list of positive integers + [locale.CHAR_MAX], or

    from itertools import chain, repeat
    if not grouping:
        return []
    elif grouping[-1] == 0 and len(grouping) >= 2:
        return chain(grouping[:-1], repeat(grouping[-2]))
    elif grouping[-1] == _locale.CHAR_MAX:
        return grouping[:-1]
    else:
        raise ValueError('unrecognised format for grouping')
python3.10.4/Lib/_pydecimal.py
138
XX-Net
{ "docstring": "Convert a localeconv-style grouping into a (possibly infinite)\n iterable of integers representing group lengths.\n\n ", "language": "en", "n_whitespaces": 20, "n_words": 14, "vocab_size": 13 }
86
Python
62
8198943edd73a363c266633e1aa5b2a9e9c9f526
_pydecimal.py
219,711
10
79
_group_lengths
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
159
0
55,734
14
5
26
def expand(image, border=0, fill=0):
    left, top, right, bottom = _border(border)
    width = left + image.size[0] + right
    height = top + image.size[1] + bottom
    color = _color(fill, image.mode)
    if image.mode == "P" and image.palette:
        palette = ImagePalette.ImagePalette(palette=image.getpalette())
        if isinstance(color, tuple):
            color = palette.getcolor(color)
    else:
        palette = None
    out = Image.new(image.mode, (width, height), color)
    if palette:
        out.putpalette(palette.palette)
    out.paste(image, (left, top))
    return out
src/PIL/ImageOps.py
230
Pillow
{ "docstring": "\n Add border to the image\n\n :param image: The image to expand.\n :param border: Border width, in pixels.\n :param fill: Pixel fill value (a color value). Default is 0 (black).\n :return: An image.\n ", "language": "en", "n_whitespaces": 52, "n_words": 32, "vocab_size": 28 }
61
Python
45
279ddf4ce6c76498ac29df2552a3023b9aaa76c1
ImageOps.py
243,427
16
149
expand
https://github.com/python-pillow/Pillow.git
Use getpalette() in ImageOps
133
0
70,030
13
5
21
def merge_account(old, new, is_group, root_type, company):
	# Validate properties before merging
	if not frappe.db.exists("Account", new):
		throw(_("Account {0} does not exist").format(new))

	val = list(frappe.db.get_value("Account", new, ["is_group", "root_type", "company"]))

	if val != [cint(is_group), root_type, company]:
		throw(
			_(
			)
		)

	if is_group and frappe.db.get_value("Account", new, "parent_account") == old:
		frappe.db.set_value(
			"Account", new, "parent_account", frappe.db.get_value("Account", old, "parent_account")
		)

	frappe.rename_doc("Account", old, new, merge=1, force=1)

	return new


@frappe.whitelist()
erpnext/accounts/doctype/account/account.py
248
@frappe.whitelist()
erpnext
{ "docstring": "Merging is only possible if following properties are same in both records. Is Group, Root Type, Company", "language": "en", "n_whitespaces": 16, "n_words": 17, "vocab_size": 17 }
61
Python
47
494bd9ef78313436f0424b918f200dab8fc7c20b
account.py
64,741
16
145
merge_account
https://github.com/frappe/erpnext.git
style: format code with black
44
1
13,713
14
11
16
def print_help(self):
    has_fund_start = "" if self.fund_symbol else "[unvl]"
    has_fund_end = "" if self.fund_symbol else "[/unvl]"
    has_fund_usa_start = (
        "" if self.fund_symbol and self.country == "united states" else "[unvl]"
    )
    has_fund_usa_end = (
        "" if self.fund_symbol and self.country == "united states" else "[/unvl]"
    )
    if self.fund_name:
        if self.fund_symbol:
            fund_string = f"{self.fund_name} ({self.fund_symbol})"
        else:
            fund_string = f"{self.fund_name}"
    else:
        fund_string = ""
    help_text = f
    if self.fund_symbol != "" and self.country == "sweden":
        help_text +=
    console.print(text=help_text, menu="Mutual Funds")
gamestonk_terminal/mutual_funds/mutual_fund_controller.py
267
OpenBBTerminal
{ "docstring": "Print help\n[src][Investing.com][/src][cmds]\n country set a country for filtering[/cmds]\n\n[param]Current Country: [/param]{self.country.title()}\n\n[src][Investing.com][/src][cmds]\n overview overview of top funds by country\n search search for Mutual Funds\n load load historical fund data[/cmds]\n\n[param]Current Fund: [/param]{fund_string}\n{has_fund_start}\n[src][Investing.com][/src][cmds]\n info get fund information\n plot plot loaded historical fund data{has_fund_end}{has_fund_usa_start}\n[src][YFinance][/src]\n sector sector weightings\n equity equity holdings[/cmds]{has_fund_usa_end}\n \n[src][Avanza][/src]\n al_swe display fund allocation (sector, country, holdings)\n info_swe get fund information\n ", "language": "en", "n_whitespaces": 164, "n_words": 64, "vocab_size": 45 }
76
Python
35
493617752699ff4ab63a1ed9df478ac030e68492
mutual_fund_controller.py
282,826
43
115
print_help
https://github.com/OpenBB-finance/OpenBBTerminal.git
Add avanza mutual fund data and commands (#1452) * Adding info_se and al_swe commands * Linting * Linting * linting * Fixes * Fixes to formatting * Linting * Linting * Linting Co-authored-by: jmaslek <[email protected]> Co-authored-by: Colin Delahunty <[email protected]> Co-authored-by: didierlopes.eth <[email protected]>
257
0
84,315
14
1
11
def forward_dummy(self, img, img_metas):
    super(SingleStageDetector, self).forward_train(img, img_metas)
    x = self.extract_feat(img)
    outs = self.panoptic_head(x, img_metas)
    return outs
mmdet/models/detectors/maskformer.py
67
mmdetection
{ "docstring": "Used for computing network flops. See\n `mmdetection/tools/analysis_tools/get_flops.py`\n\n Args:\n img (Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be mean centered and std scaled.\n img_metas (list[Dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n ", "language": "en", "n_whitespaces": 179, "n_words": 61, "vocab_size": 55 }
16
Python
13
cac356380d505bf15587f07c0529218cc36b9652
maskformer.py
244,047
5
43
forward_dummy
https://github.com/open-mmlab/mmdetection.git
[Feature] Add Maskformer to mmdet (#7212) * first commit * add README * move model description from config to readme add description for binary_input add description for dice loss add a independent panoptic gt processing function add a independent panoptic gt processing function remove compatibility of pretrain in maskformer * update comments in maskformer_head * update docs format
51
0
70,214
9
1
4
def setDefaultWhitespaceChars(chars):
    r
    ParserElement.DEFAULT_WHITE_CHARS = chars
.venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py
21
transferlearning
{ "docstring": "\n Overrides the default whitespace chars\n\n Example::\n\n # default whitespace chars are space, <TAB> and newline\n OneOrMore(Word(alphas)).parseString(\"abc def\\nghi jkl\") # -> ['abc', 'def', 'ghi', 'jkl']\n\n # change to just treat newline as significant\n ParserElement.setDefaultWhitespaceChars(\" \\t\")\n OneOrMore(Word(alphas)).parseString(\"abc def\\nghi jkl\") # -> ['abc', 'def']\n ", "language": "en", "n_whitespaces": 120, "n_words": 41, "vocab_size": 29 }
6
Python
6
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
pyparsing.py
63,356
14
12
setDefaultWhitespaceChars
https://github.com/jindongwang/transferlearning.git
upd; format
19
0
13,265
7
7
9
def override_recursive(a, b):
    for key in b:
        if isinstance(b[key], dict) is False:
            a[key] = b[key]
        elif key not in a or isinstance(a[key], dict) is False:
            a[key] = b[key]
        # make config section empty by demand
        elif isinstance(b[key], dict) is True and b[key] == {}:
            a[key] = b[key]
        else:
            override_recursive(a[key], b[key])


@pytest.fixture(scope="module")
tests/integration_tests/flows/conftest.py
176
@pytest.fixture(scope="module")
mindsdb
{ "docstring": "Overrides some elements in json 'a' by elements in json 'b'", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 8 }
51
Python
35
ae4fa77a2c0a9fa57cc9c8bc7e8961dd01e4067e
conftest.py
117,084
10
106
override_recursive
https://github.com/mindsdb/mindsdb.git
It mysql api test pytest (#3694) * migration to pytest * Tests start passing * Fully working tests * Increase timeout for mindsdb start * reduce amount of logs * show logs only for failed tests
135
1
25,896
13
1
5
def result(term):
    print("\n" + str(calc(term)))
calculator.py
36
Python
{ "docstring": "\n input: term of type str\n output: none\n purpose: passes the argument to the function calc(...) and\n prints the result onto console.\n ", "language": "en", "n_whitespaces": 46, "n_words": 21, "vocab_size": 19 }
5
Python
5
f0af0c43340763724f139fa68aa1e5a9ffe458b4
calculator.py
22,595
2
18
result
https://github.com/geekcomputers/Python.git
refactor: clean code Signed-off-by: slowy07 <[email protected]>
11
0
4,374
12
3
9
def installed_location(self) -> Optional[str]:
    egg_link = egg_link_path_from_location(self.raw_name)
    if egg_link:
        location = egg_link
    elif self.location:
        location = self.location
    else:
        return None
    return normalize_path(location)
pipenv/patched/notpip/_internal/metadata/base.py
74
pipenv
{ "docstring": "The distribution's \"installed\" location.\n\n This should generally be a ``site-packages`` directory. This is\n usually ``dist.location``, except for legacy develop-installed packages,\n where ``dist.location`` is the source code location, and this is where\n the ``.egg-link`` file is.\n\n The returned location is normalized (in particular, with symlinks removed).\n ", "language": "en", "n_whitespaces": 87, "n_words": 45, "vocab_size": 38 }
22
Python
17
f3166e673fe8d40277b804d35d77dcdb760fc3b3
base.py
19,913
18
44
installed_location
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
97
0
3,146
10
2
14
def spsolve(data, indices, indptr, b, tol=1e-6, reorder=1):
  if jax._src.lib.xla_extension_version < 86:
    raise ValueError('spsolve requires jaxlib version 86 or above.')
  return spsolve_p.bind(data, indices, indptr, b, tol=tol, reorder=reorder)
jax/experimental/sparse/linalg.py
83
jax
{ "docstring": "A sparse direct solver using QR factorization.\n\n Accepts a sparse matrix in CSR format `data, indices, indptr` arrays.\n Currently only the CUDA GPU backend is implemented.\n\n Args:\n data : An array containing the non-zero entries of the CSR matrix.\n indices : The column indices of the CSR matrix.\n indptr : The row pointer array of the CSR matrix.\n b : The right hand side of the linear system.\n tol : Tolerance to decide if singular or not. Defaults to 1e-6.\n reorder : The reordering scheme to use to reduce fill-in. No reordering if\n `reorder=0'. Otherwise, symrcm, symamd, or csrmetisnd (`reorder=1,2,3'),\n respectively. Defaults to symrcm.\n\n Returns:\n An array with the same dtype and size as b representing the solution to\n the sparse linear system.\n ", "language": "en", "n_whitespaces": 166, "n_words": 123, "vocab_size": 81 }
26
Python
23
2bc3e39cd9104071ee39dacac22abd51b94eb27e
linalg.py
121,503
4
59
spsolve
https://github.com/google/jax.git
Sparse direct solver using QR factorization from cuSOLVER. This is the jaxlib implementation. We will want to combine this with the sparse libraries already existing in JAX. PiperOrigin-RevId: 468303019
32
0
27,067
10
3
14
def request_params(self, **kwargs) -> MutableMapping[str, Any]:
    params = {"limit": self.page_size}

    if self._include_deleted:
        params.update(self._filter_all_statuses())

    if self.send_fields:
        params.update({"fields": ",".join(self.fields)})

    return params
airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams.py
109
airbyte
{ "docstring": "Parameters that should be passed to query_records method", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
19
Python
17
2573fa145a1fbf4e849d26c54de105bcacde114f
streams.py
3,690
8
64
request_params
https://github.com/airbytehq/airbyte.git
🎉 Source Facebook Marketing: Add AdAccount and Images stream implementation (#10180) * Add AdAccount and Images stream implementation * Update PR number * Updated docker version * Updated to linter * Update to review * Add comment to AdAccount read_records method * Bumped version in seed, definitions and specs files
76
0
516
14
1
9
def axis(self):
    q = self
    AX = Quaternion(0, q.b, q.c, q.d).normalize()
    return AX
sympy/algebras/quaternion.py
53
sympy
{ "docstring": "\n Returns the axis part of the quaternion.\n\n Returns\n =======\n Ax : The axis of the quaternion.\n\n Examples\n ========\n\n >>> from sympy.algebras.quaternion import Quaternion\n >>> q = Quaternion(1, 1, 1, 1)\n >>> q.axis()\n 0 + sqrt(3)/3*i + sqrt(3)/3*j + sqrt(3)/3*k\n\n >>> q = Quaternion(4, 8, 13, 12)\n >>> q.axis()\n 0 + 8*sqrt(377)/377*i + sqrt(377)/29*j + 12*sqrt(377)/377*k\n\n ", "language": "en", "n_whitespaces": 154, "n_words": 55, "vocab_size": 35 }
13
Python
11
e8c5f4fe692e863bf0a48573a1d0c7b92487c5c1
quaternion.py
196,510
4
33
axis
https://github.com/sympy/sympy.git
hamilton
41
0
47,950
11
1
11
def dagrun_queued(self):
    dag_id = request.form.get('dag_id')
    dag_run_id = request.form.get('dag_run_id')
    confirmed = request.form.get('confirmed') == 'true'
    origin = get_safe_url(request.form.get('origin'))
    return self._mark_dagrun_state_as_queued(dag_id, dag_run_id, confirmed, origin)
airflow/www/views.py
112
airflow
{ "docstring": "Queue DagRun so tasks that haven't run yet can be started.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
21
Python
18
afd3c135c7d1815c56578d020625a33dc27fe640
views.py
46,033
6
64
dagrun_queued
https://github.com/apache/airflow.git
Add queue button to click-on-DagRun interface. (#21555) * Initial implementation of adding Queue button to DagRun interface * Implement the test cases * FIX Add all required MyPy ignores * FIX import * Update airflow/www/views.py FIX Documentation Co-authored-by: Brent Bovenzi <[email protected]> * update modal UI Co-authored-by: Brent Bovenzi <[email protected]>
63
0
8,767
11
1
13
def _start(self) -> int:
    warnings.warn(
        self._deprecation_message.format("_start", "start"),
        FutureWarning,
        stacklevel=find_stack_level(inspect.currentframe()),
    )
    return self.start
pandas/core/indexes/range.py
69
pandas
{ "docstring": "\n The value of the `start` parameter (``0`` if this was not supplied).\n\n .. deprecated:: 0.25.0\n Use ``start`` instead.\n ", "language": "en", "n_whitespaces": 52, "n_words": 18, "vocab_size": 18 }
12
Python
12
2f8d0a36703e81e4dca52ca9fe4f58c910c1b304
range.py
168,252
13
41
_start
https://github.com/pandas-dev/pandas.git
PERF cache find_stack_level (#48023) cache stacklevel
73
0
40,259
12
2
6
def string_position(self, id_):
    if self.bow:
        return self.string_start[self.positions[id_]]
    else:
        return self.string_start[[self.positions[id_]]]
examples/model_interpretation/task/senti/LIME/lime_text.py
64
PaddleNLP
{ "docstring": "Returns a np array with indices to id_ (int) occurrences", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
10
Python
9
93cae49c0c572b5c1ac972759140fbe924b0374d
lime_text.py
322,894
5
41
string_position
https://github.com/PaddlePaddle/PaddleNLP.git
Add NLP model interpretation (#1752) * upload NLP interpretation * fix problems and relocate project * remove abandoned picture * remove abandoned picture * fix dead link in README * fix dead link in README * fix code style problems * fix CR round 1 * remove .gitkeep files * fix code style * fix file encoding problem * fix code style * delete duplicated files due to directory rebuild * fix CR round 2 * fix code style * fix ernie tokenizer * fix code style * fix problem from CR round 1 * fix bugs * fix README * remove duplicated files * deal with diff of old and new tokenizer results * fix CR round 4 * fix code style * add missing dependence * fix broken import path * move some data file to cloud * MRC upper case to lower case Co-authored-by: Zeyu Chen <[email protected]> Co-authored-by: binlinquge <xxx> Co-authored-by: Guo Sheng <[email protected]>
53
0
118,273
12
5
13
def _get_save_args(self) -> Tuple[int, ...]:
    filetype = self.config["format"]
    args: Tuple[int, ...] = tuple()
    if filetype == "jpg" and self.config["jpg_quality"] > 0:
        args = (cv2.IMWRITE_JPEG_QUALITY,  # pylint: disable=no-member
                self.config["jpg_quality"])
    if filetype == "png" and self.config["png_compress_level"] > -1:
        args = (cv2.IMWRITE_PNG_COMPRESSION,  # pylint: disable=no-member
                self.config["png_compress_level"])
    logger.debug(args)
    return args
plugins/convert/writer/opencv.py
165
faceswap
{ "docstring": " Obtain the save parameters for the file format.\n\n Returns\n -------\n tuple\n The OpenCV specific arguments for the selected file format\n ", "language": "en", "n_whitespaces": 61, "n_words": 20, "vocab_size": 16 }
46
Python
31
049314429f71a21e6595e9d27e9e36f6a3479c42
opencv.py
101,068
18
98
_get_save_args
https://github.com/deepfakes/faceswap.git
Convert: Add option to output mask separately for draw-transparent
157
0
20,505
11
2
25
def test_resample():
    n = 101
    colorlist = np.empty((n, 4), float)
    colorlist[:, 0] = np.linspace(0, 1, n)
    colorlist[:, 1] = 0.2
    colorlist[:, 2] = np.linspace(1, 0, n)
    colorlist[:, 3] = 0.7
    lsc = mcolors.LinearSegmentedColormap.from_list('lsc', colorlist)
    lc = mcolors.ListedColormap(colorlist)
    # Set some bad values for testing too
    for cmap in [lsc, lc]:
        cmap.set_under('r')
        cmap.set_over('g')
        cmap.set_bad('b')
    lsc3 = lsc.resample(3)
    lc3 = lc.resample(3)
    expected = np.array([[0.0, 0.2, 1.0, 0.7],
                         [0.5, 0.2, 0.5, 0.7],
                         [1.0, 0.2, 0.0, 0.7]], float)
    assert_array_almost_equal(lsc3([0, 0.5, 1]), expected)
    assert_array_almost_equal(lc3([0, 0.5, 1]), expected)
    # Test over/under was copied properly
    assert_array_almost_equal(lsc(np.inf), lsc3(np.inf))
    assert_array_almost_equal(lsc(-np.inf), lsc3(-np.inf))
    assert_array_almost_equal(lsc(np.nan), lsc3(np.nan))
    assert_array_almost_equal(lc(np.inf), lc3(np.inf))
    assert_array_almost_equal(lc(-np.inf), lc3(-np.inf))
    assert_array_almost_equal(lc(np.nan), lc3(np.nan))
lib/matplotlib/tests/test_colors.py
467
matplotlib
{ "docstring": "\n GitHub issue #6025 pointed to incorrect ListedColormap.resample;\n here we test the method for LinearSegmentedColormap as well.\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 16 }
100
Python
76
1e40f41713fab2d4a86aa26766b3cf6cccd9203d
test_colors.py
109,337
26
337
test_resample
https://github.com/matplotlib/matplotlib.git
ENH: Make the ability to resample interpolated colormaps public
238
0
23,532
11
5
16
def test_cr_image_consistency():
    cr = _get_basic_ray_cr()

    group_specs = [cr["spec"]["headGroupSpec"]] + cr["spec"]["workerGroupSpecs"]
    # Head, CPU group, GPU group.
    assert len(group_specs) == 3

    ray_containers = [
        group_spec["template"]["spec"]["containers"][0] for group_spec in group_specs
    ]

    # All Ray containers in the example config have "ray-" in their name.
    assert all("ray-" in ray_container["name"] for ray_container in ray_containers)

    # All Ray images are from the Ray repo.
    assert all(
        "rayproject/ray" in ray_container["image"] for ray_container in ray_containers
    )

    # All Ray images are the same.
    assert len({ray_container["image"] for ray_container in ray_containers}) == 1


@pytest.mark.parametrize("exception", [Exception, requests.HTTPError])
@pytest.mark.parametrize("num_exceptions", range(6))
python/ray/tests/kuberay/test_autoscaling_config.py
229
@pytest.mark.parametrize("exception", [Exception, requests.HTTPError]) @pytest.mark.parametrize("num_exceptions", range(6))
ray
{ "docstring": "Verify that the example config uses the same Ray image for all Ray pods.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 12 }
89
Python
57
7d3ceb222c8af98a5c101b1c28ab37ffcb0a3793
test_autoscaling_config.py
124,155
12
101
test_cr_image_consistency
https://github.com/ray-project/ray.git
[kuberay][autoscaler] Improve CPU, GPU, and memory detection. (#26219) This PR improves the autoscaler's resource detection logic
143
1
27,530
12
2
12
def resized_crop(clip, i, j, h, w, size, interpolation_mode="bilinear"):
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    clip = crop(clip, i, j, h, w)
    clip = resize(clip, size, interpolation_mode)
    return clip
torchvision/transforms/_functional_video.py
87
vision
{ "docstring": "\n Do spatial cropping and resizing to the video clip\n Args:\n clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)\n i (int): i in (i,j) i.e coordinates of the upper left corner.\n j (int): j in (i,j) i.e coordinates of the upper left corner.\n h (int): Height of the cropped region.\n w (int): Width of the cropped region.\n size (tuple(int, int)): height and width of resized clip\n Returns:\n clip (torch.tensor): Resized and cropped clip. Size is (C, T, H, W)\n ", "language": "en", "n_whitespaces": 145, "n_words": 83, "vocab_size": 46 }
32
Python
25
289fce29b3e2392114aadbe7a419df0f2e3ac1be
_functional_video.py
192,417
6
58
resized_crop
https://github.com/pytorch/vision.git
Replace asserts with exceptions (#5587) * replace most asserts with exceptions * fix formating issues * fix linting and remove more asserts * fix regresion * fix regresion * fix bug * apply ufmt * apply ufmt * fix tests * fix format * fix None check * fix detection models tests * non scriptable any * add more checks for None values * fix retinanet test * fix retinanet test * Update references/classification/transforms.py Co-authored-by: Nicolas Hug <[email protected]> * Update references/classification/transforms.py Co-authored-by: Nicolas Hug <[email protected]> * Update references/optical_flow/transforms.py Co-authored-by: Nicolas Hug <[email protected]> * Update references/optical_flow/transforms.py Co-authored-by: Nicolas Hug <[email protected]> * Update references/optical_flow/transforms.py Co-authored-by: Nicolas Hug <[email protected]> * make value checks more pythonic: * Update references/optical_flow/transforms.py Co-authored-by: Nicolas Hug <[email protected]> * make value checks more pythonic * make more checks pythonic * fix bug * appy ufmt * fix tracing issues * fib typos * fix lint * remove unecessary f-strings * fix bug * Update torchvision/datasets/mnist.py Co-authored-by: Nicolas Hug <[email protected]> * Update torchvision/datasets/mnist.py Co-authored-by: Nicolas Hug <[email protected]> * Update torchvision/ops/boxes.py Co-authored-by: Nicolas Hug <[email protected]> * Update torchvision/ops/poolers.py Co-authored-by: Nicolas Hug <[email protected]> * Update torchvision/utils.py Co-authored-by: Nicolas Hug <[email protected]> * address PR comments * Update torchvision/io/_video_opt.py Co-authored-by: Nicolas Hug <[email protected]> * Update torchvision/models/detection/generalized_rcnn.py Co-authored-by: Nicolas Hug <[email protected]> * Update torchvision/models/feature_extraction.py Co-authored-by: Nicolas Hug <[email protected]> * Update torchvision/models/optical_flow/raft.py Co-authored-by: Nicolas Hug <[email protected]> * address PR comments * addressing further pr comments * fix bug * remove unecessary else * apply ufmt * last pr comment * replace RuntimeErrors Co-authored-by: Nicolas Hug <[email protected]>
54
0
46,892
10
1
13
def test_spectral_params_validation(input, params, err_type, err_msg):
    est = SpectralClustering(**params)
    with pytest.raises(err_type, match=err_msg):
        est.fit(input)


@pytest.mark.parametrize("assign_labels", ("kmeans", "discretize", "cluster_qr"))
sklearn/cluster/tests/test_spectral.py
93
@pytest.mark.parametrize("assign_labels", ("kmeans", "discretize", "cluster_qr"))
scikit-learn
{ "docstring": "Check the parameters validation in `SpectralClustering`.", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
16
Python
16
26e2c38a961a27a9e53ce7814bf27f840510b237
test_spectral.py
258,494
4
37
test_spectral_params_validation
https://github.com/scikit-learn/scikit-learn.git
[MRG] MNT use check_scalar to validate scalar in SpectralClustering (#21881) * use check_scalar in SpectralClustering * Add check_scalar parameters validation for cluster.SpectralClustering * fix missing comma * tiny changelog update to relauch CI * errors are raised at fit time solely Co-authored-by: Julien Jerphanion <[email protected]> * fix typos Co-authored-by: Julien Jerphanion <[email protected]> * merge ..utils imports Co-authored-by: Julien Jerphanion <[email protected]> Co-authored-by: hvassard <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]>
31
1
75,252
10
8
15
def check_graph_consistency(tensor=None, method="add_loss", force_raise=False):
    if force_raise or (
        tf.compat.v1.executing_eagerly_outside_functions()
        and hasattr(tensor, "graph")
        and tensor.graph.is_control_flow_graph
    ):
        if method == "activity_regularizer":
            bad_example =
keras/engine/base_layer_utils.py
102
bad_example = """
keras
{ "docstring": "Checks that tensors passed to `add_*` method match the Keras graph.\n\n When one of the `add_*` method is called inside a V2 conditional branch,\n the underlying tensor gets created in a FuncGraph managed by control_flow_v2.\n We need to raise clear error messages in such cases.\n\n Args:\n tensor: Tensor to check, or `False` if it is known that an error\n should be raised.\n method: Caller method, one of {'add_metric', 'add_loss', 'add_update'}.\n force_raise: If an error should be raised regardless of `tensor`.\n\n Raises:\n RuntimeError: In case of an out-of-graph tensor.\n \n class TestModel(tf.keras.Model):\n", "language": "en", "n_whitespaces": 140, "n_words": 90, "vocab_size": 70 }
21
Python
19
84afc5193d38057e2e2badf9c889ea87d80d8fbf
base_layer_utils.py
270,862
112
142
check_graph_consistency
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
70
1
80,577
13
6
20
def get_url(path, dest="", saltenv=None, makedirs=False, source_hash=None): if not saltenv: saltenv = __opts__["saltenv"] or "base" if isinstance(dest, str): result = _client().get_url( path, dest, makedirs, saltenv, source_hash=source_hash ) else: result = _client().get_url( path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash ) if not result: log.error( "Unable to fetch file %s from saltenv %s.", salt.utils.url.redact_http_basic_auth(path), saltenv, ) if result: return salt.utils.stringutils.to_unicode(result) return result
salt/modules/cp.py
198
salt
{ "docstring": "\n .. versionchanged:: 3005\n ``saltenv`` will use value from config if not explicitly set\n\n .. versionchanged:: 2018.3.0\n ``dest`` can now be a directory\n\n Used to get a single file from a URL.\n\n path\n A URL to download a file from. Supported URL schemes are: ``salt://``,\n ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and\n ``file://`` (local filesystem). If no scheme was specified, this is\n equivalent of using ``file://``.\n If a ``file://`` URL is given, the function just returns absolute path\n to that file on a local filesystem.\n The function returns ``False`` if Salt was unable to fetch a file from\n a ``salt://`` URL.\n\n dest\n The default behaviour is to write the fetched file to the given\n destination path. If this parameter is omitted or set as empty string\n (``''``), the function places the remote file on the local filesystem\n inside the Minion cache directory and returns the path to that file.\n\n .. note::\n\n To simply return the file contents instead, set destination to\n ``None``. This works with ``salt://``, ``http://``, ``https://``\n and ``file://`` URLs. The files fetched by ``http://`` and\n ``https://`` will not be cached.\n\n saltenv\n Salt fileserver environment from which to retrieve the file. Ignored if\n ``path`` is not a ``salt://`` URL.\n\n source_hash\n If ``path`` is an http(s) or ftp URL and the file exists in the\n minion's file cache, this option can be passed to keep the minion from\n re-downloading the file if the cached copy matches the specified hash.\n\n .. versionadded:: 2018.3.0\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine\n salt '*' cp.get_url http://www.slashdot.org /tmp/index.html\n ", "language": "en", "n_whitespaces": 491, "n_words": 255, "vocab_size": 146 }
58
Python
39
2bd6323ef5f87d871891a59917ee96f44ef55e75
cp.py
216,181
20
128
get_url
https://github.com/saltstack/salt.git
fixes saltstack/salt#61562 cp functions derive saltenv from config
190
0
54,458
13
1
4
def test_mapped_literal_verify_integrity(dag_maker, session):
    with dag_maker(session=session) as dag:
tests/models/test_dagrun.py
34
airflow
{ "docstring": "Test that when the length of a mapped literal changes we remove extra TIs", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
7
Python
7
91832a42d8124b040073481fd93c54e9e64c2609
test_dagrun.py
46,886
19
173
test_mapped_literal_verify_integrity
https://github.com/apache/airflow.git
Expand mapped tasks at DagRun.Veriy_integrity (#22679) Create the necessary task instances for a mapped task at dagrun.verify_integrity Co-authored-by: Ash Berlin-Taylor <[email protected]>
13
0
9,033
11
1
9
def test_cursor_var(self):
    with connection.cursor() as cursor:
        var = cursor.var(str)
        cursor.execute("BEGIN %s := 'X'; END; ", [var])
        self.assertEqual(var.getvalue(), "X")
tests/backends/oracle/tests.py
82
django
{ "docstring": "Cursor variables can be passed as query parameters.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
18
Python
18
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
201,727
5
45
test_cursor_var
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
65
0
49,983
11
18
29
def _eval_Mod(self, q):
    r
    base, exp = self.base, self.exp

    if exp.is_integer and exp.is_positive:
        if q.is_integer and base % q == 0:
            return S.Zero

        from sympy.ntheory.factor_ import totient

        if base.is_Integer and exp.is_Integer and q.is_Integer:
            b, e, m = int(base), int(exp), int(q)
            mb = m.bit_length()
            if mb <= 80 and e >= mb and e.bit_length()**4 >= m:
                phi = totient(m)
                return Integer(pow(b, phi + e%phi, m))
            return Integer(pow(b, e, m))

        from .mod import Mod

        if isinstance(base, Pow) and base.is_integer and base.is_number:
            base = Mod(base, q)
            return Mod(Pow(base, exp, evaluate=False), q)

        if isinstance(exp, Pow) and exp.is_integer and exp.is_number:
            bit_length = int(q).bit_length()
            # XXX Mod-Pow actually attempts to do a hanging evaluation
            # if this dispatched function returns None.
            # May need some fixes in the dispatcher itself.
            if bit_length <= 80:
                phi = totient(q)
                exp = phi + Mod(exp, phi)
                return Mod(Pow(base, exp, evaluate=False), q)
sympy/core/power.py
389
sympy
{ "docstring": "A dispatched function to compute `b^e \\bmod q`, dispatched\n by ``Mod``.\n\n Notes\n =====\n\n Algorithms:\n\n 1. For unevaluated integer power, use built-in ``pow`` function\n with 3 arguments, if powers are not too large wrt base.\n\n 2. For very large powers, use totient reduction if $e \\ge \\log(m)$.\n Bound on m, is for safe factorization memory wise i.e. $m^{1/4}$.\n For pollard-rho to be faster than built-in pow $\\log(e) > m^{1/4}$\n check is added.\n\n 3. For any unevaluated power found in `b` or `e`, the step 2\n will be recursed down to the base and the exponent\n such that the $b \\bmod q$ becomes the new base and\n $\\phi(q) + e \\bmod \\phi(q)$ becomes the new exponent, and then\n the computation for the reduced expression can be done.\n ", "language": "en", "n_whitespaces": 237, "n_words": 125, "vocab_size": 95 }
142
Python
90
cda8dfe6f45dc5ed394c2f5cda706cd6c729f713
power.py
195,855
45
253
_eval_Mod
https://github.com/sympy/sympy.git
Improved documentation formatting
503
0
47,442
17
4
10
def host_header(self) -> Optional[str]:
    if self.is_http2 or self.is_http3:
        return self.authority or self.data.headers.get("Host", None)
    else:
        return self.data.headers.get("Host", None)
mitmproxy/http.py
86
mitmproxy
{ "docstring": "\n The request's host/authority header.\n\n This property maps to either ``request.headers[\"Host\"]`` or\n ``request.authority``, depending on whether it's HTTP/1.x or HTTP/2.0.\n\n *See also:* `Request.authority`,`Request.host`, `Request.pretty_host`\n ", "language": "en", "n_whitespaces": 59, "n_words": 23, "vocab_size": 22 }
17
Python
13
8e71b0331b8de95c4204d5cc26fb07e967883972
http.py
252,191
13
52
host_header
https://github.com/mitmproxy/mitmproxy.git
[quic] add is_http3 where necessary
60
0
73,921
12
2
46
def test_proxy_manager_lifecycle(shutdown_only):
    proxier.CHECK_PROCESS_INTERVAL_S = 1
    os.environ["TIMEOUT_FOR_SPECIFIC_SERVER_S"] = "5"
    pm, free_ports = start_ray_and_proxy_manager(n_ports=2)
    client = "client1"

    pm.create_specific_server(client)
    assert pm.start_specific_server(client, JobConfig())
    # Channel should be ready and corresponding to an existing server
    grpc.channel_ready_future(pm.get_channel(client)).result(timeout=5)

    proc = pm._get_server_for_client(client)
    assert proc.port == free_ports[0], f"Free Ports are: {free_ports}"

    log_files_path = os.path.join(
        pm.node.get_session_dir_path(), "logs", "ray_client_server*"
    )
    files = glob(log_files_path)
    assert any(str(free_ports[0]) in f for f in files)

    proc.process_handle_future.result().process.wait(10)
    # Wait for reconcile loop
    time.sleep(2)

    assert len(pm._free_ports) == 2
    assert pm._get_unused_port() == free_ports[1]


@pytest.mark.skipif(
    sys.platform == "win32", reason="PSUtil does not work the same on windows."
)
python/ray/tests/test_client_proxy.py
315
@pytest.mark.skipif( sys.platform == "win32", reason="PSUtil does not work the same on windows." )
ray
{ "docstring": "\n Creates a ProxyManager and tests basic handling of the lifetime of a\n specific RayClient Server. It checks the following properties:\n 1. The SpecificServer is created using the first port.\n 2. The SpecificServer comes alive and has a log associated with it.\n 3. The SpecificServer destructs itself when no client connects.\n 4. The ProxyManager returns the port of the destructed SpecificServer.\n ", "language": "en", "n_whitespaces": 82, "n_words": 60, "vocab_size": 45 }
88
Python
70
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
test_client_proxy.py
131,422
19
170
test_proxy_manager_lifecycle
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
156
1
29,519
11
2
5
def require_wandb(test_case):
    if not is_wandb_available():
        return unittest.skip("test requires wandb")(test_case)
    else:
        return test_case
src/transformers/testing_utils.py
49
transformers
{ "docstring": "\n Decorator marking a test that requires wandb.\n\n These tests are skipped when wandb isn't installed.\n\n ", "language": "en", "n_whitespaces": 25, "n_words": 15, "vocab_size": 15 }
12
Python
11
c74f3d4c480a6971e302de7cef226e9a157ef0d0
testing_utils.py
34,828
5
26
require_wandb
https://github.com/huggingface/transformers.git
Add W&B backend for hyperparameter sweep (#14582) # Add support for W&B hyperparameter sweep This PR: * allows using wandb for running hyperparameter search. * The runs are visualized on W&B sweeps dashboard * This supports runnning sweeps on parallel devices, all reporting to the same central dashboard. ### Usage **To run new a hyperparameter search:** ``` trainer.hyperparameter_search( backend="wandb", project="transformers_sweep", # name of the project n_trials=5, metric="eval/loss", # metric to be optimized, default 'eval/loss'. A warning is raised if the passed metric is not found ) ``` This outputs a sweep id. Eg. `my_project/sweep_id` **To run sweeps on parallel devices:** Just pass sweep id which you want to run parallel ``` trainer.hyperparameter_search( backend="wandb", sweep_id = "my_project/sweep_id" ) ```
35
0
6,347
11
1
9
def _object2proto(self) -> GetObjectResponseMessage_PB:
    ser = serialize(self.obj)

    return GetObjectResponseMessage_PB(
        msg_id=serialize(self.id),
        address=serialize(self.address),
        obj=ser,
    )
packages/syft/src/syft/core/node/common/action/get_object_action.py
67
PySyft
{ "docstring": "Returns a protobuf serialization of self.\n\n As a requirement of all objects which inherit from Serializable,\n this method transforms the current object into the corresponding\n Protobuf object so that it can be further serialized.\n\n :return: returns a protobuf object\n :rtype: GetObjectResponseMessage_PB\n\n .. note::\n This method is purely an internal method. Please use serialize(object) or one of\n the other public serialization methods if you wish to serialize an\n object.\n ", "language": "en", "n_whitespaces": 150, "n_words": 68, "vocab_size": 56 }
13
Python
13
7f44809cca9457058171cfd65994fb4aee8031ac
get_object_action.py
2,736
21
42
_object2proto
https://github.com/OpenMined/PySyft.git
Replace absolute syft imports
74
0
355
11
1
1
def doctest_tb_context():
IPython/core/tests/test_iplib.py
12
ipython
{ "docstring": "\n In [3]: xmode context\n Exception reporting mode: Context\n\n In [4]: run simpleerr.py\n ---------------------------------------------------------------------------\n ZeroDivisionError Traceback (most recent call last)\n <BLANKLINE>\n ...\n 30 except IndexError:\n 31 mode = 'div'\n ---> 33 bar(mode)\n <BLANKLINE>\n ... in bar(mode)\n 15 \"bar\"\n 16 if mode=='div':\n ---> 17 div0()\n 18 elif mode=='exit':\n 19 try:\n <BLANKLINE>\n ... in div0()\n 6 x = 1\n 7 y = 0\n ----> 8 x/y\n <BLANKLINE>\n ZeroDivisionError: ...", "language": "en", "n_whitespaces": 260, "n_words": 66, "vocab_size": 53 }
2
Python
2
a72418e2dcdfc3c91f70d724d16d2691a41c9c24
test_iplib.py
208,726
1
5
doctest_tb_context
https://github.com/ipython/ipython.git
Restore lineno's for Input mapped files (#13560) * Implement lineno's for Input mapped files * Adopt In [123], line 123 format * Revert "Set co_name for cells run line by line. Fixes https://github.com/ipython/ipykernel/issues/841" (This reverts commit d11e987f174a15f1640f8006c86f58d884c3faa4.) * Omit mention of ", in <module>" for input tracebacks * Input cell -> Cell * Remove <module> from traceback doctests * Use f-string for `in ...' format * Simplify _format_list logic, converting to f-strings
5
0
52,485
6
11
42
def new(self) -> None:
    from rich import box, print
    from rich.console import Console
    from rich.panel import Panel
    from rich.progress import track
    from rich.prompt import Confirm, Prompt
    from rich.syntax import Syntax
    from rich.table import Table

    console = Console()

    print(
        Panel.fit(
            ,
            title='Create New Executor',
        )
    )

    exec_name = (
        self.args.name
        if self.args.name
        else Prompt.ask(
            ':grey_question: What is the [bold]name[/bold] of your executor?\n'
            '[dim]CamelCase is required[/dim]',
            default=f'MyExecutor{random.randint(0, 100)}',
        )
    )

    exec_path = (
        self.args.path
        if self.args.path
        else Prompt.ask(
            ':grey_question: [bold]Which folder[/bold] to store your executor?',
            default=os.path.join(os.getcwd(), exec_name),
        )
    )

    exec_description = '{{}}'
    exec_keywords = '{{}}'
    exec_url = '{{}}'

    is_dockerfile = False

    if self.args.advance_configuration or Confirm.ask(
        '[green]That\'s all we need to create an Executor![/green]\n'
        ':grey_question: Or do you want to proceed to advanced configuration',
        default=False,
    ):
        exec_description = (
            self.args.description
            if self.args.description
            else (
                Prompt.ask(
                    ':grey_question: Please give a [bold]short description[/bold] of your executor?\n'
                    f'[dim]Example: {exec_name} embeds images into 128-dim vectors using ResNet.[/dim]'
                )
            )
        )

        exec_keywords = (
            self.args.keywords
            if self.args.keywords
            else (
                Prompt.ask(
                    ':grey_question: Please give some [bold]keywords[/bold] to help people search your executor [dim](separated by comma)[/dim]\n'
                    f'[dim]Example: image cv embedding encoding resnet[/dim]'
                )
            )
        )

        exec_url = (
            self.args.url
            if self.args.url
            else (
                Prompt.ask(
                    ':grey_question: What is the [bold]URL[/bold] for GitHub repo?\n'
                    f'[dim]Example: https://github.com/yourname/my-executor[/dim]'
                )
            )
        )

        print(
            Panel.fit(
                ,
                title='[Optional] [bold]Dockerfile[/bold]',
                width=80,
            )
        )

        is_dockerfile = self.args.add_dockerfile or Confirm.ask(
            ':grey_question: Do you need to write your own [bold]Dockerfile[/bold] instead of the auto-generated one?',
            default=False,
        )

    print('[green]That\'s all we need to create an Executor![/green]')
jina/hubble/hubio.py
505
jina
{ "docstring": "Create a new executor folder interactively.\n[bold green]Executor[/bold green] is how Jina processes [bold]Document[/bold].\n\nThis guide helps you to create your own Executor in 30 seconds.\n[bold]Dockerfile[/bold] describes how this executor will be built. It is useful when\nyour executor has non-trivial dependencies or must be run under certain environment.\n\n- If the [bold]Dockerfile[/bold] is missing, Jina automatically generates one for you.\n- If you provide one, then Jina will respect the given [bold]Dockerfile[/bold].", "language": "en", "n_whitespaces": 67, "n_words": 74, "vocab_size": 59 }
244
Python
133
beb0d8f569530755f7797781a8cb49e1b8a2faaf
hubio.py
11,754
205
694
new
https://github.com/jina-ai/jina.git
feat(hubble): fetch image only when required (#4445)
1,309
0
2,111
16
14
20
def split_having(self, negated=False):
    if not self.contains_aggregate:
        return self, None
    in_negated = negated ^ self.negated
    # If the effective connector is OR and this node contains an aggregate,
    # then we need to push the whole branch to HAVING clause.
    may_need_split = (in_negated and self.connector == AND) or (
        not in_negated and self.connector == OR
    )
    if may_need_split and self.contains_aggregate:
        return None, self
    where_parts = []
    having_parts = []
    for c in self.children:
        if hasattr(c, "split_having"):
            where_part, having_part = c.split_having(in_negated)
            if where_part is not None:
                where_parts.append(where_part)
            if having_part is not None:
                having_parts.append(having_part)
        elif c.contains_aggregate:
            having_parts.append(c)
        else:
            where_parts.append(c)
    having_node = (
        self.__class__(having_parts, self.connector, self.negated)
        if having_parts
        else None
    )
    where_node = (
        self.__class__(where_parts, self.connector, self.negated)
        if where_parts
        else None
    )
    return where_node, having_node
django/db/models/sql/where.py
290
django
{ "docstring": "\n Return two possibly None nodes: one for those parts of self that\n should be included in the WHERE clause and one for those parts of\n self that must be included in the HAVING clause.\n ", "language": "en", "n_whitespaces": 63, "n_words": 34, "vocab_size": 23 }
121
Python
75
9c19aff7c7561e3a82978a272ecdaad40dda5c00
where.py
205,899
33
184
split_having
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
478
0
51,273
13
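To see the effect of this split from the ORM side, here is a rough illustration; `Author` is a hypothetical model with a reverse `book` relation, and the exact SQL depends on the database backend.

from django.db.models import Count

# The plain condition ends up in WHERE, while the condition on the
# aggregate annotation is pushed into HAVING by split_having().
qs = (
    Author.objects.annotate(book_count=Count("book"))
    .filter(name__startswith="A")   # -> WHERE
    .filter(book_count__gte=2)      # -> HAVING
)
print(qs.query)  # inspect the generated SQL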
2
9
def unpack_collections(*args, traverse=True):
    collections = []
    repack_dsk = {}
    collections_token = uuid.uuid4().hex
dask/base.py
49
dask
{ "docstring": "Extract collections in preparation for compute/persist/etc...\n\n Intended use is to find all collections in a set of (possibly nested)\n python objects, do something to them (compute, etc...), then repackage them\n in equivalent python objects.\n\n Parameters\n ----------\n *args\n Any number of objects. If it is a dask collection, it's extracted and\n added to the list of collections returned. By default, python builtin\n collections are also traversed to look for dask collections (for more\n information see the ``traverse`` keyword).\n traverse : bool, optional\n If True (default), builtin python collections are traversed looking for\n any dask collections they might contain.\n\n Returns\n -------\n collections : list\n A list of all dask collections contained in ``args``\n repack : callable\n A function to call on the transformed collections to repackage them as\n they were in the original ``args``.\n ", "language": "en", "n_whitespaces": 231, "n_words": 132, "vocab_size": 83 }
12
Python
10
8971c37f810aa242295dd6a7d9a7cbdf9621d92e
base.py
155,868
9
64
unpack_collections
https://github.com/dask/dask.git
Tokenize dataclasses (#8557)
24
0
36,480
9
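A small sketch of the call pattern described in the docstring above; `unpack_collections` is an internal dask helper rather than public API, and the exact shape of what `repack` returns is an assumption here (a tuple mirroring the original `*args`).

import dask
from dask.base import unpack_collections

x = dask.delayed(1)
y = dask.delayed(2)

# Find dask collections nested inside plain Python containers.
collections, repack = unpack_collections([x, 3], {"y": y}, traverse=True)

# `collections` should be the flat list of dask objects found ([x, y]);
# `repack` rebuilds the original argument structure from their computed
# counterparts, e.g. repack([1, 2]) -> ([1, 3], {"y": 2}).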
1
30
async def test_callback_view_with_jwt(hass, hass_client):
    registrations = {"device": SUBSCRIPTION_1}
    client = await mock_client(hass, hass_client, registrations)

    with patch("homeassistant.components.html5.notify.WebPusher") as mock_wp:
        mock_wp().send().status_code = 201
        await hass.services.async_call(
            "notify",
            "notify",
            {"message": "Hello", "target": ["device"], "data": {"icon": "beer.png"}},
            blocking=True,
        )

    assert len(mock_wp.mock_calls) == 4

    # WebPusher constructor
    assert mock_wp.mock_calls[2][1][0] == SUBSCRIPTION_1["subscription"]

    # Call to send
    push_payload = json.loads(mock_wp.mock_calls[3][1][0])

    assert push_payload["body"] == "Hello"
    assert push_payload["icon"] == "beer.png"

    bearer_token = "Bearer {}".format(push_payload["data"]["jwt"])

    resp = await client.post(
        PUBLISH_URL, json={"type": "push"}, headers={AUTHORIZATION: bearer_token}
    )
    assert resp.status == HTTPStatus.OK
    body = await resp.json()
    assert body == {"event": "push", "status": "ok"}
tests/components/html5/test_notify.py
368
core
{ "docstring": "Test that the notification callback view works with JWT.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
89
Python
66
652fedf4d1de2645ba08e6ace5376c7126839154
test_notify.py
296,995
23
208
test_callback_view_with_jwt
https://github.com/home-assistant/core.git
Fix html5 Firefox Notifications (#82556) Co-authored-by: Paulus Schoutsen <[email protected]> fixes undefined
212
0
95,966
15
4
12
def _render_href(x, format):
    if isinstance(x, str):
        if format == "html":
            href = '<a href="{0}" target="_blank">{0}</a>'
        elif format == "latex":
            href = r"\href{{{0}}}{{{0}}}"
        else:
            raise ValueError("``hyperlinks`` format can only be 'html' or 'latex'")
        pat = r"((http|ftp)s?:\/\/|www.)[\w/\-?=%.:@]+\.[\w/\-&?=%.,':;~!@#$*()\[\]]+"
        return re.sub(pat, lambda m: href.format(m.group(0)), x)
    return x
pandas/io/formats/style_render.py
121
pandas
{ "docstring": "uses regex to detect a common URL pattern and converts to href tag in format.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 14 }
43
Python
35
f99ec8bf80ba64b2f852cfab7b27ec9e05055589
style_render.py
165,791
11
70
_render_href
https://github.com/pandas-dev/pandas.git
BUG: url regex in `style_render` does not pass colon and other valid (#46457) * BUG: url regex in `style_render` does not pass colon and other valid URLs containing some valid characters such as colon in port numbers get cut off when html-formatting. As a workaround, expanded the regex to match a wider variety of URLs. * Add whatsnew entry for #46389 fix * Update whatsnew entry for fix #46389 Co-authored-by: Simon Hawkins <[email protected]> Co-authored-by: Simon Hawkins <[email protected]>
120
0
39,716
14
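A quick sketch of how the helper above behaves when called directly; in normal use it is reached through `Styler.format(hyperlinks="html")` or `hyperlinks="latex"`, and the output strings shown in the comments are approximate.

from pandas.io.formats.style_render import _render_href

cell = "docs at https://pandas.pydata.org:443/docs"

print(_render_href(cell, format="html"))
# roughly: docs at <a href="https://pandas.pydata.org:443/docs" target="_blank">https://pandas.pydata.org:443/docs</a>

print(_render_href(cell, format="latex"))
# roughly: docs at \href{https://pandas.pydata.org:443/docs}{https://pandas.pydata.org:443/docs}

# Non-string values pass through unchanged.
print(_render_href(3.14, format="html"))  # -> 3.14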