column           dtype          min      max
complexity       int64          1        56
n_identifiers    int64          1        114
code             stringlengths  19       12.7k
path             stringlengths  8        134
n_ast_nodes      int64          12       2.35k
ast_errors       stringlengths  0        4.01k
repo             stringlengths  3        28
documentation    dict
n_words          int64          2        866
language         stringclasses  1 value
vocab_size       int64          2        323
commit_id        stringlengths  40       40
file_name        stringlengths  5        79
id               int64          243      338k
nloc             int64          1        228
token_counts     int64          5        1.4k
fun_name         stringlengths  1        77
url              stringlengths  31       60
commit_message   stringlengths  3        15.3k
n_whitespaces    int64          1        3.23k
n_ast_errors     int64          0        20
d_id             int64          74       121k
ast_levels       int64          4        29
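The table above is the column schema of a per-function code dataset (one row per Python function, with its source, docstring metadata, and commit information); the records below follow this schema, one cell per line. As a minimal sketch of how a table with this schema could be inspected, assuming the records are available locally as a Parquet file; the file name "code_dataset.parquet" and the pandas-based access are assumptions, not part of the original data:

# Sketch only: the file name is a placeholder; column names come from the schema above.
import pandas as pd

df = pd.read_parquet("code_dataset.parquet")

# Quick look at the advertised ranges.
print(df["language"].unique())                            # a single class per the schema
print(df[["complexity", "nloc", "token_counts"]].describe())

# Example query: small, low-complexity functions, with their repo and location.
subset = df[(df["complexity"] <= 2) & (df["nloc"] <= 5)]
print(subset[["repo", "fun_name", "path", "url"]].head())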
1
5
def dodecahedral_graph(create_using=None):
    G = LCF_graph(20, [10, 7, 4, -4, -7, 10, -4, 7, -7, 4], 2, create_using)
    G.name = "Dodecahedral Graph"
    return G
networkx/generators/small.py
74
networkx
{ "docstring": "\n Returns the Platonic Dodecahedral graph.\n\n The dodecahedral graph has 20 nodes and 30 edges. The skeleton of the\n dodecahedron forms a graph. It is one of 5 Platonic graphs [1]_.\n It can be described in LCF notation as:\n ``[10, 7, 4, -4, -7, 10, -4, 7, -7, 4]^2`` [2]_.\n\n Parameters\n ----------\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Returns\n -------\n G : networkx Graph\n Dodecahedral Graph with 20 nodes and 30 edges\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Regular_dodecahedron#Dodecahedral_graph\n .. [2] https://mathworld.wolfram.com/DodecahedralGraph.html\n\n ", "language": "en", "n_whitespaces": 153, "n_words": 91, "vocab_size": 69 }
23
Python
18
dec723f072eb997a497a159dbe8674cd39999ee9
small.py
176,163
4
51
dodecahedral_graph
https://github.com/networkx/networkx.git
Docstrings for the small.py module (#5240) * added description for the first 5 small graphs * modified descriptions based on comment and added description for two more functions * added doctrings to all the functions * Minor touchups. Co-authored-by: Ross Barnowski <[email protected]>
35
0
41,733
10
1
4
def as_integer_ratio(self):
    return (self._numerator, self._denominator)
python3.10.4/Lib/fractions.py
27
XX-Net
{ "docstring": "Return the integer ratio as a tuple.\n\n Return a tuple of two integers, whose ratio is equal to the\n Fraction and with a positive denominator.\n ", "language": "en", "n_whitespaces": 46, "n_words": 25, "vocab_size": 20 }
5
Python
5
8198943edd73a363c266633e1aa5b2a9e9c9f526
fractions.py
217,396
2
16
as_integer_ratio
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
19
0
54,748
7
5
25
def get_iso_3166_2_state_code(address):
    import pycountry

    country_code = frappe.db.get_value("Country", address.get("country"), "code")

    error_message = _(
        "{0} is not a valid state! Check for typos or enter the ISO code for your state."
    ).format(address.get("state"))
    state = address.get("state").upper().strip()

    # The max length for ISO state codes is 3, excluding the country code
    if len(state) <= 3:
        # PyCountry returns state code as {country_code}-{state-code} (e.g. US-FL)
        address_state = (country_code + "-" + state).upper()

        states = pycountry.subdivisions.get(country_code=country_code.upper())
        states = [pystate.code for pystate in states]

        if address_state in states:
            return state

        frappe.throw(_(error_message))
    else:
        try:
            lookup_state = pycountry.subdivisions.lookup(state)
        except LookupError:
            frappe.throw(_(error_message))
        else:
            return lookup_state.code.split("-")[1]
erpnext/erpnext_integrations/taxjar_integration.py
280
erpnext
{ "docstring": "{0} is not a valid state! Check for typos or enter the ISO code for your state.", "language": "en", "n_whitespaces": 16, "n_words": 17, "vocab_size": 16 }
78
Python
58
494bd9ef78313436f0424b918f200dab8fc7c20b
taxjar_integration.py
66,028
21
162
get_iso_3166_2_state_code
https://github.com/frappe/erpnext.git
style: format code with black
56
0
14,090
15
1
4
def snapshot(self):
    self._prechange_snapshot = self.serialize_object()
netbox/netbox/models/features.py
28
netbox
{ "docstring": "\n Save a snapshot of the object's current state in preparation for modification. The snapshot is saved as\n `_prechange_snapshot` on the instance.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 19 }
5
Python
5
23f391c5b59d5e01321cf5b83e5337c45f9a09ac
features.py
265,277
2
15
snapshot
https://github.com/netbox-community/netbox.git
Closes #9228: Add serialize_object() method to ChangeLoggingMixin
19
0
78,058
8
4
41
def test_sub_dag_task_group():
    execution_date = pendulum.parse("20200101")
    with DAG("test_test_task_group_sub_dag", start_date=execution_date) as dag:
        task1 = EmptyOperator(task_id="task1")
        with TaskGroup("group234") as group234:
            _ = EmptyOperator(task_id="task2")

            with TaskGroup("group34") as group34:
                _ = EmptyOperator(task_id="task3")
                _ = EmptyOperator(task_id="task4")

        with TaskGroup("group6") as group6:
            _ = EmptyOperator(task_id="task6")

        task7 = EmptyOperator(task_id="task7")
        task5 = EmptyOperator(task_id="task5")

        task1 >> group234
        group34 >> task5
        group234 >> group6
        group234 >> task7

    subdag = dag.partial_subset(task_ids_or_regex="task5", include_upstream=True, include_downstream=False)

    assert extract_node_id(task_group_to_dict(subdag.task_group)) == {
        'id': None,
        'children': [
            {
                'id': 'group234',
                'children': [
                    {
                        'id': 'group234.group34',
                        'children': [
                            {'id': 'group234.group34.task3'},
                            {'id': 'group234.group34.task4'},
                            {'id': 'group234.group34.downstream_join_id'},
                        ],
                    },
                    {'id': 'group234.upstream_join_id'},
                ],
            },
            {'id': 'task1'},
            {'id': 'task5'},
        ],
    }

    edges = dag_edges(subdag)
    assert sorted((e["source_id"], e["target_id"]) for e in edges) == [
        ('group234.group34.downstream_join_id', 'task5'),
        ('group234.group34.task3', 'group234.group34.downstream_join_id'),
        ('group234.group34.task4', 'group234.group34.downstream_join_id'),
        ('group234.upstream_join_id', 'group234.group34.task3'),
        ('group234.upstream_join_id', 'group234.group34.task4'),
        ('task1', 'group234.upstream_join_id'),
    ]

    subdag_task_groups = subdag.task_group.get_task_group_dict()
    assert subdag_task_groups.keys() == {None, "group234", "group234.group34"}

    included_group_ids = {"group234", "group234.group34"}
    included_task_ids = {'group234.group34.task3', 'group234.group34.task4', 'task1', 'task5'}

    for task_group in subdag_task_groups.values():
        assert task_group.upstream_group_ids.issubset(included_group_ids)
        assert task_group.downstream_group_ids.issubset(included_group_ids)
        assert task_group.upstream_task_ids.issubset(included_task_ids)
        assert task_group.downstream_task_ids.issubset(included_task_ids)

    for task in subdag.task_group:
        assert task.upstream_task_ids.issubset(included_task_ids)
        assert task.downstream_task_ids.issubset(included_task_ids)
tests/utils/test_task_group.py
704
airflow
{ "docstring": "\n Tests dag.partial_subset() updates task_group correctly.\n ", "language": "en", "n_whitespaces": 12, "n_words": 5, "vocab_size": 5 }
160
Python
97
49e336ae0302b386a2f47269a6d13988382d975f
test_task_group.py
47,692
60
396
test_sub_dag_task_group
https://github.com/apache/airflow.git
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
732
0
9,214
18
1
13
def test_app_with_import(self):
tests/admin_scripts/tests.py
28
"""manage.py check does noterrors when an app imports a
django
{ "docstring": "manage.py check does not raise errors when an app imports a base", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
2
Python
2
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,392
15
63
test_app_with_import
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
9
2
51,948
6
1
9
def normalize_sort(self, names, os_sep, character):
    names.sort(key=lambda item: item.replace(os_sep, character))
awscli/customizations/s3/filegenerator.py
47
aws-cli
{ "docstring": "\n The purpose of this function is to ensure that the same path separator\n is used when sorting. In windows, the path operator is a backslash as\n opposed to a forward slash which can lead to differences in sorting\n between s3 and a windows machine.\n ", "language": "en", "n_whitespaces": 81, "n_words": 44, "vocab_size": 36 }
9
Python
9
08542a0ad75642b52eebf15b3e052e3387904b05
filegenerator.py
189,207
2
30
normalize_sort
https://github.com/aws/aws-cli.git
Fix a few typos
23
0
46,015
11
2
14
def _load_module_shim(self, fullname):
    msg = ("the load_module() method is deprecated and slated for removal in "
           "Python 3.12; use exec_module() instead")
    _warnings.warn(msg, DeprecationWarning)
    spec = spec_from_loader(fullname, self)
    if fullname in sys.modules:
        module = sys.modules[fullname]
        _exec(spec, module)
        return sys.modules[fullname]
    else:
        return _load(spec)

# Module specifications #######################################################
python3.10.4/Lib/importlib/_bootstrap.py
108
XX-Net
{ "docstring": "Load the specified module into sys.modules and return it.\n\n This method is deprecated. Use loader.exec_module() instead.\n\n ", "language": "en", "n_whitespaces": 23, "n_words": 16, "vocab_size": 16 }
45
Python
40
8198943edd73a363c266633e1aa5b2a9e9c9f526
_bootstrap.py
218,069
11
65
_load_module_shim
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
99
0
55,111
10
2
20
def get_ext_fullpath(self, ext_name):
    fullname = self.get_ext_fullname(ext_name)
    modpath = fullname.split('.')
    filename = self.get_ext_filename(modpath[-1])

    if not self.inplace:
        # no further work needed
        # returning :
        #   build_dir/package/path/filename
        filename = os.path.join(*modpath[:-1]+[filename])
        return os.path.join(self.build_lib, filename)

    # the inplace option requires to find the package directory
    # using the build_py command for that
    package = '.'.join(modpath[0:-1])
    build_py = self.get_finalized_command('build_py')
    package_dir = os.path.abspath(build_py.get_package_dir(package))

    # returning
    #   package_dir/filename
    return os.path.join(package_dir, filename)
python3.10.4/Lib/distutils/command/build_ext.py
209
XX-Net
{ "docstring": "Returns the path of the filename for a given extension.\n\n The file is located in `build_lib` or directly in the package\n (inplace option).\n ", "language": "en", "n_whitespaces": 44, "n_words": 23, "vocab_size": 20 }
64
Python
44
8198943edd73a363c266633e1aa5b2a9e9c9f526
build_ext.py
222,679
11
123
get_ext_fullpath
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
214
0
56,693
15
2
8
def check_idle(self):
    if self._message_queue.empty():
        self.post_message_no_wait(messages.Prompt(sender=self))
src/textual/message_pump.py
49
textual
{ "docstring": "Prompt the message pump to call idle if the queue is empty.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
5
Python
5
237c556673f058a60aa59f441a8bbb7c953be55f
message_pump.py
182,485
3
28
check_idle
https://github.com/Textualize/textual.git
refactor of compositor
30
0
43,842
12
2
10
def flush(self) -> None:
    self._connected = True
    for info in self._cache:
        self.show(**dataclasses.asdict(info))
    self._cache = []


global_bridge = GlobalMessageBridge()
qutebrowser/utils/message.py
73
qutebrowser
{ "docstring": "Flush messages which accumulated while no handler was connected.\n\n This is so we don't miss messages shown during some early init phase.\n It needs to be called once the show_message signal is connected.\n ", "language": "en", "n_whitespaces": 54, "n_words": 33, "vocab_size": 30 }
18
Python
16
5616a99eff34f7074641d1391ed77d6b4b743529
message.py
320,923
10
38
flush
https://github.com/qutebrowser/qutebrowser.git
Add a MessageInfo data class Preparation for #7246
56
0
117,438
12
1
5
def default_params(self) -> dict:
    return {"order": "asc", "sort": self.sort_key, "limit": self.limit}
airbyte-integrations/connectors/source-recurly/source_recurly/streams.py
49
airbyte
{ "docstring": "\n Returns the parameters to be sent together with the API call to Recurly\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 11 }
11
Python
11
63af98e3b999d4b223237b51472a819915c5a558
streams.py
4,171
5
26
default_params
https://github.com/airbytehq/airbyte.git
🎉 Recurly Schema Revamp (#9866) * Cleanup Recurly connector schemas * Add more Recurly schemas to the connector - `billing_infos` - `shipping_addresses` - `shipping_methods` - `subscription_changes` * Add Recurly `add-on` resouce * Add Recurly's account notes resource schema * Add unique coupons to Recurly source * Add credit payments to Recurly connector * Add Recurly resources to integration tests configurations * Bump Recurly source version to `0.4.0` * Add `line_items` Recurly resource * Add `line_items` to Recurly documentation * Add missing `line_items` JSON schema * Replace Subscription Change Recurly API call with Subscription `pending_changes` field * Replace Recurly unique coupon codes API call with coupons `unique_coupon` field To avoid the extra API call to import unique coupon calls * Revert "Replace Recurly unique coupon codes API call with coupons `unique_coupon` field" This reverts commit 1c4592d82da3c5e5e0026dda8eb2ed7a896ac5b8. * Add `end_time` parameter to Recurly connector * Order Recurly specs * Set the Recurly `begin_time` and `end_time` to be optional * Add `order` to Recurly `source_spec.yaml` * Add `maxLength` to Recurly source schemas * Set `maxLength` for Recurly Subscription and Transaction `uuid` * Fix Recurly `export_dates` acceptance tests
25
0
628
8
2
16
def query_task(doctype, txt, searchfield, start, page_len, filters):
    from frappe.desk.reportview import build_match_conditions

    search_string = "%%%s%%" % txt
    order_by_string = "%s%%" % txt
    match_conditions = build_match_conditions("Task")
    match_conditions = ("and" + match_conditions) if match_conditions else ""

    return frappe.db.sql(
        """select name, subject from `tabTask`
        where (`%s` like %s or `subject` like %s) %s
        order by
            case when `subject` like %s then 0 else 1 end,
            case when `%s` like %s then 0 else 1 end,
            `%s`,
            subject
        limit %s offset %s"""
        % (searchfield, "%s", "%s", match_conditions, "%s", searchfield, "%s", searchfield, "%s", "%s"),
        (search_string, search_string, order_by_string, order_by_string, page_len, start),
    )
erpnext/projects/utils.py
150
erpnext
{ "docstring": "select name, subject from `tabTask`\n\t\twhere (`%s` like %s or `subject` like %s) %s\n\t\torder by\n\t\t\tcase when `subject` like %s then 0 else 1 end,\n\t\t\tcase when `%s` like %s then 0 else 1 end,\n\t\t\t`%s`,\n\t\t\tsubject\n\t\tlimit %s offset %s", "language": "en", "n_whitespaces": 34, "n_words": 42, "vocab_size": 25 }
53
Python
37
00ef499739959630cd7cf97419fbb6ca59be05f2
utils.py
68,806
18
96
query_task
https://github.com/frappe/erpnext.git
refactor: use db independent offset syntax (#31345) * chore: use db independent offset syntax * fix: typo * style: reformat code to black spec Co-authored-by: Ankush Menat <[email protected]>
43
0
14,887
10
2
9
def write(self, pkt):  # type: ignore
    # type: (_PacketIterable) -> None
    # Import here to avoid circular dependency
    from scapy.supersocket import IterSocket
    for p in IterSocket(pkt).iter:
        self.write_packet(p)
scapy/utils.py
52
scapy
{ "docstring": "\n Writes a Packet, a SndRcvList object, or bytes to a ERF file.\n\n :param pkt: Packet(s) to write (one record for each Packet)\n :type pkt: iterable[scapy.packet.Packet], scapy.packet.Packet\n ", "language": "en", "n_whitespaces": 55, "n_words": 26, "vocab_size": 22 }
27
Python
24
3df072ecb66b53251f8ec66b0bf7129a649166ae
utils.py
209,104
4
30
write
https://github.com/secdev/scapy.git
Add ERF Ethernet Support
74
0
52,606
9
1
15
def info(self, pretty=False, best=False):
    # type: (bool, bool) -> InfoDict
    return dict(
        id=self.id(),
        version=self.version(pretty, best),
        version_parts=dict(
            major=self.major_version(best),
            minor=self.minor_version(best),
            build_number=self.build_number(best),
        ),
        like=self.like(),
        codename=self.codename(),
    )
pipenv/patched/notpip/_vendor/distro.py
130
pipenv
{ "docstring": "\n Return certain machine-readable information about the OS\n distribution.\n\n For details, see :func:`distro.info`.\n ", "language": "en", "n_whitespaces": 41, "n_words": 12, "vocab_size": 12 }
23
Python
23
f3166e673fe8d40277b804d35d77dcdb760fc3b3
distro.py
20,079
12
86
info
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
162
0
3,224
13
2
2
def get_form(self):
netbox/netbox/views/generic/bulk_views.py
13
netbox
{ "docstring": "\n Provide a standard bulk delete form if none has been specified for the view\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 14 }
2
Python
2
e03593d86f3082c19255ae24f39d1ed860a04c4d
bulk_views.py
264,315
6
37
get_form
https://github.com/netbox-community/netbox.git
Move get_extra_context() to base views
9
0
77,684
6
2
11
def push(self, line):
    self.buffer.append(line)
    source = "\n".join(self.buffer)
    more = self.runsource(source, self.filename)
    if not more:
        self.resetbuffer()
    return more
python3.10.4/Lib/code.py
84
XX-Net
{ "docstring": "Push a line to the interpreter.\n\n The line should not have a trailing newline; it may have\n internal newlines. The line is appended to a buffer and the\n interpreter's runsource() method is called with the\n concatenated contents of the buffer as source. If this\n indicates that the command was executed or invalid, the buffer\n is reset; otherwise, the command is incomplete, and the buffer\n is left as it was after the line was appended. The return\n value is 1 if more input is required, 0 if the line was dealt\n with in some way (this is the same as runsource()).\n\n ", "language": "en", "n_whitespaces": 173, "n_words": 100, "vocab_size": 60 }
17
Python
15
8198943edd73a363c266633e1aa5b2a9e9c9f526
code.py
221,357
7
49
push
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
70
0
56,372
9
1
3
def _update_feature_log_prob(self, alpha):
sklearn/naive_bayes.py
15
scikit-learn
{ "docstring": "Update feature log probabilities based on counts.\n\n This method is called each time `fit` or `partial_fit` update the\n model.\n\n Parameters\n ----------\n alpha : float\n smoothing parameter. See :meth:`_check_alpha`.\n ", "language": "en", "n_whitespaces": 81, "n_words": 28, "vocab_size": 28 }
3
Python
3
1c94c0b0be3b9146aae41376f3f4ef3853e0ca97
naive_bayes.py
259,027
1
8
_update_feature_log_prob
https://github.com/scikit-learn/scikit-learn.git
DOC Add abstract methods to _BaseDiscreteNB (#22596) Co-authored-by: avm19 <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
10
0
75,533
6
1
26
def accuracy(rating_true, rating_pred):
    with tf.compat.v1.name_scope("accuracy"):

        # define and apply the mask
        mask = tf.not_equal(rating_true, 0)
        n_values = tf.reduce_sum(input_tensor=tf.cast(mask, "float32"), axis=1)

        # Take the difference between the input data and the inferred ones. This value is zero whenever
        # the two values coincides
        vd = tf.compat.v1.where(
            mask, x=tf.abs(tf.subtract(rating_true, rating_pred)), y=tf.ones_like(rating_true)
        )

        # correct values: find the location where rating_true = rating_pred
        corr = tf.cast(tf.equal(vd, 0), "float32")

        # evaluate accuracy
        accuracy_score = tf.reduce_mean(
            input_tensor=tf.compat.v1.div(
                tf.reduce_sum(input_tensor=corr, axis=1), n_values
            )
        )

        return accuracy_score
recommenders/evaluation/tf_evaluation.py
224
recommenders
{ "docstring": "Accuracy\n\n Evaluates accuracy evaluated on the rated items only (rated items are the ones with non-zero ratings).\n\n :math:`accuracy = 1/m \\sum_{mu=1}^{m} \\sum{i=1}^Nv 1/s(i) I(rating_true - rating_pred = 0)_{mu,i}`\n\n where `m = Nusers`, `Nv = number of items = number of visible units` and `s(i)` is the number of non-zero elements\n per row.\n\n Args:\n rating_true (tf.Tensor, float32): True Data.\n rating_pred (tf.Tensor, float32): Predicted Data.\n\n Returns:\n tf.Tensor: accuracy.\n\n ", "language": "en", "n_whitespaces": 108, "n_words": 66, "vocab_size": 49 }
79
Python
59
e2abad62317180f0a2f9200f892320afff3a1dda
tf_evaluation.py
39,008
14
139
accuracy
https://github.com/microsoft/recommenders.git
added newlines
220
0
7,074
16
4
13
def density(w, **kwargs):
    if kwargs:
        warnings.warn(
            "Additional keyword arguments are deprecated in version 1.2 and will be"
            " removed in version 1.4.",
            FutureWarning,
        )

    if hasattr(w, "toarray"):
        d = float(w.nnz) / (w.shape[0] * w.shape[1])
    else:
        d = 0 if w is None else float((w != 0).sum()) / w.size
    return d
sklearn/utils/extmath.py
135
scikit-learn
{ "docstring": "Compute density of a sparse vector.\n\n Parameters\n ----------\n w : array-like\n The sparse vector.\n **kwargs : keyword arguments\n Ignored.\n\n .. deprecated:: 1.2\n ``**kwargs`` were deprecated in version 1.2 and will be removed in\n 1.4.\n\n Returns\n -------\n float\n The density of w, between 0 and 1.\n ", "language": "en", "n_whitespaces": 119, "n_words": 45, "vocab_size": 36 }
50
Python
42
5d8a1994620713c2e4226fb8e40fef7e81af1103
extmath.py
261,230
12
82
density
https://github.com/scikit-learn/scikit-learn.git
API Deprecate the extra keyword arguments of utils.extmath.density (#24523) Co-authored-by: Meekail Zain <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
126
0
76,700
17
2
31
def test_dataset(ray_start_4_cpus, use_local):
    model_creator = mlp_identity.model_creator
    optimizer_creator = mlp_identity.optimizer_creator
    dataset_creator = mlp_identity.dataset_creator

    DatasetOperator = TrainingOperator.from_creators(
        model_creator=model_creator,
        optimizer_creator=optimizer_creator,
        loss_creator=nn.MSELoss,
    )

    trainer = TorchTrainer(
        training_operator_cls=DatasetOperator,
        use_local=use_local,
        num_workers=2,
    )

    dataset = dataset_creator()
    for i in range(5):
        trainer.train(dataset=dataset, num_steps=100)

    x = mlp_identity.to_mat(0.5)
    prediction = float(trainer.get_model()(x)[0][0])
    assert 0.4 <= prediction <= 0.6
    trainer.shutdown()


@pytest.mark.parametrize("use_local", [True, False])
python/ray/util/sgd/tests/test_torch_2.py
216
@pytest.mark.parametrize("use_local", [True, False])
ray
{ "docstring": "\n This test tries training the mlp_identity example. We check the accuracy of\n the model as an all inclusive way of ensuring that we are properly sharding\n and iterating over the entire dataset (instead of repeating the first set\n of points for example).\n ", "language": "en", "n_whitespaces": 58, "n_words": 42, "vocab_size": 35 }
51
Python
41
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
test_torch_2.py
133,242
21
130
test_dataset
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
141
1
29,963
13
1
11
def test_export_pipeline_5():
    pipeline_string = (
        'DecisionTreeRegressor(SelectFromModel(input_matrix, '
        'SelectFromModel__ExtraTreesRegressor__max_features=0.05, SelectFromModel__ExtraTreesRegressor__n_estimators=100, '
        'SelectFromModel__threshold=0.05), DecisionTreeRegressor__max_depth=8,'
        'DecisionTreeRegressor__min_samples_leaf=5, DecisionTreeRegressor__min_samples_split=5)'
    )
    pipeline = creator.Individual.from_string(pipeline_string, tpot_obj_reg._pset)
    expected_code =
    assert expected_code == export_pipeline(pipeline, tpot_obj_reg.operators, tpot_obj_reg._pset)
tests/export_tests.py
82
tpot
{ "docstring": "Assert that exported_pipeline() generated a compile source file as expected given a fixed simple pipeline with SelectFromModel.import numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.tree import DecisionTreeRegressor\n\n# NOTE: Make sure that the outcome column is labeled 'target' in the data file\ntpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)\nfeatures = tpot_data.drop('target', axis=1)\ntraining_features, testing_features, training_target, testing_target = \\\\\n train_test_split(features, tpot_data['target'], random_state=None)\n\nexported_pipeline = make_pipeline(\n SelectFromModel(estimator=ExtraTreesRegressor(max_features=0.05, n_estimators=100), threshold=0.05),\n DecisionTreeRegressor(max_depth=8, min_samples_leaf=5, min_samples_split=5)\n)\n\nexported_pipeline.fit(training_features, training_target)\nresults = exported_pipeline.predict(testing_features)\n", "language": "en", "n_whitespaces": 94, "n_words": 92, "vocab_size": 73 }
27
Python
22
388616b6247ca4ea8de4e2f340d6206aee523541
export_tests.py
181,619
31
45
test_export_pipeline_5
https://github.com/EpistasisLab/tpot.git
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
74
0
43,407
9
1
11
def get_party_gle_currency(party_type, party, company):
    def generator():
        existing_gle_currency = frappe.db.sql(
            """select account_currency from `tabGL Entry`
            where docstatus=1 and company=%(company)s and party_type=%(party_type)s and party=%(party)s
            limit 1""",
            {"company": company, "party_type": party_type, "party": party},
        )

        return existing_gle_currency[0][0] if existing_gle_currency else None

    return frappe.local_cache(
        "party_gle_currency", (party_type, party, company), generator, regenerate_if_none=True
    )
erpnext/accounts/party.py
109
erpnext
{ "docstring": "select account_currency from `tabGL Entry`\n\t\t\twhere docstatus=1 and company=%(company)s and party_type=%(party_type)s and party=%(party)s\n\t\t\tlimit 1", "language": "en", "n_whitespaces": 12, "n_words": 15, "vocab_size": 13 }
32
Python
27
494bd9ef78313436f0424b918f200dab8fc7c20b
party.py
65,129
5
32
get_party_gle_currency
https://github.com/frappe/erpnext.git
style: format code with black
22
0
13,801
13
5
13
def get_template(self, template_name, skip=None):
    tried = []

    for origin in self.get_template_sources(template_name):
        if skip is not None and origin in skip:
            tried.append((origin, "Skipped to avoid recursion"))
            continue

        try:
            contents = self.get_contents(origin)
        except TemplateDoesNotExist:
            tried.append((origin, "Source does not exist"))
            continue
        else:
            return Template(
                contents,
                origin,
                origin.template_name,
                self.engine,
            )

    raise TemplateDoesNotExist(template_name, tried=tried)
django/template/loaders/base.py
155
django
{ "docstring": "\n Call self.get_template_sources() and return a Template object for\n the first template matching template_name. If skip is provided, ignore\n template origins in skip. This is used to avoid recursion during\n template extending.\n ", "language": "en", "n_whitespaces": 67, "n_words": 31, "vocab_size": 28 }
49
Python
43
9c19aff7c7561e3a82978a272ecdaad40dda5c00
base.py
206,293
19
98
get_template
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
302
0
51,471
14
15
20
def model_is_indexable(cls, model, allow_child_models=False):
    if getattr(model, "wagtail_reference_index_ignore", False):
        return False

    # Don't check any models that have a parental key, references from these will be collected from the parent
    if not allow_child_models and any(
        [isinstance(field, ParentalKey) for field in model._meta.get_fields()]
    ):
        return False

    for field in model._meta.get_fields():
        if field.is_relation and field.many_to_one:
            if getattr(field, "wagtail_reference_index_ignore", False):
                continue

            if getattr(
                field.related_model, "wagtail_reference_index_ignore", False
            ):
                continue

            if isinstance(field, (ParentalKey, GenericRel)):
                continue

            return True

        if hasattr(field, "extract_references"):
            return True

    if issubclass(model, ClusterableModel):
        for child_relation in get_all_child_relations(model):
            if cls.model_is_indexable(
                child_relation.related_model,
                allow_child_models=True,
            ):
                return True

    return False
wagtail/models/reference_index.py
244
wagtail
{ "docstring": "\n Returns True if the given model may have outbound references that we would be interested in recording in the index.\n\n\n Args:\n model (type): a Django model class\n allow_child_models (boolean): Child models are not indexable on their own. If you are looking at\n a child model from the perspective of indexing it through its parent,\n set this to True to disable checking for this. Default False.\n ", "language": "en", "n_whitespaces": 191, "n_words": 65, "vocab_size": 55 }
91
Python
59
c8689acb3724dc12fb09a0bfc14d7e4755a1ea0f
reference_index.py
79,676
28
156
model_is_indexable
https://github.com/wagtail/wagtail.git
Check field for .extract_references method instead of field type Co-authored-by: Matt Westcott <[email protected]>
466
0
16,955
13
1
4
def is_connected(self) -> bool:
    return self._device.is_connected
homeassistant/components/asuswrt/device_tracker.py
25
core
{ "docstring": "Return true if the device is connected to the network.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
6
Python
6
bc2ba8e1c8c988ae24f6961ce64187782f5ba32d
device_tracker.py
296,122
3
14
is_connected
https://github.com/home-assistant/core.git
Add missing type declaration to AsusWrt Scanner Entity (#69773)
20
0
95,126
7
1
24
def test_get_conda_env_dir(tmp_path):
    # Simulate starting in an env named tf1.
    d = tmp_path / "envs" / "tf1"
    Path.mkdir(d, parents=True)
    with mock.patch.dict(
        os.environ, {"CONDA_PREFIX": str(d), "CONDA_DEFAULT_ENV": "tf1"}
    ):
        with pytest.raises(ValueError):
            # Env tf2 should not exist.
            env_dir = get_conda_env_dir("tf2")
        tf2_dir = tmp_path / "envs" / "tf2"
        Path.mkdir(tf2_dir, parents=True)
        env_dir = get_conda_env_dir("tf2")
        assert env_dir == str(tmp_path / "envs" / "tf2")

    # Simulate starting in (base) conda env.
    with mock.patch.dict(
        os.environ, {"CONDA_PREFIX": str(tmp_path), "CONDA_DEFAULT_ENV": "base"}
    ):
        with pytest.raises(ValueError):
            # Env tf3 should not exist.
            env_dir = get_conda_env_dir("tf3")
        # Env tf2 still should exist.
        env_dir = get_conda_env_dir("tf2")
        assert env_dir == str(tmp_path / "envs" / "tf2")


@pytest.mark.skipif(
    os.environ.get("CI") and sys.platform != "linux",
    reason="This test is only run on linux CI machines.",
)
python/ray/tests/test_runtime_env_complicated.py
335
@pytest.mark.skipif(
    os.environ.get("CI") and sys.platform != "linux",
    reason="This test is only run on linux CI machines.",
)
ray
{ "docstring": "\n Typical output of `conda env list`, for context:\n\n base /Users/scaly/anaconda3\n my_env_1 /Users/scaly/anaconda3/envs/my_env_1\n\n For this test, `tmp_path` is a stand-in for `Users/scaly/anaconda3`.\n ", "language": "en", "n_whitespaces": 65, "n_words": 21, "vocab_size": 20 }
117
Python
65
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
test_runtime_env_complicated.py
131,824
19
152
test_get_conda_env_dir
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
269
1
29,601
13
1
15
def test_create_expiry_time(self) -> None:
    # Should fail with a time in the past
    channel = self.make_request(
        "POST",
        self.url + "/new",
        {"expiry_time": self.clock.time_msec() - 10000},
        access_token=self.admin_user_tok,
    )
    self.assertEqual(400, channel.code, msg=channel.json_body)
    self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)

    # Should fail with float
    channel = self.make_request(
        "POST",
        self.url + "/new",
        {"expiry_time": self.clock.time_msec() + 1000000.5},
        access_token=self.admin_user_tok,
    )
    self.assertEqual(400, channel.code, msg=channel.json_body)
    self.assertEqual(channel.json_body["errcode"], Codes.INVALID_PARAM)
tests/rest/admin/test_registration_tokens.py
226
synapse
{ "docstring": "Check you can't create a token with an invalid expiry_time.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
54
Python
33
2281427175e4c93a30c39607fb4ac23c2a1f399f
test_registration_tokens.py
249,337
18
142
test_create_expiry_time
https://github.com/matrix-org/synapse.git
Use literals in place of `HTTPStatus` constants in tests (#13488) * Use literals in place of `HTTPStatus` constants in tests * newsfile * code style * code style
219
0
72,840
14
1
2
def cli() -> None: @cli.command()
scripts-dev/release.py
26
@cli.command()
synapse
{ "docstring": "An interactive script to walk through the parts of creating a release.\n\n Requires the dev dependencies be installed, which can be done via:\n\n pip install -e .[dev]\n\n Then to use:\n\n ./scripts-dev/release.py prepare\n\n # ... ask others to look at the changelog ...\n\n ./scripts-dev/release.py tag\n\n # ... wait for assets to build ...\n\n ./scripts-dev/release.py publish\n\n ./scripts-dev/release.py upload\n\n # Optional: generate some nice links for the announcement\n\n ./scripts-dev/release.py announce\n\n If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the\n `tag`/`publish` command, then a new draft release will be created/published.\n ", "language": "en", "n_whitespaces": 168, "n_words": 90, "vocab_size": 69 }
5
Python
5
30c8e7e408322967e5beb2a64ef5f796cb8df226
release.py
248,082
28
7
cli
https://github.com/matrix-org/synapse.git
Make `scripts-dev` pass `mypy --disallow-untyped-defs` (#12356) Not enforced in config yet. One day.
7
1
72,090
6
5
12
def add_update(self, updates):
    call_context = base_layer_utils.call_context()
    # No need to run updates during Functional API construction.
    if call_context.in_keras_graph:
        return

    # Callable updates are disabled by setting `trainable=False`.
    if not call_context.frozen:
        for update in tf.nest.flatten(updates):
            if callable(update):
                update()
keras/engine/base_layer.py
83
keras
{ "docstring": "Add update op(s), potentially dependent on layer inputs.\n\n Weight updates (for instance, the updates of the moving mean and\n variance in a BatchNormalization layer) may be dependent on the inputs\n passed when calling a layer. Hence, when reusing the same layer on\n different inputs `a` and `b`, some entries in `layer.updates` may be\n dependent on `a` and some on `b`. This method automatically keeps track\n of dependencies.\n\n This call is ignored when eager execution is enabled (in that case,\n variable updates are run on the fly and thus do not need to be tracked\n for later execution).\n\n Args:\n updates: Update op, or list/tuple of update ops, or zero-arg callable\n that returns an update op. A zero-arg callable should be passed in\n order to disable running the updates by setting `trainable=False`\n on this Layer, when executing in Eager mode.\n ", "language": "en", "n_whitespaces": 257, "n_words": 138, "vocab_size": 92 }
37
Python
33
3613c3defc39c236fb1592c4f7ba1a9cc887343a
base_layer.py
278,684
8
48
add_update
https://github.com/keras-team/keras.git
Remove pylint comments. PiperOrigin-RevId: 452353044
135
0
82,676
13
2
48
def _add_option_iterations(self) -> None:
    logger.debug("Adding Iterations Slider")
    tk_var = self.vars["display_iterations"]
    min_max = (0, 100000)
    hlp = _("Set the number of iterations to display. 0 displays the full session.")

    ctl_frame = ttk.Frame(self.optsframe)
    ctl_frame.pack(padx=2, side=tk.RIGHT)

    lbl = ttk.Label(ctl_frame, text="Iterations:", anchor=tk.W)
    lbl.pack(pady=5, side=tk.LEFT, anchor=tk.N, expand=True)

    tbox = ttk.Entry(ctl_frame, width=6, textvariable=tk_var, justify=tk.RIGHT)
    tbox.pack(padx=(0, 5), side=tk.RIGHT)

    ctl = ttk.Scale(
        ctl_frame,
        variable=tk_var,
        command=lambda val, var=tk_var, dt=int, rn=1000, mm=min_max:  # type:ignore
        set_slider_rounding(val, var, dt, rn, mm))
    ctl["from_"] = min_max[0]
    ctl["to"] = min_max[1]
    ctl.pack(padx=5, pady=5, fill=tk.X, expand=True)

    for item in (tbox, ctl):
        Tooltip(item, text=hlp, wrap_length=200)

    logger.debug("Added Iterations Slider")
lib/gui/display_command.py
384
faceswap
{ "docstring": " Add a slider to adjust the amount if iterations to display ", "language": "en", "n_whitespaces": 12, "n_words": 11, "vocab_size": 10 }
90
Python
77
dab823a3eb7a5257cb1e0818ee10ed234d3de97f
display_command.py
101,893
25
254
_add_option_iterations
https://github.com/deepfakes/faceswap.git
Typing - lib.gui.display_command
303
0
21,275
12
3
13
def build_partition(cls, partition_ids, column_widths):
    return np.array(
        [
            [
                cls.frame_partition_cls(
                    part_id[0],
                    length=part_id[2],
                    width=col_width,
                )
                for part_id, col_width in zip(part_ids, column_widths)
            ]
            for part_ids in partition_ids
        ]
    )
modin/core/io/column_stores/parquet_dispatcher.py
81
modin
{ "docstring": "\n Build array with partitions of `cls.frame_partition_cls` class.\n\n Parameters\n ----------\n partition_ids : list\n Array with references to the partitions data.\n column_widths : list\n Number of columns in each partition.\n\n Returns\n -------\n np.ndarray\n array with shape equals to the shape of `partition_ids` and\n filed with partition objects.\n\n Notes\n -----\n The second level of partitions_ids contains a list of object references\n for each read call:\n partition_ids[i][j] -> [ObjectRef(df), ObjectRef(df.index), ObjectRef(len(df))].\n ", "language": "en", "n_whitespaces": 210, "n_words": 67, "vocab_size": 50 }
26
Python
21
8864bc197974da6d8cda2de2f35ca31d561be1cc
parquet_dispatcher.py
154,121
14
56
build_partition
https://github.com/modin-project/modin.git
PERF-#4305: Parallelize `read_parquet` over row groups (#4700) Co-authored-by: mvashishtha <[email protected]>
240
0
35,794
13
2
2
def replace_cfg_vals(ori_cfg):
mmdet/utils/replace_cfg_vals.py
13
mmdetection
{ "docstring": "Replace the string \"${key}\" with the corresponding value.\n\n Replace the \"${key}\" with the value of ori_cfg.key in the config. And\n support replacing the chained ${key}. Such as, replace \"${key0.key1}\"\n with the value of cfg.key0.key1. Code is modified from `vars.py\n < https://github.com/microsoft/SoftTeacher/blob/main/ssod/utils/vars.py>`_ # noqa: E501\n\n Args:\n ori_cfg (mmcv.utils.config.Config):\n The origin config with \"${key}\" generated from a file.\n\n Returns:\n updated_cfg [mmcv.utils.config.Config]:\n The config with \"${key}\" replaced by the corresponding value.\n ", "language": "en", "n_whitespaces": 126, "n_words": 68, "vocab_size": 46 }
2
Python
2
0db1b9b3d2c3f231241b25c54b3632a0413732ed
replace_cfg_vals.py
244,243
10
64
replace_cfg_vals
https://github.com/open-mmlab/mmdetection.git
[Tools] Support replacing the ${key} with the value of cfg.key (#7492) * Support replacing config * Support replacing config * Add unit test for replace_cfig * pre-commit * fix * modify the docstring * rename function * fix a bug * fix a bug and simplify the code * simplify the code * add replace_cfg_vals for some scripts * add replace_cfg_vals for some scripts * add some unit tests
5
0
70,293
6
1
6
def artifacts(self) -> Dict[str, "mlflow.models.EvaluationArtifact"]:
    return self._artifacts


_cached_mlflow_client = None
mlflow/models/evaluation/base.py
35
mlflow
{ "docstring": "\n A dictionary mapping standardized artifact names (e.g. \"roc_data\") to\n artifact content and location information\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
10
Python
10
4c58179509e6f6047789efb0a95c2b0e20cb6c8f
base.py
19,154
6
17
artifacts
https://github.com/mlflow/mlflow.git
Improve evaluation api (#5256) * init Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update doc Signed-off-by: Weichen Xu <[email protected]> * update doc Signed-off-by: Weichen Xu <[email protected]> * address comments Signed-off-by: Weichen Xu <[email protected]> * update doc Signed-off-by: Weichen Xu <[email protected]> * add shap limitation on value type Signed-off-by: Weichen Xu <[email protected]> * fix format Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]>
23
0
2,900
6
4
28
def decode(ew):
    _, charset, cte, cte_string, _ = ew.split('?')
    charset, _, lang = charset.partition('*')
    cte = cte.lower()
    # Recover the original bytes and do CTE decoding.
    bstring = cte_string.encode('ascii', 'surrogateescape')
    bstring, defects = _cte_decoders[cte](bstring)
    # Turn the CTE decoded bytes into unicode.
    try:
        string = bstring.decode(charset)
    except UnicodeError:
        defects.append(errors.UndecodableBytesDefect("Encoded word "
            "contains bytes not decodable using {} charset".format(charset)))
        string = bstring.decode(charset, 'surrogateescape')
    except LookupError:
        string = bstring.decode('ascii', 'surrogateescape')
        if charset.lower() != 'unknown-8bit':
            defects.append(errors.CharsetError("Unknown charset {} "
                "in encoded word; decoded as unknown bytes".format(charset)))
    return string, charset, lang, defects


_cte_encoders = {
    'q': encode_q,
    'b': encode_b,
}

_cte_encode_length = {
    'q': len_q,
    'b': len_b,
}
python3.10.4/Lib/email/_encoded_words.py
304
XX-Net
{ "docstring": "Decode encoded word and return (string, charset, lang, defects) tuple.\n\n An RFC 2047/2243 encoded word has the form:\n\n =?charset*lang?cte?encoded_string?=\n\n where '*lang' may be omitted but the other parts may not be.\n\n This function expects exactly such a string (that is, it does not check the\n syntax and may raise errors if the string is not well formed), and returns\n the encoded_string decoded first from its Content Transfer Encoding and\n then from the resulting bytes into unicode using the specified charset. If\n the cte-decoded string does not successfully decode using the specified\n character set, a defect is added to the defects list and the unknown octets\n are replaced by the unicode 'unknown' character \\\\uFDFF.\n\n The specified charset and language are returned. The default for language,\n which is rarely if ever encountered, is the empty string.\n\n ", "language": "en", "n_whitespaces": 179, "n_words": 134, "vocab_size": 94 }
104
Python
74
8198943edd73a363c266633e1aa5b2a9e9c9f526
_encoded_words.py
223,504
18
149
decode
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
228
0
56,938
18
5
29
def add_supercategory_ann(self, annotations):
    for i, ann in enumerate(annotations):
        assert len(ann['labels']) == len(ann['bboxes']) == \
               len(ann['gt_is_group_ofs'])
        gt_bboxes = []
        gt_is_group_ofs = []
        gt_labels = []
        for j in range(len(ann['labels'])):
            label = ann['labels'][j]
            bbox = ann['bboxes'][j]
            is_group = ann['gt_is_group_ofs'][j]
            label = np.where(self.class_label_tree[label])[0]
            if len(label) > 1:
                for k in range(len(label)):
                    gt_bboxes.append(bbox)
                    gt_is_group_ofs.append(is_group)
                    gt_labels.append(label[k])
            else:
                gt_bboxes.append(bbox)
                gt_is_group_ofs.append(is_group)
                gt_labels.append(label[0])
        annotations[i] = dict(
            bboxes=np.array(gt_bboxes).astype(np.float32),
            labels=np.array(gt_labels).astype(np.int64),
            bboxes_ignore=ann['bboxes_ignore'],
            gt_is_group_ofs=np.array(gt_is_group_ofs).astype(np.bool))
    return annotations
mmdet/datasets/openimages.py
384
mmdetection
{ "docstring": "Add parent classes of the corresponding class of the ground truth\n bboxes.", "language": "en", "n_whitespaces": 18, "n_words": 12, "vocab_size": 10 }
64
Python
47
1516986a616fee8bb741d0ab2be40683045efccd
openimages.py
243,996
27
239
add_supercategory_ann
https://github.com/open-mmlab/mmdetection.git
[Feature] Support OpenImages Dataset (#6331) * [Feature] support openimage group of eval * [Feature] support openimage group of eval * support openimage dataset * support openimage challenge dataset * fully support OpenImages-V6 and OpenImages Challenge 2019 * Fix some logic error * update config file * fix get data_infos error * fully support OpenImages evaluation * update OpenImages config files * [Feature] support OpenImages datasets * fix bug * support load image metas from pipeline * fix bug * fix get classes logic error * update code * support get image metas * support openimags * support collect image metas * support Open Images * fix openimages logic * minor fix * add a new function to compute openimages tpfp * minor fix * fix ci error * minor fix * fix indication * minor fix * fix returns * fix returns * fix returns * fix returns * fix returns * minor fix * update readme * support loading image level labels and fix some logic * minor fix * minor fix * add class names * minor fix * minor fix * minor fix * add openimages test unit * minor fix * minor fix * fix test unit * minor fix * fix logic error * minor fix * fully support openimages * minor fix * fix docstring * fix docstrings in readthedocs * update get image metas script * label_description_file -> label_file * update openimages readme * fix test unit * fix test unit * minor fix * update readme file * Update get_image_metas.py
464
0
70,186
16
2
5
def close(self) -> None:
    if self.worker:
        self.worker.stop()
    return super().close()
src/prefect/logging/handlers.py
50
prefect
{ "docstring": "\n Shuts down this handler and the `OrionLogWorker`.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
9
Python
9
fc20231ae7707ca9ca51a3e25fe8991482a02e2e
handlers.py
53,059
7
28
close
https://github.com/PrefectHQ/prefect.git
Add more docstrings
41
0
10,698
10
1
9
def _object2proto(self) -> GetEnumAttributeAction_PB:
    return GetEnumAttributeAction_PB(
        path=self.path,
        id_at_location=serialize(self.id_at_location),
        address=serialize(self.address),
        msg_id=serialize(self.id),
    )
packages/syft/src/syft/core/node/common/action/get_enum_attribute_action.py
70
PySyft
{ "docstring": "Returns a protobuf serialization of self.\n As a requirement of all objects which inherit from Serializable,\n this method transforms the current object into the corresponding\n Protobuf object so that it can be further serialized.\n :return: returns a protobuf object\n :rtype: GetOrSetPropertyAction_PB\n .. note::\n This method is purely an internal method. Please use serialize(object) or one of\n the other public serialization methods if you wish to serialize an\n object.\n ", "language": "en", "n_whitespaces": 150, "n_words": 68, "vocab_size": 56 }
11
Python
11
e272ed2fa4c58e0a89e273a3e85da7d13a85e04c
get_enum_attribute_action.py
2,712
18
45
_object2proto
https://github.com/OpenMined/PySyft.git
[syft.core.node.common.action] Change syft import absolute -> relative
76
0
343
11
1
27
async def test_doorbell_event_session_update(hass, auth):
    events = async_capture_events(hass, NEST_EVENT)
    subscriber = await async_setup_devices(
        hass,
        "sdm.devices.types.DOORBELL",
        create_device_traits(
            [
                "sdm.devices.traits.CameraClipPreview",
                "sdm.devices.traits.CameraPerson",
                "sdm.devices.traits.CameraMotion",
            ]
        ),
        auth,
    )
    registry = er.async_get(hass)
    entry = registry.async_get("camera.front")
    assert entry is not None

    # Message #1 has a motion event
    timestamp1 = utcnow()
    await subscriber.async_receive_event(
        create_events(
            {
                "sdm.devices.events.CameraMotion.Motion": {
                    "eventSessionId": EVENT_SESSION_ID,
                    "eventId": "n:1",
                },
                "sdm.devices.events.CameraClipPreview.ClipPreview": {
                    "eventSessionId": EVENT_SESSION_ID,
                    "previewUrl": "image-url-1",
                },
            },
            timestamp=timestamp1,
        )
    )

    # Message #2 has an extra person event
    timestamp2 = utcnow()
    await subscriber.async_receive_event(
        create_events(
            {
                "sdm.devices.events.CameraMotion.Motion": {
                    "eventSessionId": EVENT_SESSION_ID,
                    "eventId": "n:1",
                },
                "sdm.devices.events.CameraPerson.Person": {
                    "eventSessionId": EVENT_SESSION_ID,
                    "eventId": "n:2",
                },
                "sdm.devices.events.CameraClipPreview.ClipPreview": {
                    "eventSessionId": EVENT_SESSION_ID,
                    "previewUrl": "image-url-1",
                },
            },
            timestamp=timestamp2,
        )
    )
    await hass.async_block_till_done()

    assert len(events) == 2
    assert event_view(events[0].data) == {
        "device_id": entry.device_id,
        "type": "camera_motion",
        "timestamp": timestamp1.replace(microsecond=0),
    }
    assert event_view(events[1].data) == {
        "device_id": entry.device_id,
        "type": "camera_person",
        "timestamp": timestamp2.replace(microsecond=0),
    }
tests/components/nest/test_events.py
434
core
{ "docstring": "Test a pubsub message with updates to an existing session.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
134
Python
75
789c0a24dd558207b712ddf10a919d9353853e40
test_events.py
309,309
65
249
test_doorbell_event_session_update
https://github.com/home-assistant/core.git
Improve nest media player clip/image and event handling for multiple events in a short time range (#63149)
775
0
108,015
15
1
7
def test_delete_files_from_storage_task_files_not_existing_files(media_root):
    # given
    path = "random/test-path"
    path_2 = "random/test-path-2"
    assert not default_storage.exists(path)
    assert not default_storage.exists(path_2)

    # when
    delete_files_from_storage_task([path, path_2])
saleor/core/tests/test_tasks.py
67
saleor
{ "docstring": "Ensure method not fail when trying to remove not existing file.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
20
Python
16
2611883cda3b84ccbfcbf37221f5b62a08bc9af1
test_tasks.py
28,452
6
36
test_delete_files_from_storage_task_files_not_existing_files
https://github.com/saleor/saleor.git
Fix the migration for removing media marked as to remove (#10429) * Add celery task for removing multiple files from storage * Fix the migration for removing media marked as to remove
44
0
5,170
8
2
8
def get_project_ids() -> List[str]:
    return [project["project_id"] for project in PROJECTS_DATA]


@log_start_end(log=logger)
openbb_terminal/cryptocurrency/due_diligence/tokenterminal_model.py
48
@log_start_end(log=logger)
OpenBBTerminal
{ "docstring": "This function returns the available project ids.\n\n Returns\n ----------\n List[str]\n A list with the all the project IDs\n ", "language": "en", "n_whitespaces": 37, "n_words": 18, "vocab_size": 15 }
11
Python
11
7979b1fc071a1c3e7463044bea617d7305b4a17e
tokenterminal_model.py
286,002
9
21
get_project_ids
https://github.com/OpenBB-finance/OpenBBTerminal.git
Add 3 Token Terminal commands (#2447) * add crypto/ov/fun * add tokenterminal to dependencies * update website content * add to main.yml * fix tests * add tests * Update _index.md * Update _index.md * fix tests * fix test * List hint added * improve code based on Jose input * fix tests * requirements for token terminal * add source and fix source bug * some improvements * colors bars * fix dependencies * update kaleido version * update setuptools for pkg_resources * replace pkg_resources by importlib_metadata * Added fixes * Fixed tests * fix stuff for Josecas Co-authored-by: Colin Delahunty <[email protected]> Co-authored-by: colin99d <[email protected]>
16
1
85,500
8
2
8
def encodePythonUnicodeToC(value):
    assert type(value) is unicode, type(value)

    result = ""

    for c in value:
        cv = ord(c)

        result += r"\%o" % cv

    return 'L"%s"' % result
nuitka/utils/CStrings.py
73
Nuitka
{ "docstring": "Encode a string, so that it gives a wide C string literal.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
26
Python
20
70b7eee9555c8d5599d096eaf600521475b001d9
CStrings.py
178,838
7
42
encodePythonUnicodeToC
https://github.com/Nuitka/Nuitka.git
Python3.7+: Added support for get_resource_reader to our loader * This allows to avoid a useless file copy to a temporary file in case a "importlib.resources.path" is used. * Also fixed a few typos in tests. * And avoid compiling the meta path based loader separately, so it can use compiled code helpers easily.
55
0
42,841
10
2
26
def test_search_users(self) -> None:
    realm = get_realm("zulip")

    # A payload to find all users whose email ends with @zulip.com
    payload = {
        "schemas": ["urn:ietf:params:scim:api:messages:2.0:SearchRequest"],
        "filter": 'userName ew "@zulip.com"',
    }
    result = self.client_post(
        "/scim/v2/Users/.search",
        payload,
        content_type="application/json",
        **self.scim_headers(),
    )
    self.assertEqual(result.status_code, 200)
    output_data = orjson.loads(result.content)

    user_query = UserProfile.objects.filter(
        realm=realm, is_bot=False, delivery_email__endswith="@zulip.com"
    )
    expected_response_schema = {
        "schemas": ["urn:ietf:params:scim:api:messages:2.0:ListResponse"],
        "totalResults": user_query.count(),
        "itemsPerPage": 50,
        "startIndex": 1,
        "Resources": [
            self.generate_user_schema(user_profile)
            for user_profile in UserProfile.objects.filter(realm=realm, is_bot=False).order_by(
                "id"
            )
        ],
    }
    self.assertEqual(output_data, expected_response_schema)
zerver/tests/test_scim.py
266
zulip
{ "docstring": "\n Tests a basic .search POST query:\n https://datatracker.ietf.org/doc/html/rfc7644#section-3.4.3\n ", "language": "en", "n_whitespaces": 29, "n_words": 7, "vocab_size": 7 }
74
Python
63
b775639f425d257d3367d6e462582ca926b1f7ee
test_scim.py
84,333
34
157
test_search_users
https://github.com/zulip/zulip.git
test: Use list comprehension for dictionary values. Signed-off-by: Zixuan James Li <[email protected]>
379
0
17,818
15
4
13
def _row_lengths(self):
    if self._row_lengths_cache is None:
        row_lengths_list = DaskWrapper.materialize(
            [
                self._get_partition_size_along_axis(obj, axis=0)
                for obj in self._partitions.T[0]
            ]
        )
        self._row_lengths_cache = [sum(len_list) for len_list in row_lengths_list]
    return self._row_lengths_cache
modin/core/execution/dask/implementations/pandas_on_dask/dataframe/dataframe.py
95
modin
{ "docstring": "\n Compute ther row partitions lengths if they are not cached.\n\n Returns\n -------\n list\n A list of row partitions lengths.\n ", "language": "en", "n_whitespaces": 66, "n_words": 19, "vocab_size": 16 }
27
Python
22
a7354c9ca76525a265da98f2afe882c53f378840
dataframe.py
153,954
10
61
_row_lengths
https://github.com/modin-project/modin.git
FEAT-#4419: Extend virtual partitioning API to pandas on Dask (#4420) Signed-off-by: Rehan Durrani <[email protected]> Co-authored-by: Mahesh Vashishtha <[email protected]>
149
0
35,722
15
1
14
def test_multi_page_mixed_no_archive(self):
    parser = RasterisedDocumentParser(None)
    parser.parse(
        os.path.join(self.SAMPLE_FILES, "multi-page-mixed.pdf"),
        "application/pdf",
    )
    self.assertIsNone(parser.archive_path)
    self.assertContainsStrings(
        parser.get_text().lower(),
        ["page 4", "page 5", "page 6"],
    )
src/paperless_tesseract/tests/test_parser.py
109
paperless-ngx
{ "docstring": "\n GIVEN:\n - File with some text contained in images and some in text layer\n - OCR mode set to skip_noarchive\n WHEN:\n - Document is parsed\n THEN:\n - Text from images is extracted\n - No archive file is created\n ", "language": "en", "n_whitespaces": 122, "n_words": 38, "vocab_size": 28 }
20
Python
18
b3b2519bf03185aa12028fa68d3b8f8860555e6e
test_parser.py
319,926
11
63
test_multi_page_mixed_no_archive
https://github.com/paperless-ngx/paperless-ngx.git
Fixes the creation of an archive file, even if noarchive was specified
113
0
117,021
11
6
25
def fit(self, X, y=None):
    X = self._validate_data(
        X, accept_sparse=["csr", "csc"], dtype=[np.float64, np.float32]
    )

    n_samples, n_features = X.shape

    if self.n_components == "auto":
        self.n_components_ = johnson_lindenstrauss_min_dim(
            n_samples=n_samples, eps=self.eps
        )

        if self.n_components_ <= 0:
            raise ValueError(
                "eps=%f and n_samples=%d lead to a target dimension of "
                "%d which is invalid" % (self.eps, n_samples, self.n_components_)
            )

        elif self.n_components_ > n_features:
            raise ValueError(
                "eps=%f and n_samples=%d lead to a target dimension of "
                "%d which is larger than the original space with "
                "n_features=%d" % (self.eps, n_samples, self.n_components_, n_features)
            )
    else:
        if self.n_components <= 0:
            raise ValueError(
                "n_components must be greater than 0, got %s" % self.n_components
            )

        elif self.n_components > n_features:
            warnings.warn(
                "The number of components is higher than the number of"
                " features: n_features < n_components (%s < %s)."
                "The dimensionality of the problem will not be reduced."
                % (n_features, self.n_components),
                DataDimensionalityWarning,
            )

        self.n_components_ = self.n_components

    # Generate a projection matrix of size [n_components, n_features]
    self.components_ = self._make_random_matrix(
        self.n_components_, n_features
    ).astype(X.dtype, copy=False)

    # Check contract
    assert self.components_.shape == (self.n_components_, n_features), (
        "An error has occurred the self.components_ matrix has "
        " not the proper shape."
    )

    return self
sklearn/random_projection.py
357
scikit-learn
{ "docstring": "Generate a sparse random projection matrix.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Training set: only the shape is used to find optimal random\n matrix dimensions based on the theory referenced in the\n afore mentioned papers.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n BaseRandomProjection class instance.\n ", "language": "en", "n_whitespaces": 171, "n_words": 60, "vocab_size": 53 }
185
Python
109
8b6b519caf3b3b9602958a859b4d3a7eb1d9eadd
random_projection.py
258,487
43
220
fit
https://github.com/scikit-learn/scikit-learn.git
ENH Preserving dtype for np.float32 in RandomProjection (#22114) Co-authored-by: takoika <> Co-authored-by: Thomas J. Fan <[email protected]>
760
0
75,247
16
1
10
def xbutton(self, name, title, next, xpos):
    return self.pushbutton(name, int(self.w*xpos - 28), self.h-27, 56, 17, 3, title, next)
python3.10.4/Lib/distutils/command/bdist_msi.py
66
XX-Net
{ "docstring": "Add a button with a given title, the tab-next button,\n its name in the Control table, giving its x position; the\n y-position is aligned with the other buttons.\n\n Return the button, so that events can be associated", "language": "en", "n_whitespaces": 57, "n_words": 37, "vocab_size": 29 }
17
Python
16
8198943edd73a363c266633e1aa5b2a9e9c9f526
bdist_msi.py
222,639
2
48
xbutton
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
31
0
56,681
12
1
7
def to_svg(self, size=500):
    from dask.array.svg import svg

    return svg(self.chunks, size=size)
dask/array/core.py
45
dask
{ "docstring": "Convert chunks from Dask Array into an SVG Image\n\n Parameters\n ----------\n chunks: tuple\n size: int\n Rough size of the image\n\n Examples\n --------\n >>> x.to_svg(size=500) # doctest: +SKIP\n\n Returns\n -------\n text: An svg string depicting the array as a grid of chunks\n ", "language": "en", "n_whitespaces": 130, "n_words": 41, "vocab_size": 38 }
10
Python
10
cccb9d8d8e33a891396b1275c2448c352ef40c27
core.py
156,038
3
29
to_svg
https://github.com/dask/dask.git
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
31
0
36,515
8
5
10
def tokenize(self, text, properties=None):
    default_properties = {"annotators": "tokenize,ssplit"}
    default_properties.update(properties or {})

    result = self.api_call(text, properties=default_properties)
    for sentence in result["sentences"]:
        for token in sentence["tokens"]:
            yield token["originalText"] or token["word"]
nltk/parse/corenlp.py
116
nltk
{ "docstring": "Tokenize a string of text.\n\n Skip these tests if CoreNLP is likely not ready.\n >>> if \"CLASSPATH\" not in os.environ: import pytest; pytest.skip(\"CoreNLP jars unavailable\")\n\n The CoreNLP server can be started using the following notation, although\n we recommend the `with CoreNLPServer() as server:` context manager notation\n to ensure that the server is always stopped.\n >>> server = CoreNLPServer()\n >>> server.start()\n >>> parser = CoreNLPParser(url=server.url)\n\n >>> text = 'Good muffins cost $3.88\\\\nin New York. Please buy me\\\\ntwo of them.\\\\nThanks.'\n >>> list(parser.tokenize(text))\n ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']\n\n >>> s = \"The colour of the wall is blue.\"\n >>> list(\n ... parser.tokenize(\n ... 'The colour of the wall is blue.',\n ... properties={'tokenize.options': 'americanize=true'},\n ... )\n ... )\n ['The', 'colour', 'of', 'the', 'wall', 'is', 'blue', '.']\n >>> server.stop()\n\n ", "language": "en", "n_whitespaces": 313, "n_words": 137, "vocab_size": 100 }
27
Python
23
1f4a121aa781117bc0daa3b4485cf7757f8112ee
corenlp.py
42,558
7
66
tokenize
https://github.com/nltk/nltk.git
Rework CoreNLP tests for 4.5.1, make them work if CoreNLP is on CLASSPATH If not, they are skipped. Sadly this does make the docstrings a bit more confusing
88
0
7,620
13
1
4
async def async_will_remove_from_hass(self) -> None:
    await super().async_will_remove_from_hass()
    await self.async_disable()
homeassistant/components/automation/__init__.py
43
core
{ "docstring": "Remove listeners when removing automation from Home Assistant.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
9
Python
8
5e338d21665cb04f66fcebd9376cdda389c30c01
__init__.py
307,671
4
22
async_will_remove_from_hass
https://github.com/home-assistant/core.git
Improve type hints in automation (#78368) * Improve type hints in automation * Apply suggestion * Apply suggestion * Apply suggestion * Add Protocol for IfAction * Use ConfigType for IfAction * Rename variable
30
0
106,439
10
1
14
def test_reading_post_data_raises_os_error(self):
    mw = CsrfViewMiddleware(post_form_view)
    req = self._get_POST_request_with_token(request_class=PostErrorRequest)
    req.post_error = OSError("Deleted directories/Missing permissions.")
    mw.process_request(req)
    with self.assertRaises(OSError):
        mw.process_view(req, post_form_view, (), {})
tests/csrf_tests/tests.py
99
django
{ "docstring": "\n An OSError raised while reading the POST data should not be handled by\n the middleware.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 14 }
20
Python
18
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
202,399
7
58
test_reading_post_data_raises_os_error
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
73
0
50,107
10
2
10
def require_mps(test_case):
    is_mps_supported = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
    return unittest.skipUnless(is_mps_supported, "test requires a `mps` backend support in `torch`")(test_case)
src/accelerate/test_utils/testing.py
66
accelerate
{ "docstring": "\n Decorator marking a test that requires MPS backend. These tests are skipped when torch doesn't support `mps`\n backend.\n ", "language": "en", "n_whitespaces": 28, "n_words": 18, "vocab_size": 17 }
18
Python
18
bb6ee0b7bc72cb29e496a6d05aee9e11d6f745b1
testing.py
338,450
3
38
require_mps
https://github.com/huggingface/accelerate.git
Support `init_on_device` (#926) * Support init_on_device * Support mps backend as well in testing
27
0
121,217
11
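
The decorator above follows the standard `unittest.skipUnless` gating pattern; here is a hedged, self-contained sketch of the same pattern (the `require_cuda` helper below is hypothetical, not part of accelerate):

import unittest

def require_cuda(test_case):
    # Hypothetical helper mirroring the pattern above: skip unless torch sees a CUDA device.
    try:
        import torch
        has_cuda = torch.cuda.is_available()
    except ImportError:
        has_cuda = False
    return unittest.skipUnless(has_cuda, "test requires a CUDA device")(test_case)

class ExampleTest(unittest.TestCase):
    @require_cuda
    def test_gpu_path(self):
        self.assertTrue(True)

if __name__ == "__main__":
    unittest.main()
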
3
28
def request_hook(self, method, path, data, params, **kwargs):
    # handle params that are already part of the path
    url_params = dict(parse_qs(urlsplit(path).query))
    url_params.update(params or {})
    path = path.split("?")[0]

    jwt_payload = {
        "iss": JIRA_KEY,
        "iat": datetime.datetime.utcnow(),
        "exp": datetime.datetime.utcnow() + datetime.timedelta(seconds=5 * 60),
        "qsh": get_query_hash(path, method.upper(), url_params),
    }
    encoded_jwt = jwt.encode(jwt_payload, self.shared_secret)
    params = dict(jwt=encoded_jwt, **(url_params or {}))
    request_spec = kwargs.copy()
    request_spec.update(dict(method=method, path=path, data=data, params=params))
    return request_spec
src/sentry/integrations/jira/client.py
262
sentry
{ "docstring": "\n Used by Jira Client to apply the jira-cloud authentication\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
63
Python
54
2fbf550ec05c8501cbc9eca62e73526e717dcbdf
client.py
93,687
15
165
request_hook
https://github.com/getsentry/sentry.git
ref(Jira): Split Jira Cloud and Jira Server (#37034) * Split Jira Cloud and Jira Server
191
0
19,009
13
10
34
def _tensorflow_dependency_install(self):
    # TODO This will need to be more robust if/when we accept multiple Tensorflow Versions
    versions = list(TENSORFLOW_REQUIREMENTS.values())[-1]
    condaexe = ["conda", "search"]
    pkgs = ["cudatoolkit", "cudnn"]
    shell = self.env.os_version[0] == "Windows"
    for pkg in pkgs:
        with Popen(condaexe + [pkg], shell=shell, stdout=PIPE) as chk:
            available = [line.split()
                         for line in chk.communicate()[0].decode(self.env.encoding).splitlines()
                         if line.startswith(pkg)]
        compatible = [req for req in available
                      if (pkg == "cudatoolkit" and req[1].startswith(versions[0]))
                      or (pkg == "cudnn" and versions[0] in req[2]
                          and req[1].startswith(versions[1]))]
        candidate = "==".join(sorted(compatible, key=lambda x: x[1])[-1][:2])
        self.conda_installer(candidate, verbose=True, conda_only=True)
setup.py
340
faceswap
{ "docstring": " Install the Cuda/cuDNN dependencies from Conda when tensorflow is not available\n in Conda.\n\n This was used whilst Tensorflow 2.2 was not available for Windows in Conda. It is kept\n here in case it is required again in the future.\n ", "language": "en", "n_whitespaces": 68, "n_words": 39, "vocab_size": 29 }
86
Python
68
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
setup.py
100,402
17
211
_tensorflow_dependency_install
https://github.com/deepfakes/faceswap.git
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
373
0
19,885
19
10
22
def join(self, join, reuse=None):
    reuse_aliases = [
        a
        for a, j in self.alias_map.items()
        if (reuse is None or a in reuse) and j.equals(join)
    ]
    if reuse_aliases:
        if join.table_alias in reuse_aliases:
            reuse_alias = join.table_alias
        else:
            # Reuse the most recent alias of the joined table
            # (a many-to-many relation may be joined multiple times).
            reuse_alias = reuse_aliases[-1]
        self.ref_alias(reuse_alias)
        return reuse_alias

    # No reuse is possible, so we need a new alias.
    alias, _ = self.table_alias(
        join.table_name, create=True, filtered_relation=join.filtered_relation
    )
    if join.join_type:
        if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
            join_type = LOUTER
        else:
            join_type = INNER
        join.join_type = join_type
    join.table_alias = alias
    self.alias_map[alias] = join
    return alias
django/db/models/sql/query.py
239
django
{ "docstring": "\n Return an alias for the 'join', either reusing an existing alias for\n that join or creating a new one. 'join' is either a base_table_class or\n join_class.\n\n The 'reuse' parameter can be either None which means all joins are\n reusable, or it can be a set containing the aliases that can be reused.\n\n A join is always created as LOUTER if the lhs alias is LOUTER to make\n sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new\n joins are created as LOUTER if the join is nullable.\n ", "language": "en", "n_whitespaces": 153, "n_words": 89, "vocab_size": 57 }
104
Python
70
9c19aff7c7561e3a82978a272ecdaad40dda5c00
query.py
205,866
25
151
join
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
392
0
51,251
13
3
49
def estimate_blur_fft(cls, image, metadata=None):
    if metadata is not None:
        alignments = metadata["alignments"]
        det_face = DetectedFace()
        det_face.from_png_meta(alignments)
        aln_face = AlignedFace(np.array(alignments["landmarks_xy"], dtype="float32"),
                               image=image,
                               centering="legacy",
                               size=256,
                               is_aligned=True)
        mask = det_face.mask["components"]
        mask.set_sub_crop(aln_face.pose.offset[mask.stored_centering],
                          aln_face.pose.offset["legacy"],
                          centering="legacy")
        mask = cv2.resize(mask.mask, (256, 256), interpolation=cv2.INTER_CUBIC)[..., None]
        image = np.minimum(aln_face.face, mask)
    if image.ndim == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    height, width = image.shape
    c_height, c_width = (int(height / 2.0), int(width / 2.0))
    fft = np.fft.fft2(image)
    fft_shift = np.fft.fftshift(fft)
    fft_shift[c_height - 75:c_height + 75, c_width - 75:c_width + 75] = 0
    ifft_shift = np.fft.ifftshift(fft_shift)
    shift_back = np.fft.ifft2(ifft_shift)
    magnitude = np.log(np.abs(shift_back))
    score = np.mean(magnitude)
    return score
tools/sort/sort.py
424
faceswap
{ "docstring": " Estimate the amount of blur a fft filtered image has.\n\n Parameters\n ----------\n image: :class:`numpy.ndarray`\n Use Fourier Transform to analyze the frequency characteristics of the masked\n face using 2D Discrete Fourier Transform (DFT) filter to find the frequency domain.\n A mean value is assigned to the magnitude spectrum and returns a blur score.\n Adapted from https://www.pyimagesearch.com/2020/06/15/\n opencv-fast-fourier-transform-fft-for-blur-detection-in-images-and-video-streams/\n metadata: dict, optional\n The metadata for the face image or ``None`` if no metadata is available. If metadata is\n provided the face will be masked by the \"components\" mask prior to calculating blur.\n Default:``None``\n\n Returns\n -------\n float\n The estimated fft blur score for the face\n ", "language": "en", "n_whitespaces": 257, "n_words": 101, "vocab_size": 71 }
94
Python
71
32950897376b48e0f08b46385602e4df902cf49e
sort.py
101,185
28
276
estimate_blur_fft
https://github.com/deepfakes/faceswap.git
lib.detected_face.Mask - Add source + target offset and coverage to set_sub_crop method
478
0
20,606
14
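
The docstring above describes scoring blur by zeroing the low-frequency centre of the spectrum and measuring what remains; a minimal numpy-only sketch of that idea follows (the 30-pixel window and the epsilon are arbitrary choices here, and the face masking from the record is omitted):

import numpy as np

def fft_blur_score(gray_image, size=30):
    height, width = gray_image.shape
    c_y, c_x = height // 2, width // 2
    spectrum = np.fft.fftshift(np.fft.fft2(gray_image))
    spectrum[c_y - size:c_y + size, c_x - size:c_x + size] = 0  # drop low frequencies
    recon = np.fft.ifft2(np.fft.ifftshift(spectrum))
    return float(np.mean(np.log(np.abs(recon) + 1e-8)))  # higher = sharper

sharp = np.random.rand(128, 128)
flat = np.full((128, 128), 0.5)
print(fft_blur_score(sharp) > fft_blur_score(flat))  # expected: True
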
13
29
def execute_info(self):
    roles_path = context.CLIARGS['roles_path']

    data = ''
    for role in context.CLIARGS['args']:
        role_info = {'path': roles_path}
        gr = GalaxyRole(self.galaxy, self.lazy_role_api, role)
        install_info = gr.install_info
        if install_info:
            if 'version' in install_info:
                install_info['installed_version'] = install_info['version']
                del install_info['version']
            role_info.update(install_info)

        if not context.CLIARGS['offline']:
            remote_data = None
            try:
                remote_data = self.api.lookup_role_by_name(role, False)
            except AnsibleError as e:
                if e.http_code == 400 and 'Bad Request' in e.message:
                    # Role does not exist in Ansible Galaxy
                    data = u"- the role %s was not found" % role
                    break

                raise AnsibleError("Unable to find info about '%s': %s" % (role, e))

            if remote_data:
                role_info.update(remote_data)
        elif context.CLIARGS['offline'] and not gr._exists:
            data = u"- the role %s was not found" % role
            break

        if gr.metadata:
            role_info.update(gr.metadata)

        req = RoleRequirement()
        role_spec = req.role_yaml_parse({'role': role})
        if role_spec:
            role_info.update(role_spec)

        data += self._display_role_info(role_info)

    self.pager(data)
lib/ansible/cli/galaxy.py
385
ansible
{ "docstring": "\n prints out detailed information about an installed role as well as info available from the galaxy API.\n ", "language": "en", "n_whitespaces": 32, "n_words": 17, "vocab_size": 16 }
128
Python
85
cb2e434dd2359a9fe1c00e75431f4abeff7381e8
galaxy.py
268,620
34
225
execute_info
https://github.com/ansible/ansible.git
ansible-galaxy install - fix unnecessary api check when installing a role from git repo (#79090) * delay server api evaluation until a GalaxyRole needs to make an api call for info, list, and install
617
0
79,567
17
5
3
def unwrap(func, *, stop=None):
    if stop is None:
python3.10.4/Lib/inspect.py
29
XX-Net
{ "docstring": "Get the object wrapped by *func*.\n\n Follows the chain of :attr:`__wrapped__` attributes returning the last\n object in the chain.\n\n *stop* is an optional callback accepting an object in the wrapper chain\n as its sole argument that allows the unwrapping to be terminated early if\n the callback returns a true value. If the callback never returns a true\n value, the last object in the chain is returned as usual. For example,\n :func:`signature` uses this to stop unwrapping if any object in the\n chain has a ``__signature__`` attribute defined.\n\n :exc:`ValueError` is raised if a cycle is encountered.\n\n ", "language": "en", "n_whitespaces": 116, "n_words": 95, "vocab_size": 58 }
8
Python
8
8198943edd73a363c266633e1aa5b2a9e9c9f526
inspect.py
218,388
15
94
unwrap
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
14
0
55,276
6
2
21
def tick_params(self, axis='both', **kwargs):
    for ax in self.figure.axes:
        ax.tick_params(axis=axis, **kwargs)
    return self


_facet_docs = dict(
    data=dedent(),
    rowcol=dedent(),
    rowcol_order=dedent(),
    col_wrap=dedent(),
    share_xy=dedent(),
    height=dedent(),
    aspect=dedent(),
    palette=dedent(),
    legend_out=dedent(),
    margin_titles=dedent(),
    facet_kws=dedent(),
)
seaborn/axisgrid.py
219
seaborn
{ "docstring": "Modify the ticks, tick labels, and gridlines.\n\n Parameters\n ----------\n axis : {'x', 'y', 'both'}\n The axis on which to apply the formatting.\n kwargs : keyword arguments\n Additional keyword arguments to pass to\n :meth:`matplotlib.axes.Axes.tick_params`.\n\n Returns\n -------\n self : Grid instance\n Returns self for easy chaining.\n\n \\\n data : DataFrame\n Tidy (\"long-form\") dataframe where each column is a variable and each\n row is an observation.\\\n \\\n row, col : vectors or keys in ``data``\n Variables that define subsets to plot on different facets.\\\n \\\n {row,col}_order : vector of strings\n Specify the order in which levels of the ``row`` and/or ``col`` variables\n appear in the grid of subplots.\\\n \\\n col_wrap : int\n \"Wrap\" the column variable at this width, so that the column facets\n span multiple rows. Incompatible with a ``row`` facet.\\\n \\\n share{x,y} : bool, 'col', or 'row' optional\n If true, the facets will share y axes across columns and/or x axes\n across rows.\\\n \\\n height : scalar\n Height (in inches) of each facet. See also: ``aspect``.\\\n \\\n aspect : scalar\n Aspect ratio of each facet, so that ``aspect * height`` gives the width\n of each facet in inches.\\\n \\\n palette : palette name, list, or dict\n Colors to use for the different levels of the ``hue`` variable. Should\n be something that can be interpreted by :func:`color_palette`, or a\n dictionary mapping hue levels to matplotlib colors.\\\n \\\n legend_out : bool\n If ``True``, the figure size will be extended, and the legend will be\n drawn outside the plot on the center right.\\\n \\\n margin_titles : bool\n If ``True``, the titles for the row variable are drawn to the right of\n the last column. This option is experimental and may not work in all\n cases.\\\n \\\n facet_kws : dict\n Additional parameters passed to :class:`FacetGrid`.\n ", "language": "en", "n_whitespaces": 603, "n_words": 290, "vocab_size": 175 }
27
Python
27
72d1322ee583eb481346e5e661c2998c8a7445dd
axisgrid.py
42,118
4
35
tick_params
https://github.com/mwaskom/seaborn.git
Adding Grid.tick_params() method. (#2944) * Adding Grid.tick_params() method. * Address PR comments. * Add What's New entry. * Switch tick_params() test to use pad.
90
0
7,487
10
15
24
def mac_platforms(version=None, arch=None):
    # type: (Optional[MacVersion], Optional[str]) -> Iterator[str]
    version_str, _, cpu_arch = platform.mac_ver()  # type: ignore
    if version is None:
        version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
    else:
        version = version
    if arch is None:
        arch = _mac_arch(cpu_arch)
    else:
        arch = arch

    if (10, 0) <= version and version < (11, 0):
        # Prior to Mac OS 11, each yearly release of Mac OS bumped the
        # "minor" version number.  The major version was always 10.
        for minor_version in range(version[1], -1, -1):
            compat_version = 10, minor_version
            binary_formats = _mac_binary_formats(compat_version, arch)
            for binary_format in binary_formats:
                yield "macosx_{major}_{minor}_{binary_format}".format(
                    major=10, minor=minor_version, binary_format=binary_format
                )

    if version >= (11, 0):
        # Starting with Mac OS 11, each yearly release bumps the major version
        # number.  The minor versions are now the midyear updates.
        for major_version in range(version[0], 10, -1):
            compat_version = major_version, 0
            binary_formats = _mac_binary_formats(compat_version, arch)
            for binary_format in binary_formats:
                yield "macosx_{major}_{minor}_{binary_format}".format(
                    major=major_version, minor=0, binary_format=binary_format
                )

    if version >= (11, 0):
        # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
        # Arm64 support was introduced in 11.0, so no Arm binaries from previous
        # releases exist.
        #
        # However, the "universal2" binary format can have a
        # macOS version earlier than 11.0 when the x86_64 part of the binary supports
        # that version of macOS.
        if arch == "x86_64":
            for minor_version in range(16, 3, -1):
                compat_version = 10, minor_version
                binary_formats = _mac_binary_formats(compat_version, arch)
                for binary_format in binary_formats:
                    yield "macosx_{major}_{minor}_{binary_format}".format(
                        major=compat_version[0],
                        minor=compat_version[1],
                        binary_format=binary_format,
                    )
        else:
            for minor_version in range(16, 3, -1):
                compat_version = 10, minor_version
                binary_format = "universal2"
                yield "macosx_{major}_{minor}_{binary_format}".format(
                    major=compat_version[0],
                    minor=compat_version[1],
                    binary_format=binary_format,
                )


# From PEP 513, PEP 600
.venv/lib/python3.8/site-packages/pip/_vendor/packaging/tags.py
506
transferlearning
{ "docstring": "\n Yields the platform tags for a macOS system.\n\n The `version` parameter is a two-item tuple specifying the macOS version to\n generate platform tags for. The `arch` parameter is the CPU architecture to\n generate platform tags for. Both parameters default to the appropriate value\n for the current system.\n ", "language": "en", "n_whitespaces": 66, "n_words": 47, "vocab_size": 28 }
268
Python
129
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
tags.py
62,909
46
319
mac_platforms
https://github.com/jindongwang/transferlearning.git
upd; format
889
0
13,068
18
2
13
def test_asarray_with_order(is_array_api):
    if is_array_api:
        xp = pytest.importorskip("numpy.array_api")
    else:
        xp = numpy

    X = xp.asarray([1.2, 3.4, 5.1])
    X_new = _asarray_with_order(X, order="F")

    X_new_np = numpy.asarray(X_new)
    assert X_new_np.flags["F_CONTIGUOUS"]
sklearn/utils/tests/test_array_api.py
104
scikit-learn
{ "docstring": "Test _asarray_with_order passes along order for NumPy arrays.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
25
Python
20
2710a9e7eefd2088ce35fd2fb6651d5f97e5ef8b
test_array_api.py
261,040
9
67
test_asarray_with_order
https://github.com/scikit-learn/scikit-learn.git
ENH Adds Array API support to LinearDiscriminantAnalysis (#22554) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]>
60
0
76,641
11
8
8
def _total_channels(self) -> int:
    channels = 3
    if self._config["mask_type"] and (self._config["learn_mask"] or
                                      self._config["penalized_mask_loss"]):
        channels += 1

    mults = [area for area in ["eye", "mouth"]
             if int(self._config[f"{area}_multiplier"]) > 1]
    if self._config["penalized_mask_loss"] and mults:
        channels += len(mults)
    return channels
lib/training/generator.py
143
faceswap
{ "docstring": "int: The total number of channels, including mask channels that the target image\n should hold. ", "language": "en", "n_whitespaces": 22, "n_words": 15, "vocab_size": 15 }
37
Python
29
2beceffad9b15c1fd78f06b9b272563321c5a41e
generator.py
101,287
11
82
_total_channels
https://github.com/deepfakes/faceswap.git
Data Augmentation update (#1263) - lib.detected_face - Subclass Masks for Landmark based masks - Add training mask propery + methods to DetectedFace - lib.training_training - subclass TrainingDataGenerator for training and preview data - Split cache into own module - Reduce thread count to 1 to prevent image corruption + data re-use - Process on largest model input/output size rather than stored image size - Size and crop masks during caching stage - Implement ring buffer for data flow - Fix preview reload bug - augmentation - typing - switch color aug order - better initialization - Fix warp + landmark warp to correctly apply at different image scales - Slightly improved warp caching - Don't store whether image is_preview. Handle all data as training images implicitly - plugins.trainer: Typing and fixes to work with trainingdata refactor
142
0
20,706
14
1
20
def start(self, workflow_state, user=None):
    task_state = self.get_task_state_class()(workflow_state=workflow_state)
    task_state.status = TaskState.STATUS_IN_PROGRESS
    task_state.page_revision = workflow_state.page.get_latest_revision()
    task_state.task = self
    task_state.save()
    task_submitted.send(
        sender=task_state.specific.__class__,
        instance=task_state.specific,
        user=user,
    )
    return task_state
wagtail/core/models/__init__.py
122
wagtail
{ "docstring": "Start this task on the provided workflow state by creating an instance of TaskState", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
24
Python
20
d10f15e55806c6944827d801cd9c2d53f5da4186
__init__.py
73,775
12
77
start
https://github.com/wagtail/wagtail.git
Reformat with black
120
0
16,102
10
2
18
def setup_persistent_compute_target(workspace, cluster_name, vm_size, max_nodes):
    # setting vmsize and num nodes creates a persistent AzureML
    # compute resource

    logger.debug("setup: cluster_name {}".format(cluster_name))
    # https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets

    try:
        cpu_cluster = ComputeTarget(workspace=workspace, name=cluster_name)
        logger.debug("setup: Found existing cluster, use it.")
    except ComputeTargetException:
        logger.debug("setup: create cluster")
        compute_config = AmlCompute.provisioning_configuration(
            vm_size=vm_size, max_nodes=max_nodes
        )
        cpu_cluster = ComputeTarget.create(workspace, cluster_name, compute_config)
    cpu_cluster.wait_for_completion(show_output=True)
    return cpu_cluster
tests/ci/aml_tests_old/submit_azureml_pytest.py
147
recommenders
{ "docstring": "\n Set up a persistent compute target on AzureML.\n A persistent compute target runs noticeably faster than a\n regular compute target for subsequent runs. The benefit\n is that AzureML manages turning the compute on/off as needed for\n each job so the user does not need to do this.\n\n Args:\n workspace (str): Centralized location on Azure to work with\n all the\n artifacts used by AzureML service\n cluster_name (str): the Azure cluster for this run. It can\n already exist or it will be created.\n vm_size (str): Azure VM size, like STANDARD_D3_V2\n max_nodes (int): Number of VMs, max_nodes=4 will\n autoscale up to 4 VMs\n Returns:\n cpu_cluster : cluster reference\n ", "language": "en", "n_whitespaces": 286, "n_words": 105, "vocab_size": 82 }
53
Python
44
f1b06e2f758b5b4a965f7bf428d006621d19c0b0
submit_azureml_pytest.py
39,217
13
88
setup_persistent_compute_target
https://github.com/microsoft/recommenders.git
changed folder structure for aml tests
133
0
7,139
12
10
68
def upgrade():
    op.drop_table('ai_table')

    op.create_table(
        'analysis',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('analysis', mindsdb.interfaces.storage.db.Json(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )

    with op.batch_alter_table('datasource', schema=None) as batch_op:
        batch_op.add_column(sa.Column('analysis_id', sa.Integer(), nullable=True))
        batch_op.create_foreign_key('fk_analysis_id', 'analysis', ['analysis_id'], ['id'])
        batch_op.add_column(sa.Column('ds_class', sa.String(), nullable=True))

    conn = op.get_bind()
    session = sa.orm.Session(bind=conn)
    dsatasources = conn.execute('select id, analysis from datasource').fetchall()
    for row in dsatasources:
        if row['analysis'] is not None:
            # NOTE 'returning' is relatively new in sqlite, so better will be use select after insert.
            conn.execute(
                text(), {
                    'id': row['id']
                }
            )
            analysis_id = conn.execute(text()).fetchall()
            conn.execute(
                text(), {
                    'analysis_id': analysis_id[0][0],
                    'id': row['id']
                }
            )

    with op.batch_alter_table('datasource', schema=None) as batch_op:
        batch_op.drop_column('analysis')

    op.create_table(
        'file',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(), nullable=False),
        sa.Column('company_id', sa.Integer(), nullable=True),
        sa.Column('source_file_path', sa.String(), nullable=False),
        sa.Column('file_path', sa.String(), nullable=False),
        sa.Column('row_count', sa.Integer(), nullable=False),
        sa.Column('columns', mindsdb.interfaces.storage.db.Json(), nullable=False),
        # sa.Column('created_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp()),  # ?????
        # sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp(), server_onupdate=sa.func.current_timestamp()),  # ????? erver_default=func.now()
        # sa.Column('created_at', sa.DateTime(), nullable=True, server_default=datetime.datetime.now),  # ?????
        # sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=datetime.datetime.now, server_onupdate=datetime.datetime.now),  # ????? erver_default=func.now()
        sa.Column('created_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp()),  # ?????
        sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp(), server_onupdate=sa.func.current_timestamp()),  # ????? erver_default=func.now()
        sa.Column('analysis_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['analysis_id'], ['analysis.id'], name='fk_analysis_id'),
        sa.PrimaryKeyConstraint('id')
    )

    # delete ds where data is none
    dsatasources = conn.execute(text('select * from datasource')).fetchall()
    for ds in dsatasources:
        if ds['data'] is None:
            conn.execute(text('delete from datasource where id = :id'), {'id': ds['id']})
            continue

        ds_data = json.loads(ds['data'])
        creation_info = json.loads(ds['creation_info'])
        datasource_name = ds_data.get('source_type')
        if datasource_name == 'file':
            created_at = None
            if isinstance(ds['created_at'], str):
                created_at = datetime.datetime.fromisoformat(ds['created_at'])
            elif isinstance(ds['created_at'], [float, int]):
                created_at = datetime.fromtimestamp(ds['created_at'])

            updated_at = None
            if isinstance(ds['updated_at'], str):
                updated_at = datetime.datetime.fromisoformat(ds['updated_at'])
            elif isinstance(ds['updated_at'], [float, int]):
                updated_at = datetime.fromtimestamp(ds['updated_at'])

            file = mindsdb.interfaces.storage.db.File(
                name=ds['name'],
                company_id=ds['company_id'],
                source_file_path=ds_data['source'],
                file_path=creation_info['args'][0],
                row_count=ds_data['row_count'],
                columns=ds_data['columns'],
                created_at=created_at,
                updated_at=updated_at,
                analysis_id=ds['analysis_id']
            )
            session.add(file)

        conn.execute(
            text(), {
                'datasource_name': datasource_name,
                'company_id': ds['company_id'],
                'ds_class': creation_info['class'],
                'id': ds['id']
            }
        )

    session.commit()

    op.rename_table('datasource', 'dataset')
    op.rename_table('integration', 'datasource')

    with op.batch_alter_table('dataset', schema=None) as batch_op:
        batch_op.alter_column('integration_id', new_column_name='datasource_id')
        batch_op.create_foreign_key('fk_datasource_id', 'datasource', ['datasource_id'], ['id'])

    # NOTE two different 'batch' is necessary, in other way FK is not creating
    with op.batch_alter_table('predictor', schema=None) as batch_op:
        batch_op.alter_column('datasource_id', new_column_name='dataset_id')
    with op.batch_alter_table('predictor', schema=None) as batch_op:
        batch_op.create_foreign_key('fk_dataset_id', 'dataset', ['dataset_id'], ['id'])
mindsdb/migrations/versions/2022-02-09_27c5aca9e47e_test.py
1,602
mindsdb
{ "docstring": "\n insert into analysis (analysis) select analysis from datasource where id = :id;\n \n select id from analysis order by id desc limit 1;\n \n update datasource set analysis_id = :analysis_id where id = :id\n \n update datasource\n set integration_id = (select id from integration where name = :datasource_name and company_id = :company_id),\n ds_class = :ds_class\n where id = :id\n ", "language": "en", "n_whitespaces": 229, "n_words": 56, "vocab_size": 31 }
323
Python
174
e8740eecac16c34cba133ba37939831bb66deea7
2022-02-09_27c5aca9e47e_test.py
114,080
108
948
upgrade
https://github.com/mindsdb/mindsdb.git
changes from parent branch
1,245
0
25,088
17
3
12
def _verbose_message(message, *args, verbosity=1):
    if sys.flags.verbose >= verbosity:
        if not message.startswith(('#', 'import ')):
            message = '# ' + message
        print(message.format(*args), file=sys.stderr)
python3.10.4/Lib/importlib/_bootstrap.py
95
XX-Net
{ "docstring": "Print the message to stderr if -v/PYTHONVERBOSE is turned on.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
21
Python
19
8198943edd73a363c266633e1aa5b2a9e9c9f526
_bootstrap.py
218,044
5
56
_verbose_message
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
52
0
55,095
12
1
5
def readinto(self, b):
    self._check_can_read()
    return self._buffer.readinto(b)
python3.10.4/Lib/bz2.py
38
XX-Net
{ "docstring": "Read bytes into b.\n\n Returns the number of bytes read (0 for EOF).\n ", "language": "en", "n_whitespaces": 27, "n_words": 13, "vocab_size": 12 }
6
Python
6
8198943edd73a363c266633e1aa5b2a9e9c9f526
bz2.py
221,182
3
22
readinto
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
27
0
56,252
8
8
21
def check_send_to_kindle(entry):
    formats = list()
    book_formats = list()
    if len(entry.data):
        for ele in iter(entry.data):
            if ele.uncompressed_size < config.mail_size:
                formats.append(ele.format)
        if 'MOBI' in formats:
            book_formats.append({'format': 'Mobi',
                                 'convert': 0,
                                 'text': _('Send %(format)s to Kindle', format='Mobi')})
        if 'PDF' in formats:
            book_formats.append({'format': 'Pdf',
                                 'convert': 0,
                                 'text': _('Send %(format)s to Kindle', format='Pdf')})
        if 'AZW' in formats:
            book_formats.append({'format': 'Azw',
                                 'convert': 0,
                                 'text': _('Send %(format)s to Kindle', format='Azw')})
        if config.config_converterpath:
            book_formats.extend(check_send_to_kindle_with_converter(formats))
        return book_formats
    else:
        log.error(u'Cannot find book entry %d', entry.id)
        return None


# Check if a reader is existing for any of the book formats, if not, return empty list, otherwise return
# list with supported formats
cps/helper.py
312
calibre-web
{ "docstring": "\n returns all available book formats for sending to Kindle\n ", "language": "en", "n_whitespaces": 20, "n_words": 9, "vocab_size": 9 }
100
Python
62
2e007a160e652b2e7bbdeb5a8319560188324502
helper.py
173,231
25
172
check_send_to_kindle
https://github.com/janeczku/calibre-web.git
reenable startup logging Bugfixes from refactoring and merge
431
0
40,831
17
3
26
def get_actual_details(name, filters):
    budget_against = frappe.scrub(filters.get("budget_against"))
    cond = ""

    if filters.get("budget_against") == "Cost Center":
        cc_lft, cc_rgt = frappe.db.get_value("Cost Center", name, ["lft", "rgt"])
        cond = .format(
            lft=cc_lft, rgt=cc_rgt
        )

    ac_details = frappe.db.sql(
        .format(
            tab=filters.budget_against, budget_against=budget_against, cond=cond
        ),
        (filters.from_fiscal_year, filters.to_fiscal_year, name),
        as_dict=1,
    )

    cc_actual_details = {}
    for d in ac_details:
        cc_actual_details.setdefault(d.account, []).append(d)

    return cc_actual_details
erpnext/accounts/report/budget_variance_report/budget_variance_report.py
223
erpnext
{ "docstring": "\n\t\t\t\tand lft >= \"{lft}\"\n\t\t\t\tand rgt <= \"{rgt}\"\n\t\t\t\n\t\t\tselect\n\t\t\t\tgl.account,\n\t\t\t\tgl.debit,\n\t\t\t\tgl.credit,\n\t\t\t\tgl.fiscal_year,\n\t\t\t\tMONTHNAME(gl.posting_date) as month_name,\n\t\t\t\tb.{budget_against} as budget_against\n\t\t\tfrom\n\t\t\t\t`tabGL Entry` gl,\n\t\t\t\t`tabBudget Account` ba,\n\t\t\t\t`tabBudget` b\n\t\t\twhere\n\t\t\t\tb.name = ba.parent\n\t\t\t\tand b.docstatus = 1\n\t\t\t\tand ba.account=gl.account\n\t\t\t\tand b.{budget_against} = gl.{budget_against}\n\t\t\t\tand gl.fiscal_year between %s and %s\n\t\t\t\tand b.{budget_against} = %s\n\t\t\t\tand exists(\n\t\t\t\t\tselect\n\t\t\t\t\t\tname\n\t\t\t\t\tfrom\n\t\t\t\t\t\t`tab{tab}`\n\t\t\t\t\twhere\n\t\t\t\t\t\tname = gl.{budget_against}\n\t\t\t\t\t\t{cond}\n\t\t\t\t)\n\t\t\t\tgroup by\n\t\t\t\t\tgl.name\n\t\t\t\torder by gl.fiscal_year\n\t\t", "language": "en", "n_whitespaces": 38, "n_words": 70, "vocab_size": 46 }
52
Python
43
494bd9ef78313436f0424b918f200dab8fc7c20b
budget_variance_report.py
65,173
53
138
get_actual_details
https://github.com/frappe/erpnext.git
style: format code with black
33
0
13,815
12
4
7
def broadcast_apply(cls, axis, apply_func, left, right, other_name="right"):
modin/core/dataframe/pandas/partitioning/partition_manager.py
28
modin
{ "docstring": "\n Broadcast the `right` partitions to `left` and apply `apply_func` function.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to apply and broadcast over.\n apply_func : callable\n Function to apply.\n left : np.ndarray\n NumPy array of left partitions.\n right : np.ndarray\n NumPy array of right partitions.\n other_name : str, default: \"right\"\n Name of key-value argument for `apply_func` that\n is used to pass `right` to `apply_func`.\n\n Returns\n -------\n np.ndarray\n NumPy array of result partition objects.\n\n Notes\n -----\n This will often be overridden by implementations. It materializes the\n entire partitions of the right and applies them to the left through `apply`.\n ", "language": "en", "n_whitespaces": 287, "n_words": 97, "vocab_size": 64 }
7
Python
7
e4ef652ead6e3fd4bf97deff992fb9065eab4b44
partition_manager.py
155,506
20
98
broadcast_apply
https://github.com/modin-project/modin.git
REFACTOR-#5459: Install code linters through conda and unpin flake8 (#5450) Co-authored-by: Vasily Litvinov <[email protected]> Signed-off-by: Anatoly Myachev <[email protected]>
14
0
36,414
6
1
6
def writelines(self, list_of_data):
    data = b''.join(list_of_data)
    self.write(data)
python3.10.4/Lib/asyncio/transports.py
40
XX-Net
{ "docstring": "Write a list (or any iterable) of data bytes to the transport.\n\n The default implementation concatenates the arguments and\n calls write() on the result.\n ", "language": "en", "n_whitespaces": 45, "n_words": 24, "vocab_size": 22 }
7
Python
7
8198943edd73a363c266633e1aa5b2a9e9c9f526
transports.py
220,852
3
23
writelines
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
28
0
56,155
9
11
34
def make_zipfile(base_name, base_dir, verbose=0, dry_run=0):
    zip_filename = base_name + ".zip"
    mkpath(os.path.dirname(zip_filename), dry_run=dry_run)

    # If zipfile module is not available, try spawning an external
    # 'zip' command.
    if zipfile is None:
        if verbose:
            zipoptions = "-r"
        else:
            zipoptions = "-rq"

        try:
            spawn(["zip", zipoptions, zip_filename, base_dir],
                  dry_run=dry_run)
        except DistutilsExecError:
            # XXX really should distinguish between "couldn't find
            # external 'zip' command" and "zip failed".
            raise DistutilsExecError(("unable to create zip file '%s': "
                   "could neither import the 'zipfile' module nor "
                   "find a standalone zip utility") % zip_filename)

    else:
        log.info("creating '%s' and adding '%s' to it",
                 zip_filename, base_dir)

        if not dry_run:
            try:
                zip = zipfile.ZipFile(zip_filename, "w",
                                      compression=zipfile.ZIP_DEFLATED)
            except RuntimeError:
                zip = zipfile.ZipFile(zip_filename, "w",
                                      compression=zipfile.ZIP_STORED)

            with zip:
                if base_dir != os.curdir:
                    path = os.path.normpath(os.path.join(base_dir, ''))
                    zip.write(path, path)
                    log.info("adding '%s'", path)
                for dirpath, dirnames, filenames in os.walk(base_dir):
                    for name in dirnames:
                        path = os.path.normpath(os.path.join(dirpath, name, ''))
                        zip.write(path, path)
                        log.info("adding '%s'", path)
                    for name in filenames:
                        path = os.path.normpath(os.path.join(dirpath, name))
                        if os.path.isfile(path):
                            zip.write(path, path)
                            log.info("adding '%s'", path)

    return zip_filename

ARCHIVE_FORMATS = {
    'gztar': (make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
    'bztar': (make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
    'xztar': (make_tarball, [('compress', 'xz')], "xz'ed tar-file"),
    'ztar':  (make_tarball, [('compress', 'compress')], "compressed tar file"),
    'tar':   (make_tarball, [('compress', None)], "uncompressed tar file"),
    'zip':   (make_zipfile, [], "ZIP file")
    }
python3.10.4/Lib/distutils/archive_util.py
638
XX-Net
{ "docstring": "Create a zip file from all the files under 'base_dir'.\n\n The output zip file will be named 'base_name' + \".zip\". Uses either the\n \"zipfile\" Python module (if available) or the InfoZIP \"zip\" utility\n (if installed and found on the default search path). If neither tool is\n available, raises DistutilsExecError. Returns the name of the output zip\n file.\n ", "language": "en", "n_whitespaces": 78, "n_words": 57, "vocab_size": 47 }
203
Python
134
8198943edd73a363c266633e1aa5b2a9e9c9f526
archive_util.py
222,552
41
290
make_zipfile
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
860
0
56,631
22
1
8
def project_state(self, nodes=None, at_end=True):
    return self.graph.make_state(
        nodes=nodes, at_end=at_end, real_apps=self.unmigrated_apps
    )
django/db/migrations/loader.py
53
django
{ "docstring": "\n Return a ProjectState object representing the most recent state\n that the loaded migrations represent.\n\n See graph.make_state() for the meaning of \"nodes\" and \"at_end\".\n ", "language": "en", "n_whitespaces": 52, "n_words": 23, "vocab_size": 21 }
10
Python
10
9c19aff7c7561e3a82978a272ecdaad40dda5c00
loader.py
205,304
4
35
project_state
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
42
0
51,080
9
10
20
def provide_database_interface() -> OrionDBInterface:
    connection_url = PREFECT_ORION_DATABASE_CONNECTION_URL.value()

    database_config = MODELS_DEPENDENCIES.get("database_config")
    query_components = MODELS_DEPENDENCIES.get("query_components")
    orm = MODELS_DEPENDENCIES.get("orm")
    dialect = get_dialect(connection_url)

    if database_config is None:
        if dialect.name == "postgresql":
            database_config = AsyncPostgresConfiguration(connection_url=connection_url)
        elif dialect.name == "sqlite":
            database_config = AioSqliteConfiguration(connection_url=connection_url)
        else:
            raise ValueError(
                f"Unable to infer database configuration from provided dialect. Got dialect name {dialect.name!r}"
            )

        MODELS_DEPENDENCIES["database_config"] = database_config

    if query_components is None:
        if dialect.name == "postgresql":
            query_components = AsyncPostgresQueryComponents()
        elif dialect.name == "sqlite":
            query_components = AioSqliteQueryComponents()
        else:
            raise ValueError(
                f"Unable to infer query components from provided dialect. Got dialect name {dialect.name!r}"
            )

        MODELS_DEPENDENCIES["query_components"] = query_components

    if orm is None:
        if dialect.name == "postgresql":
            orm = AsyncPostgresORMConfiguration()
        elif dialect.name == "sqlite":
            orm = AioSqliteORMConfiguration()
        else:
            raise ValueError(
                f"Unable to infer orm configuration from provided dialect. Got dialect name {dialect.name!r}"
            )

        MODELS_DEPENDENCIES["orm"] = orm

    return OrionDBInterface(
        database_config=database_config,
        query_components=query_components,
        orm=orm,
    )
src/prefect/orion/database/dependencies.py
367
prefect
{ "docstring": "\n Get the current orion database interface.\n\n If components of the interface are not set, defaults will be inferred\n based on the dialect of the connection url.\n ", "language": "en", "n_whitespaces": 39, "n_words": 26, "vocab_size": 22 }
136
Python
54
110742d1fee98e793ccdbf47a0a55eeaf70e81e0
dependencies.py
54,644
47
195
provide_database_interface
https://github.com/PrefectHQ/prefect.git
Add temporary_database_interface
451
0
11,117
15
1
10
def test_image_inside_link(self):
    # https://github.com/wagtail/wagtail/issues/4602 - ensure that an <embed> inside
    # a link is handled. This is not valid in Draftail as images are block-level,
    # but should be handled without errors, splitting the image into its own block
    converter = ContentstateConverter(features=['image', 'link'])
    result = json.loads(converter.from_database_format(
    ))
    self.assertContentStateEqual(result, {
        'blocks': [
            {'key': '00000', 'inlineStyleRanges': [], 'entityRanges': [{'key': 0, 'offset': 0, 'length': 6}], 'depth': 0, 'text': 'before', 'type': 'unstyled'},
            {'key': '00000', 'inlineStyleRanges': [], 'entityRanges': [{'key': 1, 'offset': 0, 'length': 1}], 'depth': 0, 'text': ' ', 'type': 'atomic'},
            {'key': '00000', 'inlineStyleRanges': [], 'entityRanges': [{'key': 0, 'offset': 0, 'length': 5}], 'depth': 0, 'text': 'after', 'type': 'unstyled'},
            {'key': '00000', 'inlineStyleRanges': [], 'entityRanges': [{'key': 2, 'offset': 0, 'length': 0}], 'depth': 0, 'text': '', 'type': 'unstyled'},
            {'key': '00000', 'inlineStyleRanges': [], 'entityRanges': [{'key': 3, 'offset': 0, 'length': 1}], 'depth': 0, 'text': ' ', 'type': 'atomic'},
            {'key': '00000', 'inlineStyleRanges': [], 'entityRanges': [{'key': 2, 'offset': 0, 'length': 0}], 'depth': 0, 'text': '', 'type': 'unstyled'},
        ],
        'entityMap': {
            '0': {'mutability': 'MUTABLE', 'type': 'LINK', 'data': {'url': 'https://wagtail.org'}},
            '1': {
                'data': {'format': 'left', 'alt': 'an image', 'id': '1', 'src': '/media/not-found'},
                'mutability': 'IMMUTABLE', 'type': 'IMAGE'
            },
            '2': {'mutability': 'MUTABLE', 'type': 'LINK', 'data': {'url': 'https://wagtail.org'}},
            '3': {
                'data': {'format': 'left', 'alt': 'an image', 'id': '1', 'src': '/media/not-found'},
                'mutability': 'IMMUTABLE', 'type': 'IMAGE'
            },
        }
    })
wagtail/admin/tests/test_contentstate.py
782
wagtail
{ "docstring": "\n <p><a href=\"https://wagtail.org\">before <embed embedtype=\"image\" alt=\"an image\" id=\"1\" format=\"left\" /> after</a></p>\n <p><a href=\"https://wagtail.org\"><embed embedtype=\"image\" alt=\"an image\" id=\"1\" format=\"left\" /></a></p>\n ", "language": "en", "n_whitespaces": 52, "n_words": 18, "vocab_size": 12 }
210
Python
100
0a9b23979bbc55c0a95ff357ee589dae5363dc18
test_contentstate.py
70,567
30
398
test_image_inside_link
https://github.com/wagtail/wagtail.git
Update links to wagtail.io website to point to wagtail.org This covers only links to the website, not other sites
577
0
15,525
16
1
8
def unstack(self, level=-1, fill_value=None) -> DataFrame:
    from pandas.core.reshape.reshape import unstack

    return unstack(self, level, fill_value)

# ----------------------------------------------------------------------
# function application
pandas/core/series.py
55
pandas
{ "docstring": "\n Unstack, also known as pivot, Series with MultiIndex to produce DataFrame.\n\n Parameters\n ----------\n level : int, str, or list of these, default last level\n Level(s) to unstack, can pass level name.\n fill_value : scalar value, default None\n Value to use when replacing NaN values.\n\n Returns\n -------\n DataFrame\n Unstacked Series.\n\n Notes\n -----\n Reference :ref:`the user guide <reshaping.stacking>` for more examples.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4],\n ... index=pd.MultiIndex.from_product([['one', 'two'],\n ... ['a', 'b']]))\n >>> s\n one a 1\n b 2\n two a 3\n b 4\n dtype: int64\n\n >>> s.unstack(level=-1)\n a b\n one 1 2\n two 3 4\n\n >>> s.unstack(level=0)\n one two\n a 1 3\n b 2 4\n ", "language": "en", "n_whitespaces": 471, "n_words": 108, "vocab_size": 79 }
19
Python
17
6294d8490162442f9e73186f38b5545e5f22f7cb
series.py
163,809
44
36
unstack
https://github.com/pandas-dev/pandas.git
DOC: Improve reshaping.rst (#45612)
46
0
39,502
7
1
10
def sync_to_async_iter(iter):

    loop = asyncio.get_event_loop()
    q = asyncio.Queue(1)
    exception = None
    _END = object()
freqtrade/misc.py
52
freqtrade
{ "docstring": "\n Wrap blocking iterator into an asynchronous by\n offloading computation to thread and using\n pubsub pattern for yielding results\n\n :param iter: A synchronous iterator\n :returns: An asynchronous iterator\n ", "language": "en", "n_whitespaces": 46, "n_words": 27, "vocab_size": 24 }
14
Python
11
f268187e9b357127151ae45704538aed6c89f7f5
misc.py
151,752
9
50
sync_to_async_iter
https://github.com/freqtrade/freqtrade.git
offload initial df computation to thread
29
0
35,131
8
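
The code field above is truncated in this record; as an independent illustration of the pattern the docstring describes (run the blocking iterator in a thread and hand results back through a queue), here is a rough sketch — not freqtrade's implementation:

import asyncio

async def iter_in_thread(blocking_iterable):
    loop = asyncio.get_running_loop()
    queue = asyncio.Queue(maxsize=1)
    sentinel = object()

    def producer():
        # Runs in a worker thread; each put is marshalled back onto the event loop.
        for item in blocking_iterable:
            asyncio.run_coroutine_threadsafe(queue.put(item), loop).result()
        asyncio.run_coroutine_threadsafe(queue.put(sentinel), loop).result()

    loop.run_in_executor(None, producer)
    while (item := await queue.get()) is not sentinel:
        yield item

async def main():
    async for value in iter_in_thread(range(3)):
        print(value)

asyncio.run(main())
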
3
6
def is_right(self):
    s = self.sides
    return Segment.is_perpendicular(s[0], s[1]) or \
        Segment.is_perpendicular(s[1], s[2]) or \
        Segment.is_perpendicular(s[0], s[2])
sympy/geometry/polygon.py
84
sympy
{ "docstring": "Is the triangle right-angled.\n\n Returns\n =======\n\n is_right : boolean\n\n See Also\n ========\n\n sympy.geometry.line.LinearEntity.is_perpendicular\n is_equilateral, is_isosceles, is_scalene\n\n Examples\n ========\n\n >>> from sympy import Triangle, Point\n >>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))\n >>> t1.is_right()\n True\n\n ", "language": "en", "n_whitespaces": 134, "n_words": 36, "vocab_size": 31 }
16
Python
12
498015021131af4dbb07eb110e5badaba8250c7b
polygon.py
196,288
5
58
is_right
https://github.com/sympy/sympy.git
Updated import locations
59
0
47,788
10
2
6
def killable(self):
    return not (self.error and self.error.msg == Error.KILLED_MESSAGE)
mitmproxy/flow.py
39
mitmproxy
{ "docstring": "*Read-only:* `True` if this flow can be killed, `False` otherwise.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
9
Python
9
fd43ca19c4a34915bdbfb9c127716fb5a63156e1
flow.py
250,578
2
23
killable
https://github.com/mitmproxy/mitmproxy.git
Flow.kill: don't depend on reply status. In principle, a flow is killable as long as the connection handler is still checking the error status of the flow. This is patch 2/4 of the reply-ectomy.
23
0
73,510
11
4
13
def _check_valid_data(self) -> bool:
    logger.debug("Validating data. %s",
                 {key: len(val) for key, val in self._display_data.stats.items()})
    if any(len(val) == 0  # pylint:disable=len-as-condition
           for val in self._display_data.stats.values()):
        return False
    return True
lib/gui/popup_session.py
105
faceswap
{ "docstring": " Check that the selections holds valid data to display\n NB: len-as-condition is used as data could be a list or a numpy array\n\n Returns\n -------\n bool\n ``True` if there is data to be displayed, otherwise ``False``\n ", "language": "en", "n_whitespaces": 87, "n_words": 36, "vocab_size": 30 }
28
Python
24
afec52309326304f4323029039e49bfcf928ef43
popup_session.py
100,727
15
64
_check_valid_data
https://github.com/deepfakes/faceswap.git
Bugfixes: - Stats graph - Handle NaNs in data - logger - de-elevate matplotlib font messages
102
0
20,182
13
5
9
def promote_types(a, b):
  # Note: we deliberately avoid `if a in _weak_types` here because we want to check
  # object identity, not object equality, due to the behavior of np.dtype.__eq__
  a = a if any(a is t for t in _weak_types) else np.dtype(a)
  b = b if any(b is t for t in _weak_types) else np.dtype(b)
  return np.dtype(_least_upper_bound(a, b))
jax/_src/dtypes.py
97
jax
{ "docstring": "Returns the type to which a binary operation should cast its arguments.\n\n For details of JAX's type promotion semantics, see :ref:`type-promotion`.\n\n Args:\n a: a :class:`numpy.dtype` or a dtype specifier.\n b: a :class:`numpy.dtype` or a dtype specifier.\n\n Returns:\n A :class:`numpy.dtype` object.\n ", "language": "en", "n_whitespaces": 53, "n_words": 40, "vocab_size": 30 }
59
Python
41
2588c98586400a9a457670eabeba67085528e95f
dtypes.py
120,237
4
62
promote_types
https://github.com/google/jax.git
Add comment explaining implementation in promote_types
65
0
26,803
10
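
The identity check on `_weak_types` above is what separates JAX's promotion from plain NumPy promotion; a small comparison sketch follows (assumes numpy; the JAX lines are left commented since jax may not be installed):

import numpy as np

print(np.promote_types(np.float32, np.int16))   # float32: int16 fits in a float32 mantissa
print(np.promote_types(np.float32, np.uint64))  # float64: NumPy widens when it cannot

# With JAX installed, a bare Python int is a "weak" type and does not widen the result:
# import jax.numpy as jnp
# print(jnp.promote_types(jnp.float32, int))    # float32
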
7
19
def _get_mask_channels(self) -> List[int]:
    eye_multiplier = self._config["eye_multiplier"]
    mouth_multiplier = self._config["mouth_multiplier"]
    if not self._config["penalized_mask_loss"] and (eye_multiplier > 1 or
                                                    mouth_multiplier > 1):
        logger.warning("You have selected eye/mouth loss multipliers greater than 1x, but "
                       "Penalized Mask Loss is disabled. Disabling all multipliers.")
        eye_multiplier = 1
        mouth_multiplier = 1
    uses_masks = (self._config["penalized_mask_loss"],
                  eye_multiplier > 1,
                  mouth_multiplier > 1)
    mask_channels = [-1 for _ in range(len(uses_masks))]
    current_channel = 3
    for idx, mask_required in enumerate(uses_masks):
        if mask_required:
            mask_channels[idx] = current_channel
            current_channel += 1
    logger.debug("uses_masks: %s, mask_channels: %s", uses_masks, mask_channels)
    return mask_channels
plugins/train/model/_base/settings.py
215
faceswap
{ "docstring": " Obtain the channels from the face targets that the masks reside in from the training\n data generator.\n\n Returns\n -------\n list:\n A list of channel indices that contain the mask for the corresponding config item\n ", "language": "en", "n_whitespaces": 81, "n_words": 34, "vocab_size": 27 }
86
Python
62
ff6b0209dd5ad57b81b0aca570df7f39a7119bfb
settings.py
100,848
28
130
_get_mask_channels
https://github.com/deepfakes/faceswap.git
Refactoring and TravisCI to Github Actions (#1239) * refactor training * travis to actions
353
0
20,299
12
1
4
def testDynamicScalingForegroundLauncher(self):
    self.helperDynamicScaling(foreground_node_launcher=True)
python/ray/tests/test_autoscaler.py
26
ray
{ "docstring": "Test autoscaling with node launcher in the foreground.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
3
Python
3
c51b0c9a5664e5c6df3d92f9093b56e61b48f514
test_autoscaler.py
136,563
2
14
testDynamicScalingForegroundLauncher
https://github.com/ray-project/ray.git
[autoscaler][kuberay] Batching node provider (#29933) Implements the abstract subclass of NodeProvider proposed in https://docs.google.com/document/d/1JyQINBFirZw7YenA_14zize0R3hIII1_fnfQytIXTPo/ The goal is to simplify the autoscaler's interactions with external cluster managers like the KubeRay operator. A follow-up PR will implement KuberayNodeProvider as a subclass of the BatchingNodeProvider added here. Signed-off-by: Dmitri Gekhtman <[email protected]>
17
0
30,943
8
1
14
async def test_switch_change_lock_state(hass, utcnow):
    helper = await setup_test_component(hass, create_lock_service)

    await hass.services.async_call(
        "lock", "lock", {"entity_id": "lock.testdevice"}, blocking=True
    )
    helper.async_assert_service_values(
        ServicesTypes.LOCK_MECHANISM,
        {
            CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE: 1,
        },
    )

    await hass.services.async_call(
        "lock", "unlock", {"entity_id": "lock.testdevice"}, blocking=True
    )
    helper.async_assert_service_values(
        ServicesTypes.LOCK_MECHANISM,
        {
            CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE: 0,
        },
    )
tests/components/homekit_controller/test_lock.py
158
core
{ "docstring": "Test that we can turn a HomeKit lock on and off again.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
39
Python
23
58b8c30221a6f6e5acbbe98b7e3298b03fb741f5
test_lock.py
311,520
20
95
test_switch_change_lock_state
https://github.com/home-assistant/core.git
Improve homekit_controller tests (#65266)
147
0
110,185
11
2
22
def test_edit_get_locked_by_self(self):
    cases = [
        (["change", "unlock"]),
        (["change"]),  # Can unlock even without unlock permission
    ]

    for permissions in cases:
        with self.subTest(
            "User can edit and unlock an object they have locked",
            permissions=permissions,
        ):
            # Lock the snippet
            self.lock_snippet(self.user)

            # Use the specified permissions
            self.set_permissions(permissions)

            # Get the edit page
            response = self.client.get(self.get_url("edit"))
            html = response.content.decode()
            unlock_url = self.get_url("unlock")

            # Should show lock message
            self.assertContains(
                response,
                "<b>'I&#x27;m a lockable snippet!' was locked</b> by <b>you</b> on",
            )

            # Should show Save action menu item
            self.assertContains(
                response,
                f"<em>{self.save_button_label}</em>",
                html=True,
            )

            # Should not show Locked action menu item
            self.assertTagInHTML(
                '<button type="submit" disabled>Locked</button>',
                html,
                count=0,
                allow_extra_attrs=True,
            )

            # Should show lock information in the side panel
            self.assertContains(
                response,
                (
                    f"You can edit this {self.model_name}, but others may not. "
                    "Unlock it to allow others to edit."
                ),
            )

            # Should show unlock buttons, one in the message and one in the side panel
            self.assertTagInHTML(
                f'<button type="button" data-url="{unlock_url}" data-action-lock-unlock>Unlock</button>',
                html,
                count=2,
                allow_extra_attrs=True,
            )
wagtail/snippets/tests/test_locking.py
287
wagtail
{ "docstring": "A user can edit and unlock a snippet that is locked by themselves.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
159
Python
103
10dbbddaf35607e4257f50dd960520a1268dd225
test_locking.py
80,225
43
159
test_edit_get_locked_by_self
https://github.com/wagtail/wagtail.git
Add tests for locking snippets
957
0
17,034
16
11
39
def linear_eq_to_matrix(equations, *symbols):
    r
    if not symbols:
        raise ValueError(filldedent())

    if hasattr(symbols[0], '__iter__'):
        symbols = symbols[0]

    if has_dups(symbols):
        raise ValueError('Symbols must be unique')

    equations = sympify(equations)
    if isinstance(equations, MatrixBase):
        equations = list(equations)
    elif isinstance(equations, (Expr, Eq)):
        equations = [equations]
    elif not is_sequence(equations):
        raise ValueError(filldedent())

    # construct the dictionaries
    try:
        eq, c = _linear_eq_to_dict(equations, symbols)
    except PolyNonlinearError as err:
        raise NonlinearError(str(err))

    # prepare output matrices
    n, m = shape = len(eq), len(symbols)
    ix = dict(zip(symbols, range(m)))
    dat = {(row, ix[k]): d[k] for row, d in enumerate(eq) for k in d}
    rhs = [-i for i in c]
    del c
    A = SparseMatrix(*shape, dat)
    b = SparseMatrix(n, 1, rhs)
    return A, b
sympy/solvers/solveset.py
353
sympy
{ "docstring": "\n Converts a given System of Equations into Matrix form.\n Here `equations` must be a linear system of equations in\n `symbols`. Element ``M[i, j]`` corresponds to the coefficient\n of the jth symbol in the ith equation.\n\n The Matrix form corresponds to the augmented matrix form.\n For example:\n\n .. math:: 4x + 2y + 3z = 1\n .. math:: 3x + y + z = -6\n .. math:: 2x + 4y + 9z = 2\n\n This system will return $A$ and $b$ as:\n\n $$ A = \\left[\\begin{array}{ccc}\n 4 & 2 & 3 \\\\\n 3 & 1 & 1 \\\\\n 2 & 4 & 9\n \\end{array}\\right] \\ \\ b = \\left[\\begin{array}{c}\n 1 \\\\ -6 \\\\ 2\n \\end{array}\\right] $$\n\n The only simplification performed is to convert\n ``Eq(a, b)`` $\\Rightarrow a - b$.\n\n Raises\n ======\n\n NonlinearError\n The equations contain a nonlinear term.\n ValueError\n The symbols are not given or are not unique.\n\n Examples\n ========\n\n >>> from sympy import linear_eq_to_matrix, symbols\n >>> c, x, y, z = symbols('c, x, y, z')\n\n The coefficients (numerical or symbolic) of the symbols will\n be returned as matrices:\n\n >>> eqns = [c*x + z - 1 - c, y + z, x - y]\n >>> A, b = linear_eq_to_matrix(eqns, [x, y, z])\n >>> A\n Matrix([\n [c, 0, 1],\n [0, 1, 1],\n [1, -1, 0]])\n >>> b\n Matrix([\n [c + 1],\n [ 0],\n [ 0]])\n\n This routine does not simplify expressions and will raise an error\n if nonlinearity is encountered:\n\n >>> eqns = [\n ... (x**2 - 3*x)/(x - 3) - 3,\n ... y**2 - 3*y - y*(y - 4) + x - 4]\n >>> linear_eq_to_matrix(eqns, [x, y])\n Traceback (most recent call last):\n ...\n NonlinearError:\n symbol-dependent term can be ignored using `strict=False`\n\n Simplifying these equations will discard the removable singularity\n in the first and reveal the linear structure of the second:\n\n >>> [e.simplify() for e in eqns]\n [x - 3, x + y - 4]\n\n Any such simplification needed to eliminate nonlinear terms must\n be done *before* calling this routine.\n \n Symbols must be given, for which coefficients\n are to be found.\n \n Equation(s) must be given as a sequence, Expr,\n Eq or Matrix.\n ", "language": "en", "n_whitespaces": 798, "n_words": 351, "vocab_size": 197 }
109
Python
79
e0aaa724190c49f2725bb7880eddd13ce4fef4b7
solveset.py
199,158
109
221
linear_eq_to_matrix
https://github.com/sympy/sympy.git
more efficient coefficient extraction
224
0
49,172
13
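As a quick illustration of the `linear_eq_to_matrix` record above, here is a minimal usage sketch; it assumes a standard SymPy installation and simply reuses the 3x3 system from the docstring, so nothing here is specific to this commit.

from sympy import linear_eq_to_matrix, symbols

x, y, z = symbols("x y z")
# The docstring's system, written as expressions equal to zero
eqns = [4*x + 2*y + 3*z - 1, 3*x + y + z + 6, 2*x + 4*y + 9*z - 2]
A, b = linear_eq_to_matrix(eqns, [x, y, z])
print(A)  # Matrix([[4, 2, 3], [3, 1, 1], [2, 4, 9]])
print(b)  # Matrix([[1], [-6], [2]])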
4
25
def _make_attention_mask(self) -> None:
    # Make masks for shift case
    if any(self.shift_size):
        # calculate attention mask for SW-MSA
        H, W = self.feat_size
        img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
        cnt = 0
        for h in (
                slice(0, -self.window_size[0]),
                slice(-self.window_size[0], -self.shift_size[0]),
                slice(-self.shift_size[0], None)):
            for w in (
                    slice(0, -self.window_size[1]),
                    slice(-self.window_size[1], -self.shift_size[1]),
                    slice(-self.shift_size[1], None)):
                img_mask[:, h, w, :] = cnt
                cnt += 1
        mask_windows = window_partition(img_mask, self.window_size)  # num_windows, window_size, window_size, 1
        mask_windows = mask_windows.view(-1, self.window_area)
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
    else:
        attn_mask = None
    self.register_buffer("attn_mask", attn_mask, persistent=False)
timm/models/swin_transformer_v2_cr.py
365
pytorch-image-models
{ "docstring": "Method generates the attention mask used in shift case.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
99
Python
69
c6e4b7895a7dbcd9b98396cbef383dd1c72b0ad3
swin_transformer_v2_cr.py
331,786
23
244
_make_attention_mask
https://github.com/huggingface/pytorch-image-models.git
Swin V2 CR impl refactor. * reformat and change some naming so closer to existing timm vision transformers * remove typing that wasn't adding clarity (or causing torchscript issues) * support non-square windows * auto window size adjust from image size * post-norm + main-branch no
425
0
119,924
15
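To make the masking trick in `_make_attention_mask` easier to follow, here is a simplified, self-contained 1-D sketch using plain PyTorch; the sequence length, window size and region labels are made up for illustration, and the repo's `window_partition` helper is not used.

import torch

# Label the three shift regions of a toy length-8 sequence, partition it into
# windows, and turn every cross-region pair into a large negative attention bias.
length, window, shift = 8, 4, 2
region = torch.zeros(length)
region[slice(0, -window)] = 0
region[slice(-window, -shift)] = 1
region[slice(-shift, None)] = 2

windows = region.view(-1, window)                    # (num_windows, window)
mask = windows.unsqueeze(1) - windows.unsqueeze(2)   # pairwise label difference
mask = mask.masked_fill(mask != 0, float(-100.0))    # block attention across regions
print(mask)  # 0 where labels match, -100.0 where they differ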
1
8
def _map_multiprocess(func, iterable, chunksize=1):
    # type: (Callable[[S], T], Iterable[S], int) -> Iterator[T]
    with closing(ProcessPool()) as pool:
        return pool.imap_unordered(func, iterable, chunksize)
.venv/lib/python3.8/site-packages/pip/_internal/utils/parallel.py
56
transferlearning
{ "docstring": "Chop iterable into chunks and submit them to a process pool.\n\n For very long iterables using a large value for chunksize can make\n the job complete much faster than using the default value of 1.\n\n Return an unordered iterator of the results.\n ", "language": "en", "n_whitespaces": 54, "n_words": 42, "vocab_size": 36 }
20
Python
19
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
parallel.py
61,287
3
33
_map_multiprocess
https://github.com/jindongwang/transferlearning.git
upd; format
36
0
12,489
12
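The pip helper above is a thin wrapper over `Pool.imap_unordered`; the following standalone sketch shows the same pattern with the standard library only (the worker function and values are illustrative, not pip internals).

from contextlib import closing
from multiprocessing import Pool


def square(n):
    return n * n


if __name__ == "__main__":
    # Chop the iterable into chunks of 10, run them on a process pool and
    # consume the results as an unordered iterator.
    with closing(Pool()) as pool:
        for result in pool.imap_unordered(square, range(100), chunksize=10):
            print(result)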
7
26
def serialize(items):
    data = QByteArray()
    stream = QDataStream(data, QIODevice.OpenModeFlag.ReadWrite)
    user_data: List[Mapping[str, Any]] = []

    current_idx = None

    for i, item in enumerate(items):
        if item.active:
            if current_idx is not None:
                raise ValueError("Multiple active items ({} and {}) "
                                 "found!".format(current_idx, i))
            current_idx = i

    if items:
        if current_idx is None:
            raise ValueError("No active item found!")
    else:
        current_idx = 0

    _serialize_items(items, current_idx, stream)

    user_data += [item.user_data for item in items]

    stream.device().reset()
    qtutils.check_qdatastream(stream)
    return stream, data, user_data
qutebrowser/browser/webkit/tabhistory.py
219
qutebrowser
{ "docstring": "Serialize a list of TabHistoryItems to a data stream.\n\n Args:\n items: An iterable of TabHistoryItems.\n\n Return:\n A (stream, data, user_data) tuple.\n stream: The reset QDataStream.\n data: The QByteArray with the raw data.\n user_data: A list with each item's user data.\n\n Warning:\n If 'data' goes out of scope, reading from 'stream' will result in a\n segfault!\n ", "language": "en", "n_whitespaces": 128, "n_words": 55, "vocab_size": 46 }
73
Python
52
0877fb0d78635692e481c8bde224fac5ad0dd430
tabhistory.py
321,174
21
135
serialize
https://github.com/qutebrowser/qutebrowser.git
Run scripts/dev/rewrite_enums.py
213
0
117,577
17
1
17
def test_copy_with_expression(self):
    expression = "col1, col2"
    op = DatabricksCopyIntoOperator(
        file_location=COPY_FILE_LOCATION,
        file_format='CSV',
        table_name='test',
        task_id=TASK_ID,
        pattern='folder1/file_[a-g].csv',
        expression_list=expression,
        format_options={'header': 'true'},
        force_copy=True,
    )
    assert (
        op._create_sql_query()
        == f.strip()
    )
tests/providers/databricks/operators/test_databricks_sql.py
114
airflow
{ "docstring": "COPY INTO test\nFROM (SELECT {expression} FROM '{COPY_FILE_LOCATION}')\nFILEFORMAT = CSV\nPATTERN = 'folder1/file_[a-g].csv'\nFORMAT_OPTIONS ('header' = 'true')\nCOPY_OPTIONS ('force' = 'true')\n", "language": "en", "n_whitespaces": 16, "n_words": 22, "vocab_size": 17 }
25
Python
23
27d19e7626ef80687997a6799762fa00162c1328
test_databricks_sql.py
45,337
22
64
test_copy_with_expression
https://github.com/apache/airflow.git
Databricks SQL operators (#21363)
169
0
8,547
12
1
9
def _register_serializers(self):
    import ray.serialization_addons
    from ray.util.serialization import StandaloneSerializationContext

    ctx = StandaloneSerializationContext()
    ray.serialization_addons.apply(ctx)
python/ray/util/client/__init__.py
52
ray
{ "docstring": "Register the custom serializer addons at the client side.\n\n The server side should have already registered the serializers via\n regular worker's serialization_context mechanism.\n ", "language": "en", "n_whitespaces": 44, "n_words": 23, "vocab_size": 21 }
12
Python
11
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
__init__.py
132,908
5
31
_register_serializers
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
47
0
29,864
8
2
18
def _postprocess_tf(self, policy, sample_batch, tf_sess):
    if self.framework == "tf":
        obs_embeds = tf_sess.run(
            self._obs_embeds,
            feed_dict={self._obs_ph: sample_batch[SampleBatch.OBS]},
        )
    else:
        obs_embeds = tf.stop_gradient(
            self._encoder_net({SampleBatch.OBS: sample_batch[SampleBatch.OBS]})[0]
        ).numpy()
    sample_batch[SampleBatch.OBS_EMBEDS] = obs_embeds
    return sample_batch
rllib/utils/exploration/random_encoder.py
137
ray
{ "docstring": "Calculate states' embeddings and add it to SampleBatch.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
28
Python
24
b32c784c7fbeab39f77ec47e66c18e987efb582d
random_encoder.py
125,607
12
88
_postprocess_tf
https://github.com/ray-project/ray.git
[RLLib] RE3 exploration algorithm TF2 framework support (#25221)
152
0
27,923
20
3
23
async def test_backfill_state_name(db, flow):
    connection_url = PREFECT_ORION_DATABASE_CONNECTION_URL.value()
    dialect = get_dialect(connection_url)

    # get the proper migration revisions
    if dialect.name == "postgresql":
        revisions = ("605ebb4e9155", "14dc68cc5853")
    else:
        revisions = ("7f5f335cace3", "db6bde582447")

    flow_run_id = uuid4()
    null_state_flow_run_id = uuid4()
    flow_run_state_1_id = uuid4()
    flow_run_state_2_id = uuid4()

    task_run_id = uuid4()
    null_state_task_run_id = uuid4()
    task_run_state_1_id = uuid4()
    task_run_state_2_id = uuid4()
    try:
        # downgrade to the previous revision
        await run_sync_in_worker_thread(alembic_downgrade, revision=revisions[0])
        session = await db.session()
tests/orion/database/test_migrations.py
191
async def test_backfill_state_name(db, flow):
    """
    Tests state_name is backfilled correctly for the flow_run
    and task_run tables by a specific migration
    """
    connection_url = PREFECT_ORION_DATABASE_CONNECTION_URL.value()
    dialect = get_dialect(connection_url)

    # get the proper migration revisions
    if dialect.name == "postgresql":
        revisions = ("605ebb4e9155", "14dc68cc5853")
    else:
        revisions = ("7f5f335cace3", "db6bde582447")

    flow_run_id = uuid4()
    null_state_flow_run_id = uuid4()
    flow_run_state_1_id = uuid4()
    flow_run_state_2_id = uuid4()

    task_run_id = uuid4()
    null_state_task_run_id = uuid4()
    task_run_state_1_id = uuid4()
    task_run_state_2_id = uuid4()
    try:
        # downgrade to the previous revision
        await run_sync_in_worker_thread(alembic_downgrade, revision=revisions[0])
        session = await db.session()
prefect
{ "docstring": "\n Tests state_name is backfilled correctly for the flow_run\n and task_run tables by a specific migration\n ", "language": "en", "n_whitespaces": 25, "n_words": 15, "vocab_size": 15 }
67
Python
43
fc9f253912945e088e48cc723af383e6a9f46faf
test_migrations.py
54,795
99
385
test_backfill_state_name
https://github.com/PrefectHQ/prefect.git
Add run.state_name columns
147
1
11,146
10
1
13
def test_legacy_check_event_allowed(self) -> None:
    channel = self.make_request(
        "PUT",
        "/_matrix/client/r0/rooms/%s/send/m.room.message/1" % self.room_id,
        {
            "msgtype": "m.text",
            "body": "Original body",
        },
        access_token=self.tok,
    )
    self.assertEqual(channel.result["code"], b"200", channel.result)

    event_id = channel.json_body["event_id"]

    channel = self.make_request(
        "GET",
        "/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, event_id),
        access_token=self.tok,
    )
    self.assertEqual(channel.result["code"], b"200", channel.result)

    self.assertIn("foo", channel.json_body["content"].keys())
    self.assertEqual(channel.json_body["content"]["foo"], "bar")
tests/rest/client/test_third_party_rules.py
240
synapse
{ "docstring": "Tests that the wrapper for legacy check_event_allowed callbacks works\n correctly.\n ", "language": "en", "n_whitespaces": 24, "n_words": 10, "vocab_size": 10 }
43
Python
33
2ffaf30803f93273a4d8a65c9e6c3110c8433488
test_third_party_rules.py
247,323
23
142
test_legacy_check_event_allowed
https://github.com/matrix-org/synapse.git
Add type hints to `tests/rest/client` (#12108) * Add type hints to `tests/rest/client` * newsfile * fix imports * add `test_account.py` * Remove one type hint in `test_report_event.py` * change `on_create_room` to `async` * update new functions in `test_third_party_rules.py` * Add `test_filter.py` * add `test_rooms.py` * change to `assertEquals` to `assertEqual` * lint
231
0
71,589
11
2
8
def bulk_to_python(self, values):
    objects = self.target_model.objects.in_bulk(values)
    return [
        objects.get(id) for id in values
    ]  # Keeps the ordering the same as in values.
wagtail/core/blocks/field_block.py
54
wagtail
{ "docstring": "Return the model instances for the given list of primary keys.\n\n The instances must be returned in the same order as the values and keep None values.\n ", "language": "en", "n_whitespaces": 41, "n_words": 27, "vocab_size": 23 }
23
Python
21
d10f15e55806c6944827d801cd9c2d53f5da4186
field_block.py
73,658
5
33
bulk_to_python
https://github.com/wagtail/wagtail.git
Reformat with black
63
0
16,083
10
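The `bulk_to_python` record above leans on Django's `QuerySet.in_bulk`; a minimal sketch of the same ordering-preserving pattern is below, assuming a configured Django project (the `User` model and function name are chosen only for illustration).

from django.contrib.auth.models import User


def fetch_in_given_order(pks):
    # in_bulk returns a {pk: instance} dict; reading it back in the order of
    # `pks` preserves the caller's ordering and yields None for missing ids.
    objects = User.objects.in_bulk(pks)
    return [objects.get(pk) for pk in pks]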
7
9
def supported_python_versions(self):  # type: () -> t.Optional[t.Tuple[str, ...]]
    versions = super().supported_python_versions

    if self.minimum_python_version:
        versions = tuple(version for version in versions if str_to_version(version) >= str_to_version(self.minimum_python_version))

    if self.maximum_python_version:
        versions = tuple(version for version in versions if str_to_version(version) <= str_to_version(self.maximum_python_version))

    return versions
test/lib/ansible_test/_internal/commands/sanity/__init__.py
113
ansible
{ "docstring": "A tuple of supported Python versions or None if the test does not depend on specific Python versions.", "language": "en", "n_whitespaces": 17, "n_words": 18, "vocab_size": 17 }
39
Python
24
dfde4be444ee66a1a0e44751b80bcf1afd6661d7
__init__.py
267,309
7
69
supported_python_versions
https://github.com/ansible/ansible.git
Add Python 3.11 support. ci_complete ci_coverage
97
0
78,848
15
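The clamping done in `supported_python_versions` can be shown in isolation; this sketch uses a simple dotted-version parser as a stand-in for ansible-test's `str_to_version`, so the helper names here are assumptions, not the project's API.

def clamp_versions(versions, minimum=None, maximum=None):
    # Stand-in for str_to_version: "3.10" -> (3, 10), so comparisons are numeric.
    to_tuple = lambda v: tuple(int(part) for part in v.split("."))
    if minimum:
        versions = tuple(v for v in versions if to_tuple(v) >= to_tuple(minimum))
    if maximum:
        versions = tuple(v for v in versions if to_tuple(v) <= to_tuple(maximum))
    return versions


print(clamp_versions(("2.7", "3.8", "3.9", "3.10", "3.11"), minimum="3.8", maximum="3.10"))
# ('3.8', '3.9', '3.10')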
8
37
def sync_status_outbound(self, external_issue, is_resolved, project_id, **kwargs):
    client = self.get_client()
    jira_issue = client.get_issue(external_issue.key)
    jira_project = jira_issue["fields"]["project"]

    try:
        external_project = IntegrationExternalProject.objects.get(
            external_id=jira_project["id"],
            organization_integration_id__in=OrganizationIntegration.objects.filter(
                organization_id=external_issue.organization_id,
                integration_id=external_issue.integration_id,
            ),
        )
    except IntegrationExternalProject.DoesNotExist:
        return

    jira_status = (
        external_project.resolved_status if is_resolved else external_project.unresolved_status
    )

    # don't bother updating if it's already the status we'd change it to
    if jira_issue["fields"]["status"]["id"] == jira_status:
        return
    try:
        transitions = client.get_transitions(external_issue.key)
    except ApiHostError:
        raise IntegrationError("Could not reach host to get transitions.")

    try:
        transition = [t for t in transitions if t.get("to", {}).get("id") == jira_status][0]
    except IndexError:
        # TODO(jess): Email for failure
        logger.warning(
            "jira.status-sync-fail",
            extra={
                "organization_id": external_issue.organization_id,
                "integration_id": external_issue.integration_id,
                "issue_key": external_issue.key,
            },
        )
        return

    client.transition_issue(external_issue.key, transition["id"])
src/sentry/integrations/jira_server/integration.py
352
sentry
{ "docstring": "\n Propagate a sentry issue's status to a linked issue's status.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 8 }
103
Python
81
2fbf550ec05c8501cbc9eca62e73526e717dcbdf
integration.py
93,732
36
213
sync_status_outbound
https://github.com/getsentry/sentry.git
ref(Jira): Split Jira Cloud and Jira Server (#37034) * Split Jira Cloud and Jira Server
525
0
19,015
17
1
12
def test_return_expanded(self):
    self.assertEqual(StateFilter.all().return_expanded(), StateFilter.all())

    self.assertEqual(StateFilter.none().return_expanded(), StateFilter.none())

    # Concrete-only state filters stay the same
    # (Case: mixed filter)
    self.assertEqual(
        StateFilter.freeze(
            {
                EventTypes.Member: {"@wombat:test", "@alicia:test"},
                "some.other.state.type": {""},
            },
            include_others=False,
        ).return_expanded(),
        StateFilter.freeze(
            {
                EventTypes.Member: {"@wombat:test", "@alicia:test"},
                "some.other.state.type": {""},
            },
            include_others=False,
        ),
    )

    # Concrete-only state filters stay the same
    # (Case: non-member-only filter)
    self.assertEqual(
        StateFilter.freeze(
            {"some.other.state.type": {""}}, include_others=False
        ).return_expanded(),
        StateFilter.freeze({"some.other.state.type": {""}}, include_others=False),
    )

    # Concrete-only state filters stay the same
    # (Case: member-only filter)
    self.assertEqual(
        StateFilter.freeze(
            {
                EventTypes.Member: {"@wombat:test", "@alicia:test"},
            },
            include_others=False,
        ).return_expanded(),
        StateFilter.freeze(
            {
                EventTypes.Member: {"@wombat:test", "@alicia:test"},
            },
            include_others=False,
        ),
    )

    # Wildcard member-only state filters stay the same
    self.assertEqual(
        StateFilter.freeze(
            {EventTypes.Member: None},
            include_others=False,
        ).return_expanded(),
        StateFilter.freeze(
            {EventTypes.Member: None},
            include_others=False,
        ),
    )

    # If there is a wildcard in the non-member portion of the filter,
    # it's expanded to include ALL non-member events.
    # (Case: mixed filter)
    self.assertEqual(
        StateFilter.freeze(
            {
                EventTypes.Member: {"@wombat:test", "@alicia:test"},
                "some.other.state.type": None,
            },
            include_others=False,
        ).return_expanded(),
        StateFilter.freeze(
            {EventTypes.Member: {"@wombat:test", "@alicia:test"}},
            include_others=True,
        ),
    )

    # If there is a wildcard in the non-member portion of the filter,
    # it's expanded to include ALL non-member events.
    # (Case: non-member-only filter)
    self.assertEqual(
        StateFilter.freeze(
            {
                "some.other.state.type": None,
            },
            include_others=False,
        ).return_expanded(),
        StateFilter.freeze({EventTypes.Member: set()}, include_others=True),
    )
    self.assertEqual(
        StateFilter.freeze(
            {
                "some.other.state.type": None,
                "yet.another.state.type": {"wombat"},
            },
            include_others=False,
        ).return_expanded(),
        StateFilter.freeze({EventTypes.Member: set()}, include_others=True),
    )
tests/storage/test_state.py
668
synapse
{ "docstring": "\n Tests the behaviour of the return_expanded() function that expands\n StateFilters to include more state types (for the sake of cache hit rate).\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 19 }
203
Python
63
eb609c65d0794dd49efcd924bdc8743fd4253a93
test_state.py
246,360
81
410
test_return_expanded
https://github.com/matrix-org/synapse.git
Fix bug in `StateFilter.return_expanded()` and add some tests. (#12016)
1,317
0
71,177
15
2
6
def parent(self) -> DOMNode:
    if self._parent is None:
        raise NoParent(f"{self} has no parent")
    assert isinstance(self._parent, DOMNode)
    return self._parent
src/textual/dom.py
60
textual
{ "docstring": "Get the parent node.\n\n Raises:\n NoParent: If this is the root node.\n\n Returns:\n DOMNode: The node which is the direct parent of this node.\n ", "language": "en", "n_whitespaces": 67, "n_words": 24, "vocab_size": 17 }
18
Python
17
2635f58e7c3d10b161ee69a15ebfe6499ac26daa
dom.py
181,946
13
34
parent
https://github.com/Textualize/textual.git
docstrings and tidy
57
0
43,685
11
5
10
def _maybe_add_default_serving_output(export_outputs):
    if len(export_outputs) == 1:
        ((key, value),) = export_outputs.items()
        if key != tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            export_outputs[
                tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY
            ] = value
    if len(export_outputs) > 1:
        if (
            tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY
            not in export_outputs
        ):
            raise ValueError(
                "Multiple `export_outputs` were provided, but none of them are "
                "specified as the default. Use"
                "`tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY` to "
                "specify a default."
            )

    return export_outputs


# LINT.ThenChange(//tensorflow/python/saved_model/model_utils/export_utils.py)
keras/saving/utils_v1/export_utils.py
131
keras
{ "docstring": "Add a default serving output to the export_outputs if not present.\n\n Args:\n export_outputs: Describes the output signatures to be exported to\n `SavedModel` and used during serving. Should be a dict.\n\n Returns:\n export_outputs dict with default serving signature added if necessary\n\n Raises:\n ValueError: if multiple export_outputs were provided without a default\n serving key.\n ", "language": "en", "n_whitespaces": 93, "n_words": 52, "vocab_size": 37 }
58
Python
49
84afc5193d38057e2e2badf9c889ea87d80d8fbf
export_utils.py
276,301
19
77
_maybe_add_default_serving_output
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
238
0
81,623
13
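A short usage sketch for the Keras helper above; it assumes TensorFlow is importable, and plain strings stand in for real export-output objects purely to show the key handling.

import tensorflow as tf

export_outputs = {"regress": "some-export-output"}
export_outputs = _maybe_add_default_serving_output(export_outputs)
# With a single entry, the helper promotes it to the default serving key.
assert tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY in export_outputs
assert export_outputs[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY] == "some-export-output"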