Column summary (type and observed value/length range across the dataset):

  Column           Type             Min      Max
  complexity       int64            1        56
  n_identifiers    int64            1        114
  code             string (length)  19       12.7k
  path             string (length)  8        134
  n_ast_nodes      int64            12       2.35k
  ast_errors       string (length)  0        4.01k
  repo             string (length)  3        28
  documentation    dict             -        -
  n_words          int64            2        866
  language         string (class)   1 value  -
  vocab_size       int64            2        323
  commit_id        string (length)  40       40
  file_name        string (length)  5        79
  id               int64            243      338k
  nloc             int64            1        228
  token_counts     int64            5        1.4k
  fun_name         string (length)  1        77
  url              string (length)  31       60
  commit_message   string (length)  3        15.3k
  n_whitespaces    int64            1        3.23k
  n_ast_errors     int64            0        20
  d_id             int64            74       121k
  ast_levels       int64            4        29

The records below list each field's value in the column order above; the ast_errors field appears only when it is non-empty.
16
27
def find_python(finder, line=None):
    if line and not isinstance(line, str):
        raise TypeError(
            f"Invalid python search type: expected string, received {line!r}"
        )
    if line and os.path.isabs(line):
        if os.name == "nt":
            line = make_posix(line)
        return line
    if not finder:
        from pipenv.vendor.pythonfinder import Finder

        finder = Finder(global_search=True)
    if not line:
        result = next(iter(finder.find_all_python_versions()), None)
    elif line and line[0].isdigit() or re.match(r'[\d\.]+', line):
        result = finder.find_python_version(line)
    else:
        result = finder.find_python_version(name=line)
    if not result:
        result = finder.which(line)
    if not result and not line.startswith("python"):
        line = f"python{line}"
        result = find_python(finder, line)
    if result:
        if not isinstance(result, str):
            return result.path.as_posix()
        return result
    return
pipenv/utils/shell.py
326
pipenv
{ "docstring": "\n Given a `pythonfinder.Finder` instance and an optional line, find a corresponding python\n\n :param finder: A :class:`pythonfinder.Finder` instance to use for searching\n :type finder: :class:pythonfinder.Finder`\n :param str line: A version, path, name, or nothing, defaults to None\n :return: A path to python\n :rtype: str\n ", "language": "en", "n_whitespaces": 65, "n_words": 43, "vocab_size": 33 }
94
Python
53
3387881a6d4fc2d8bdc0f05c484cb2f7222acfb8
shell.py
19,575
28
194
find_python
https://github.com/pypa/pipenv.git
Code reorg utils into utils module reduces complexity (#4990) * Split apart the massive utils.py into a utils module
258
0
3,027
14
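A minimal usage sketch for the find_python record above; the import path is taken from the record's path field, and the call shape is an assumption based on the snippet itself (finder=None lets the function build its own pythonfinder.Finder).

# Hypothetical usage sketch; assumes pipenv is installed and the module path above is importable.
from pipenv.utils.shell import find_python

python_path = find_python(None, "3")
print(python_path)  # a POSIX-style path to a Python 3 interpreter, or None if nothing was found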
1
4
def val(self, request):
    return request.param
pandas/tests/series/indexing/test_setitem.py
21
pandas
{ "docstring": "\n NA values that should generally be valid_na for *all* dtypes.\n\n Include both python float NaN and np.float64; only np.float64 has a\n `dtype` attribute.\n ", "language": "en", "n_whitespaces": 52, "n_words": 23, "vocab_size": 23 }
5
Python
5
3510b1fd2a9cf752638f4af751bdeb33496db766
test_setitem.py
163,656
2
12
val
https://github.com/pandas-dev/pandas.git
BUG: setting pd.NA into Series casts to object (#45431)
19
0
39,479
6
4
18
def arange(start, stop=None, step=1, dtype="int32"):
    # Match the behavior of numpy and Theano by returning an empty sequence.
    if stop is None and start < 0:
        start = 0
    result = tf.range(start, limit=stop, delta=step, name="arange")
    if dtype != "int32":
        result = cast(result, dtype)
    return result


@keras_export("keras.backend.tile")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
keras/backend.py
134
@keras_export("keras.backend.tile") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
keras
{ "docstring": "Creates a 1D tensor containing a sequence of integers.\n\n The function arguments use the same convention as\n Theano's arange: if only one argument is provided,\n it is in fact the \"stop\" argument and \"start\" is 0.\n\n The default type of the returned tensor is `'int32'` to\n match TensorFlow's default.\n\n Args:\n start: Start value.\n stop: Stop value.\n step: Difference between two successive values.\n dtype: Integer dtype to use.\n\n Returns:\n An integer tensor.\n\n Example:\n\n >>> tf.keras.backend.arange(start=0, stop=10, step=1.5)\n <tf.Tensor: shape=(7,), dtype=float32,\n numpy=array([0. , 1.5, 3. , 4.5, 6. , 7.5, 9. ], dtype=float32)>\n\n\n\n ", "language": "en", "n_whitespaces": 178, "n_words": 91, "vocab_size": 77 }
48
Python
41
84afc5193d38057e2e2badf9c889ea87d80d8fbf
backend.py
269,614
7
65
arange
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
77
1
80,233
10
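A usage sketch mirroring the example in this record's docstring; it assumes TensorFlow is installed.

import tensorflow as tf

# Only one positional argument would mean "stop"; here start, stop and step are explicit.
t = tf.keras.backend.arange(start=0, stop=10, step=1.5)
print(t)
# <tf.Tensor: shape=(7,), dtype=float32,
#  numpy=array([0. , 1.5, 3. , 4.5, 6. , 7.5, 9. ], dtype=float32)>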
1
15
def test_guest_access_token(self):
    token = self.macaroon_generator.generate_guest_access_token("@user:tesths")
    user_id = self.macaroon_generator.verify_guest_token(token)
    self.assertEqual(user_id, "@user:tesths")

    # Raises with another secret key
    with self.assertRaises(MacaroonVerificationFailedException):
        self.other_macaroon_generator.verify_guest_token(token)

    # Check that an old access token without the guest caveat does not work
    macaroon = self.macaroon_generator._generate_base_macaroon("access")
    macaroon.add_first_party_caveat(f"user_id = {user_id}")
    macaroon.add_first_party_caveat("nonce = 0123456789abcdef")
    token = macaroon.serialize()

    with self.assertRaises(MacaroonVerificationFailedException):
        self.macaroon_generator.verify_guest_token(token)
tests/util/test_macaroons.py
177
synapse
{ "docstring": "Test the generation and verification of guest access tokens", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
48
Python
36
fe1daad67237c2154a3d8d8cdf6c603f0d33682e
test_macaroons.py
248,583
12
96
test_guest_access_token
https://github.com/matrix-org/synapse.git
Move the "email unsubscribe" resource, refactor the macaroon generator & simplify the access token verification logic. (#12986) This simplifies the access token verification logic by removing the `rights` parameter which was only ever used for the unsubscribe link in email notifications. The latter has been moved under the `/_synapse` namespace, since it is not a standard API. This also makes the email verification link more secure, by embedding the app_id and pushkey in the macaroon and verifying it. This prevents the user from tampering the query parameters of that unsubscribe link. Macaroon generation is refactored: - Centralised all macaroon generation and verification logic to the `MacaroonGenerator` - Moved to `synapse.utils` - Changed the constructor to require only a `Clock`, hostname, and a secret key (instead of a full `Homeserver`). - Added tests for all methods.
154
0
72,364
10
6
29
def get_docker_network(client) -> Optional[str]:
    import docker

    if TYPE_CHECKING:  # pragma: no cover
        from docker.models.containers import Container

    container: 'Container' = None
    try:
        hostname = socket.gethostname()
        container = client.containers.get(hostname)
    except docker.errors.NotFound:
        try:
            # https://stackoverflow.com/a/52988227/15683245
            with open('/proc/1/cpuset') as f:
                hostname = os.path.basename(f.read().rstrip())
            container = client.containers.get(hostname)
        except Exception:
            return None
    try:
        networks = container.attrs['NetworkSettings']['Networks']
        if networks:
            net_mode = list(networks.keys())[0]
            return networks[net_mode]['NetworkID']
        else:
            return None
    except Exception:
        return None
jina/orchestrate/pods/container_helper.py
249
jina
{ "docstring": "Do a best-effort guess if the caller is already in a docker network\n\n Check if `hostname` exists in list of docker containers.\n If a container is found, check its network id\n\n :param client: docker client object\n :return: network id if exists\n ", "language": "en", "n_whitespaces": 56, "n_words": 41, "vocab_size": 29 }
64
Python
41
f5a362f0ffc5070c104c840ab7833689d39b7bdb
container_helper.py
13,330
32
144
get_docker_network
https://github.com/jina-ai/jina.git
chore: add pragma no cover to TYPE_CHECKING branch (#5299)
243
0
2,601
19
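A hypothetical usage sketch for the get_docker_network record above; it assumes the docker SDK is installed, a daemon is reachable, and that the function is importable from the module path shown in the record.

import docker
from jina.orchestrate.pods.container_helper import get_docker_network

client = docker.from_env()
network_id = get_docker_network(client)
print(network_id)  # a network id if the caller itself runs inside a container, else None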
1
2
def valueminus(self):
    return self["valueminus"]
packages/python/plotly/plotly/graph_objs/bar/_error_x.py
22
plotly.py
{ "docstring": "\n Sets the value of either the percentage (if `type` is set to\n \"percent\") or the constant (if `type` is set to \"constant\")\n corresponding to the lengths of the error bars in the bottom\n (left) direction for vertical (horizontal) bars\n\n The 'valueminus' property is a number and may be specified as:\n - An int or float in the interval [0, inf]\n\n Returns\n -------\n int|float\n ", "language": "en", "n_whitespaces": 136, "n_words": 63, "vocab_size": 46 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_error_x.py
228,639
2
11
valueminus
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
60,312
7
1
3
def required_packages() -> List[Text]:
    return ["sklearn"]
rasa/nlu/classifiers/logistic_regression_classifier.py
27
rasa
{ "docstring": "Any extra python dependencies required for this component to run.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
6
Python
6
dc762814317ce46873a5226ee09033031a7d3604
logistic_regression_classifier.py
159,357
3
14
required_packages
https://github.com/RasaHQ/rasa.git
Add Logistic Regression to our NLU classifiers. (#10650) * added-logistic-regression * added * d0h! gotta copy the imports correctly * run black * black issues fixed * stash * added tolerance hyperparam * added random seed * fixed testing path * ran black * use joblib directly * insurance against sklearn changes * added try except * ran black * make code more DRY * flake8 * added type information * add train -> persists -> load -> load * add to test_train.py * fixed style issues * actually persist model * persist, i insist * fixed-bug * added-documentation * black * added changelog * added * moar-whitespace * removed stale param * added comments
20
0
38,212
7
5
15
def get_uom_conv_factor(uom, stock_uom):
    if uom == stock_uom:
        return 1.0

    from_uom, to_uom = uom, stock_uom  # renaming for readability

    exact_match = frappe.db.get_value(
        "UOM Conversion Factor", {"to_uom": to_uom, "from_uom": from_uom}, ["value"], as_dict=1
    )
    if exact_match:
        return exact_match.value

    inverse_match = frappe.db.get_value(
        "UOM Conversion Factor", {"to_uom": from_uom, "from_uom": to_uom}, ["value"], as_dict=1
    )
    if inverse_match:
        return 1 / inverse_match.value

    # This attempts to try and get conversion from intermediate UOM.
    # case:
    #            g -> mg = 1000
    #            g -> kg = 0.001
    # therefore  kg -> mg = 1000 / 0.001 = 1,000,000
    intermediate_match = frappe.db.sql(
        ,  # the SQL template string is stored in this record's documentation field
        {"to_uom": to_uom, "from_uom": from_uom},
        as_dict=1,
    )
    if intermediate_match:
        return intermediate_match[0].value


@frappe.whitelist()
erpnext/stock/doctype/item/item.py
229
@frappe.whitelist()
erpnext
{ "docstring": "Get UOM conversion factor from uom to stock_uom\n\te.g. uom = \"Kg\", stock_uom = \"Gram\" then returns 1000.0\n\t\n\t\t\tselect (first.value / second.value) as value\n\t\t\tfrom `tabUOM Conversion Factor` first\n\t\t\tjoin `tabUOM Conversion Factor` second\n\t\t\t\ton first.from_uom = second.from_uom\n\t\t\twhere\n\t\t\t\tfirst.to_uom = %(to_uom)s\n\t\t\t\tand second.to_uom = %(from_uom)s\n\t\t\tlimit 1\n\t\t\t", "language": "en", "n_whitespaces": 38, "n_words": 48, "vocab_size": 38 }
105
Python
64
494bd9ef78313436f0424b918f200dab8fc7c20b
item.py
67,625
30
131
get_uom_conv_factor
https://github.com/frappe/erpnext.git
style: format code with black
82
1
14,581
11
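A standalone sketch of the intermediate-UOM arithmetic described in the code comment above; the factors table is illustrative only and does not touch frappe.

# Illustrative conversion factors keyed by (intermediate_uom, target_uom).
factors = {("g", "mg"): 1000.0, ("g", "kg"): 0.001}

def via_intermediate(from_uom, to_uom, intermediate="g"):
    # kg -> mg = (g -> mg) / (g -> kg) = 1000 / 0.001 = 1,000,000
    return factors[(intermediate, to_uom)] / factors[(intermediate, from_uom)]

print(via_intermediate("kg", "mg"))  # 1000000.0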
2
12
def stack1(x, filters, blocks, stride1=2, name=None):
    x = block1(x, filters, stride=stride1, name=name + "_block1")
    for i in range(2, blocks + 1):
        x = block1(
            x, filters, conv_shortcut=False, name=name + "_block" + str(i)
        )
    return x
keras/applications/resnet.py
109
keras
{ "docstring": "A set of stacked residual blocks.\n\n Args:\n x: input tensor.\n filters: integer, filters of the bottleneck layer in a block.\n blocks: integer, blocks in the stacked blocks.\n stride1: default 2, stride of the first layer in the first block.\n name: string, stack label.\n\n Returns:\n Output tensor for the stacked blocks.\n ", "language": "en", "n_whitespaces": 89, "n_words": 50, "vocab_size": 34 }
35
Python
26
84afc5193d38057e2e2badf9c889ea87d80d8fbf
resnet.py
269,422
7
73
stack1
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
72
0
80,070
14
1
8
def print_rules(self) -> Iterator[str]:
    yield from self._defined_facts_lines()
    yield ''
    yield ''
    yield from self._full_implications_lines()
    yield ''
    yield ''
    yield from self._prereq_lines()
    yield ''
    yield ''
    yield from self._beta_rules_lines()
    yield ''
    yield ''
    yield "generated_assumptions = {'defined_facts': defined_facts, 'full_implications': full_implications,"
    yield " 'prereq': prereq, 'beta_rules': beta_rules, 'beta_triggers': beta_triggers}"
    yield ''
    yield ''
sympy/core/facts.py
140
sympy
{ "docstring": " Returns a generator with lines to represent the facts and rules ", "language": "en", "n_whitespaces": 12, "n_words": 11, "vocab_size": 11 }
51
Python
24
f68e8de4252200cfc74b9433d00f77c4510ac68d
facts.py
199,955
18
63
print_rules
https://github.com/sympy/sympy.git
refactor
184
0
49,448
8
10
27
def _process_input(self, batch):
    if not self._additional_keys:
        existing_keys = list(batch.keys())

    original_boxes = np.array([(face.left, face.top, face.width, face.height)
                               for face in batch["detected_faces"]])
    adjusted_boxes = self._get_adjusted_boxes(original_boxes)
    retval = {}
    for bounding_boxes in adjusted_boxes:
        for face, box in zip(batch["detected_faces"], bounding_boxes):
            face.left, face.top, face.width, face.height = box

        result = self.process_input(batch)
        if not self._additional_keys:
            self._additional_keys = [key for key in result if key not in existing_keys]
        for key in self._additional_keys:
            retval.setdefault(key, []).append(batch[key])
            del batch[key]

    # Place the original bounding box back to detected face objects
    for face, box in zip(batch["detected_faces"], original_boxes):
        face.left, face.top, face.width, face.height = box

    batch.update(retval)
    return batch
plugins/extract/align/_base.py
314
faceswap
{ "docstring": " Process the input to the aligner model multiple times based on the user selected\n `re-feed` command line option. This adjusts the bounding box for the face to be fed\n into the model by a random amount within 0.05 pixels of the detected face's shortest axis.\n\n References\n ----------\n https://studios.disneyresearch.com/2020/06/29/high-resolution-neural-face-swapping-for-visual-effects/\n\n Parameters\n ----------\n batch: dict\n Contains the batch that is currently being passed through the plugin process\n\n Returns\n -------\n dict\n The batch with input processed\n ", "language": "en", "n_whitespaces": 179, "n_words": 72, "vocab_size": 58 }
93
Python
53
5e73437be47f2410439a3c6716de96354e6a0c94
_base.py
101,239
20
205
_process_input
https://github.com/deepfakes/faceswap.git
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
323
0
20,659
14
3
6
def reduce_per_replica(values, strategy, reduction):
    if reduction == "auto":
        reduction = "first" if backend.is_tpu_strategy(strategy) else "sum"
keras/engine/training.py
49
keras
{ "docstring": "Attempt to reduce the structure `values` to single values.\n\n Given `values` (a `tf.Tensor` or a `PerReplica` structure),\n which represents the values across all the replicas, `reduce_per_replica`\n attempts to \"reduce\" those values and returns the corresponding structure\n that represents only single values.\n\n Currently, `reduce_per_replica` is only used for reducing the metric results\n from `tf.distribute.Strategy.run()`. Depending on the underlying\n `Strategy` implementation, `values` may be a `PerReplica` object,\n which can be thought of as a collection of values across the replicas,\n or a `tf.Tensor`, if the strategy has already conducted the reduction\n for the downstream library.\n\n There are five possible outcomes of reduction:\n\n 1) if the `values` is a structure of simple `tf.Tensor`s, meaning that\n reduction is not actually needed, `reduce_per_replica` returns the\n structure as-is.\n 2) else, if `reduction=\"auto\"`, then it assumes \"first\" if running\n under `TPUStrategy`, and \"sum\" otherwise. This should only be used\n for training cases (`fit()`).\n 3) else, if `reduction=\"first\"`, then `reduce_per_replica`\n returns the values of the first replica. This is used in the case of\n training and evaluation, where `values` is expected to hold the same\n value across the replicas as a result of `Strategy`'s synchronization\n across the replicas.\n `reduce_per_replica` does not synchronize the values.\n 4) else, if `reduction=\"sum\"`, then `reduce_per_replica` returns the sum\n of values for all replicas. This is used in the custom training loop\n case, where each replica contain different values which are not\n synchronized.\n 5) else, if `reduction=\"concat\"`, then `reduce_per_replica`\n returns the concatenation of the values across the replicas, along the\n axis of dimension 0. This is used in the inference case (`predict()`).\n\n Args:\n values: Structure of `PerReplica` objects or `tf.Tensor`s. `tf.Tensor`s\n are returned as-is.\n strategy: `tf.distribute.Strategy` object.\n reduction: One of `\"auto\"`, `\"first\"`, `\"concat\"`, or `\"sum\"`.\n `\"auto\"` will select `\"first\"` when used under a TPUStrategy, or\n `\"sum\"` otherwise.\n\n Returns:\n Structure of `Tensor`s, representing the result of reduction.\n\n Raises:\n ValueError: if the reduction method is not supported.\n ", "language": "en", "n_whitespaces": 502, "n_words": 311, "vocab_size": 161 }
15
Python
13
47a4cfe06faf54e271ab50e6d0aae73b06a35f86
training.py
279,867
5
40
reduce_per_replica
https://github.com/keras-team/keras.git
Update training.py
28
0
83,158
11
5
20
def auc(x, y):
    check_consistent_length(x, y)
    x = column_or_1d(x)
    y = column_or_1d(y)

    if x.shape[0] < 2:
        raise ValueError(
            "At least 2 points are needed to compute area under curve, but x.shape = %s"
            % x.shape
        )

    direction = 1
    dx = np.diff(x)
    if np.any(dx < 0):
        if np.all(dx <= 0):
            direction = -1
        else:
            raise ValueError("x is neither increasing nor decreasing : {}.".format(x))

    area = direction * np.trapz(y, x)
    if isinstance(area, np.memmap):
        # Reductions such as .sum used internally in np.trapz do not return a
        # scalar by default for numpy.memmap instances contrary to
        # regular numpy.ndarray instances.
        area = area.dtype.type(area)
    return area
sklearn/metrics/_ranking.py
209
scikit-learn
{ "docstring": "Compute Area Under the Curve (AUC) using the trapezoidal rule.\n\n This is a general function, given points on a curve. For computing the\n area under the ROC-curve, see :func:`roc_auc_score`. For an alternative\n way to summarize a precision-recall curve, see\n :func:`average_precision_score`.\n\n Parameters\n ----------\n x : ndarray of shape (n,)\n X coordinates. These must be either monotonic increasing or monotonic\n decreasing.\n y : ndarray of shape, (n,)\n Y coordinates.\n\n Returns\n -------\n auc : float\n Area Under the Curve.\n\n See Also\n --------\n roc_auc_score : Compute the area under the ROC curve.\n average_precision_score : Compute average precision from prediction scores.\n precision_recall_curve : Compute precision-recall pairs for different\n probability thresholds.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn import metrics\n >>> y = np.array([1, 1, 2, 2])\n >>> pred = np.array([0.1, 0.4, 0.35, 0.8])\n >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)\n >>> metrics.auc(fpr, tpr)\n 0.75\n ", "language": "en", "n_whitespaces": 258, "n_words": 143, "vocab_size": 103 }
102
Python
79
f5871a39f445d84b55c5d7897c875a86d590408e
_ranking.py
260,010
20
126
auc
https://github.com/scikit-learn/scikit-learn.git
DOC Ensures that sklearn.metrics._ranking.auc passes numpydoc validation (#23433)
235
0
76,030
15
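A usage sketch taken from this record's docstring; it assumes scikit-learn and numpy are installed.

import numpy as np
from sklearn import metrics

y = np.array([1, 1, 2, 2])
pred = np.array([0.1, 0.4, 0.35, 0.8])
# Build an ROC curve, then integrate it with the trapezoidal rule shown above.
fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
print(metrics.auc(fpr, tpr))  # 0.75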
2
10
def _get_stem(self):
    filename = os.path.basename(self.src_path)
    stem, ext = os.path.splitext(filename)
    return 'index' if stem in ('index', 'README') else stem
mkdocs/structure/files.py
73
mkdocs
{ "docstring": "Return the name of the file without it's extension.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
18
Python
16
e7f07cc82ab2be920ab426ba07456d8b2592714d
files.py
224,035
4
42
_get_stem
https://github.com/mkdocs/mkdocs.git
Remove spaces at the ends of docstrings, normalize quotes
46
0
57,182
9
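A standalone illustration of the stem logic above; the real method reads self.src_path from a mkdocs File object, so the free function here is a hypothetical stand-in.

import os

def stem_of(src_path):
    filename = os.path.basename(src_path)
    stem, _ext = os.path.splitext(filename)
    return 'index' if stem in ('index', 'README') else stem

print(stem_of('docs/README.md'))  # index
print(stem_of('docs/usage.md'))   # usage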
2
6
def heappop(heap):
    lastelt = heap.pop()  # raises appropriate IndexError if heap is empty
    if heap:
        returnitem = heap[0]
        heap[0] = lastelt
        _siftup(heap, 0)
        return returnitem
    return lastelt
python3.10.4/Lib/heapq.py
64
XX-Net
{ "docstring": "Pop the smallest item off the heap, maintaining the heap invariant.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 9 }
27
Python
19
8198943edd73a363c266633e1aa5b2a9e9c9f526
heapq.py
217,640
8
38
heappop
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
70
0
54,859
9
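The snippet above is the standard-library implementation bundled with this repo; normal usage goes through the heapq module, as in this short sketch.

import heapq

heap = [5, 1, 4]
heapq.heapify(heap)
print(heapq.heappop(heap))  # 1 (smallest item; the heap invariant is maintained)
print(heap)                 # remaining items still form a valid heap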
2
4
def test_get_significant_states_only(hass_history):
    hass = hass_history
    entity_id = "sensor.test"
tests/components/history/test_init.py
25
core
{ "docstring": "Test significant states when significant_states_only is set.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
8
Python
7
29bda196b5e0a90a2bea7e1797742236114afc1c
test_init.py
299,828
36
264
test_get_significant_states_only
https://github.com/home-assistant/core.git
Break apart recorder into tasks and core modules (#71222)
17
0
98,730
7
1
4
def get_labels(self):
    return ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "B-MISC", "I-MISC"], \
        self.BUILDER_CONFIGS[self.name]['pos_tags']
paddlenlp/datasets/conll2002.py
72
PaddleNLP
{ "docstring": "\n Returns labels of ner tags and pos tags.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
14
Python
14
7b455cce47204d4d664deea9661670a838ec8d35
conll2002.py
322,232
3
39
get_labels
https://github.com/PaddlePaddle/PaddleNLP.git
feat: add conll2002 dataset (#1561) Co-authored-by: Zeyu Chen <[email protected]>
42
0
118,099
9
2
21
def supplier_query(doctype, txt, searchfield, start, page_len, filters):
    supp_master_name = frappe.defaults.get_user_default("supp_master_name")

    if supp_master_name == "Supplier Name":
        fields = ["name", "supplier_group"]
    else:
        fields = ["name", "supplier_name", "supplier_group"]

    fields = get_fields("Supplier", fields)

    return frappe.db.sql(
        # the SQL template string is stored in this record's documentation field
        .format(
            **{"field": ", ".join(fields), "key": searchfield, "mcond": get_match_cond(doctype)}
        ),
        {"txt": "%%%s%%" % txt, "_txt": txt.replace("%", ""), "start": start, "page_len": page_len},
    )


@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
erpnext/controllers/queries.py
228
@frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
erpnext
{ "docstring": "select {field} from `tabSupplier`\n\t\twhere docstatus < 2\n\t\t\tand ({key} like %(txt)s\n\t\t\tor supplier_name like %(txt)s) and disabled=0\n\t\t\tand (on_hold = 0 or (on_hold = 1 and CURDATE() > release_date))\n\t\t\t{mcond}\n\t\torder by\n\t\t\tif(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),\n\t\t\tif(locate(%(_txt)s, supplier_name), locate(%(_txt)s, supplier_name), 99999),\n\t\t\tidx desc,\n\t\t\tname, supplier_name\n\t\tlimit %(start)s, %(page_len)s ", "language": "en", "n_whitespaces": 39, "n_words": 50, "vocab_size": 37 }
54
Python
43
494bd9ef78313436f0424b918f200dab8fc7c20b
queries.py
65,658
24
119
supplier_query
https://github.com/frappe/erpnext.git
style: format code with black
39
1
13,978
15
2
9
def _eval_evalf(self, prec):
    return Quaternion(*[arg.evalf(n=prec_to_dps(prec)) for arg in self.args])
sympy/algebras/quaternion.py
52
sympy
{ "docstring": "Returns the floating point approximations (decimal numbers) of the quaternion.\n\n Returns\n =======\n\n Quaternion\n Floating point approximations of quaternion(self)\n\n Examples\n ========\n\n >>> from sympy import Quaternion\n >>> from sympy import sqrt\n >>> q = Quaternion(1/sqrt(1), 1/sqrt(2), 1/sqrt(3), 1/sqrt(4))\n >>> q.evalf()\n 1.00000000000000\n + 0.707106781186547*i\n + 0.577350269189626*j\n + 0.500000000000000*k\n\n ", "language": "en", "n_whitespaces": 155, "n_words": 46, "vocab_size": 32 }
9
Python
9
498015021131af4dbb07eb110e5badaba8250c7b
quaternion.py
196,019
2
32
_eval_evalf
https://github.com/sympy/sympy.git
Updated import locations
23
0
47,519
14
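A usage sketch taken from this record's docstring; it assumes SymPy is installed, and evalf() dispatches to the _eval_evalf method shown above.

from sympy import Quaternion, sqrt

q = Quaternion(1/sqrt(1), 1/sqrt(2), 1/sqrt(3), 1/sqrt(4))
print(q.evalf())
# 1.00000000000000 + 0.707106781186547*i + 0.577350269189626*j + 0.500000000000000*k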
2
16
def call_deploy(cls, fname, col_partitions, **kwargs):
    return np.array(
        [
            cls.deploy(
                cls.parse,
                num_returns=NPartitions.get() + 2,
                fname=fname,
                columns=cols,
                num_splits=NPartitions.get(),
                **kwargs,
            )
            for cols in col_partitions
        ]
    ).T
modin/core/io/column_stores/column_store_dispatcher.py
96
modin
{ "docstring": "\n Deploy remote tasks to the workers with passed parameters.\n\n Parameters\n ----------\n fname : str, path object or file-like object\n Name of the file to read.\n col_partitions : list\n List of arrays with columns names that should be read\n by each partition.\n **kwargs : dict\n Parameters of deploying read_* function.\n\n Returns\n -------\n np.ndarray\n Array with references to the task deploy result for each partition.\n ", "language": "en", "n_whitespaces": 189, "n_words": 63, "vocab_size": 49 }
24
Python
24
97769988a6f19e4b76f34238c97bf159ee7626a5
column_store_dispatcher.py
153,543
14
65
call_deploy
https://github.com/modin-project/modin.git
REFACTOR-#3853: interacting with Dask interface through 'DaskWrapper' class (#3854) Co-authored-by: Devin Petersohn <[email protected]> Co-authored-by: Dmitry Chigarev <[email protected]> Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Anatoly Myachev <[email protected]>
226
0
35,432
15
8
53
def load_and_dump(self) -> None:
    with ExitStack() as stack:
        # set env vars
        stack.enter_context(change_env('JINA_FULL_CLI', 'true'))

        # change directory to `workspace`
        stack.enter_context(change_cwd(get_workspace_path(self.workspace_id)))

        # load and build
        f: Flow = Flow.load_config(
            str(self.localpath()), substitute=True, context=self.envs
        ).build()

        # get & set the ports mapping, set `runs_in_docker`
        port_mapping = []
        port_mapping.append(
            PortMapping(
                pod_name='gateway',
                pea_name='gateway',
                ports=Ports(port_expose=f.port_expose),
            )
        )
        for pod_name, pod in f._pod_nodes.items():
            runtime_cls = update_runtime_cls(pod.args, copy=True).runtime_cls
            if runtime_cls in ['WorkerRuntime'] + list(
                GATEWAY_RUNTIME_DICT.values()
            ):
                pod.args.runs_in_docker = True
                current_ports = Ports()
                for port_name in Ports.__fields__:
                    setattr(
                        current_ports,
                        port_name,
                        getattr(pod.args, port_name, None),
                    )

                port_mapping.append(
                    PortMapping(pod_name=pod_name, pea_name='', ports=current_ports)
                )
            elif (
                runtime_cls in ['ContainerRuntime']
                and hasattr(pod.args, 'replicas')
                and pod.args.replicas > 1
            ):
                for pea_args in [pod.peas_args['head']]:
                    self._update_port_mapping(pea_args, pod_name, port_mapping)

        self.ports = port_mapping
        # save to a new file & set it for partial-daemon
        f.save_config(filename=self.newfile)
        self.params.uses = self.newname
daemon/api/dependencies.py
446
jina
{ "docstring": "\n every Flow created inside JinaD lives inside a container. It is important to know the\n list of ports to be published with localhost before actually starting the container.\n\n 1. `load` the flow yaml here.\n - yaml is stored in `workspace` directory, so we'll `cd` there\n - yaml might include env vars. so we'll set them (passed via query params)\n 2. `build` the Flow so that `gateway` gets added.\n - get the list of ports to be published (port_expose, port_in, port_out, port_ctrl)\n - ports need to be published for gateway & executors that are not `ContainerRuntime` or `JinadRuntime` based\n - Pod level args for ports are enough, as we don't need to publish Pea ports\n - all the above Pods also run in docker, hence we set `runs_in_docker`\n 3. `save` the Flow config.\n - saves port configs of all `executors` into the new yaml.\n - set `JINA_FULL_CLI` envvar, so that `gateway` args are also added.\n - save the config into a new file.\n 4. pass this new file as filename to `partial-daemon` to start the Flow\n ", "language": "en", "n_whitespaces": 324, "n_words": 175, "vocab_size": 109 }
128
Python
94
933415bfa1f9eb89f935037014dfed816eb9815d
dependencies.py
9,812
59
273
load_and_dump
https://github.com/jina-ai/jina.git
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests 
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" 
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request 
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
858
0
1,704
18
5
12
def hvac_action(self) -> HVACAction:
    if self._model != NA_VALVE and self._boilerstatus is not None:
        return CURRENT_HVAC_MAP_NETATMO[self._boilerstatus]
    # Maybe it is a valve
    if (
        heating_req := getattr(self._room, "heating_power_request", 0)
    ) is not None and heating_req > 0:
        return HVACAction.HEATING
    return HVACAction.IDLE
homeassistant/components/netatmo/climate.py
95
core
{ "docstring": "Return the current running hvac operation if supported.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
40
Python
32
81abeac83ed85c5753cb8f2ac317caf079cf1868
climate.py
287,828
9
60
hvac_action
https://github.com/home-assistant/core.git
Netatmo refactor to use pyatmo 7.0.1 (#73482) (#78523) Co-authored-by: Robert Svensson <[email protected]>
115
0
87,015
12
2
18
def test_actual_timeouts(mock_build_dir):
    query = bazel_sharding.get_target_expansion_query(
        ["..."], tests_only=False, exclude_manual=False
    )
    xml_output = bazel_sharding.run_bazel_query(query, debug=False)
    rules = set(bazel_sharding.extract_rules_from_xml(xml_output))
    expected_timeouts = {
        "test_default": 60 * 5,
        "test_small": 60,
        "test_medium": 60 * 5,
        "test_large": 60 * 15,
        "test_enormous": 60 * 60,
        "test_short": 60,
        "test_moderate": 60 * 5,
        "test_long": 60 * 15,
        "test_eternal": 60 * 60,
        "test_both_size_and_timeout": 60 * 15,
    }
    assert len(rules) == len(expected_timeouts)
    assert (rule.actual_timeout_s == expected_timeouts[rule.name] for rule in rules)
ci/run/bazel_sharding/tests/test_bazel_sharding.py
218
ray
{ "docstring": "Test that size and timeout attrs are mapped to seconds correctly.\n\n Assert that each of the fake rules is mapped correctly.\n ", "language": "en", "n_whitespaces": 27, "n_words": 21, "vocab_size": 18 }
68
Python
42
d1aa5608979891e3dd859c07fa919fa01cfead5f
test_bazel_sharding.py
134,048
20
134
test_actual_timeouts
https://github.com/ray-project/ray.git
[CI] Make bazel sharding for parallel buildkite more intelligent (#29221) This PR implements two changes to our `bazel-sharding.py` script, used for determining which bazel tests to run on each instance when buildkite parallelism is used: * An ability to filter tests before they are sharded, using the same logic as `bazel test`. This is done by specifying the `--tag_filters` argument, eg. `--tag_filters=air,-gpu`. If we filter tests with `bazel test` *after* they are sharded, we can end up with imbalanced shards as eg. all tests we want to filter out are assigned to one shard. This feature is enabled for Serve tests and it will be required for the changes I want to make to AIR CI. * A new algorithm to balance the shards, finally implementing what that comment was asking for all this time. Instead of trying to assign the same number of tests (which have variable timeouts) to each shard, the new algorithm tries to make sure each shard will finish in more or less the same time. This is achieved through a simple but good enough heuristic. The old algorithm can still be accessed through the `--sharding_strategy` argument. Those two changes do cause the complexity of the script to increase, necessitating proper testing. In order to facilitate that, this PR also adds a basic buildkite test harness for CI tools/scripts. After this PR is merged, the next step will be to move most of our manually parallelized jobs to use buildkite parallelism with the new logic here. Signed-off-by: Antoni Baum <[email protected]>
172
0
30,180
10
2
3
def configTestMesh(device_type_mesh_map):  # pylint: disable=invalid-name
    reset_context()
keras/dtensor/test_util.py
20
keras
{ "docstring": "Configs corresponding mesh given test context.\n\n If runs on a CPU mesh, set virtual device on CPU.\n If runs on a GPU mesh, sets virtual device on GPU with proper memory limits.\n if runs on a TPU mesh, initializes TPU system.\n\n Args:\n device_type_mesh_map: A dictionary containing device_type -> mesh mapping.\n\n Returns:\n A properly configured mesh for use in test.\n ", "language": "en", "n_whitespaces": 119, "n_words": 59, "vocab_size": 41 }
6
Python
6
84afc5193d38057e2e2badf9c889ea87d80d8fbf
test_util.py
270,641
12
75
configTestMesh
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
21
0
80,502
7
1
22
async def test_lights(hass, mock_bridge_v2, v2_resources_test_data):
    await mock_bridge_v2.api.load_test_data(v2_resources_test_data)

    await setup_platform(hass, mock_bridge_v2, "light")
    # there shouldn't have been any requests at this point
    assert len(mock_bridge_v2.mock_requests) == 0
    # 6 entities should be created from test data (grouped_lights are disabled by default)
    assert len(hass.states.async_all()) == 6

    # test light which supports color and color temperature
    light_1 = hass.states.get("light.hue_light_with_color_and_color_temperature_1")
    assert light_1 is not None
    assert (
        light_1.attributes["friendly_name"]
        == "Hue light with color and color temperature 1"
    )
    assert light_1.state == "on"
    assert light_1.attributes["brightness"] == int(46.85 / 100 * 255)
    assert light_1.attributes["mode"] == "normal"
    assert light_1.attributes["color_mode"] == COLOR_MODE_XY
    assert set(light_1.attributes["supported_color_modes"]) == {
        COLOR_MODE_COLOR_TEMP,
        COLOR_MODE_XY,
    }
    assert light_1.attributes["xy_color"] == (0.5614, 0.4058)
    assert light_1.attributes["min_mireds"] == 153
    assert light_1.attributes["max_mireds"] == 500
    assert light_1.attributes["dynamics"] == "dynamic_palette"
    assert light_1.attributes["effect_list"] == ["None", "candle", "fire"]
    assert light_1.attributes["effect"] == "None"

    # test light which supports color temperature only
    light_2 = hass.states.get("light.hue_light_with_color_temperature_only")
    assert light_2 is not None
    assert (
        light_2.attributes["friendly_name"] == "Hue light with color temperature only"
    )
    assert light_2.state == "off"
    assert light_2.attributes["mode"] == "normal"
    assert light_2.attributes["supported_color_modes"] == [COLOR_MODE_COLOR_TEMP]
    assert light_2.attributes["min_mireds"] == 153
    assert light_2.attributes["max_mireds"] == 454
    assert light_2.attributes["dynamics"] == "none"
    assert light_2.attributes["effect_list"] == ["None", "candle", "sunrise"]

    # test light which supports color only
    light_3 = hass.states.get("light.hue_light_with_color_only")
    assert light_3 is not None
    assert light_3.attributes["friendly_name"] == "Hue light with color only"
    assert light_3.state == "on"
    assert light_3.attributes["brightness"] == 128
    assert light_3.attributes["mode"] == "normal"
    assert light_3.attributes["supported_color_modes"] == [COLOR_MODE_XY]
    assert light_3.attributes["color_mode"] == COLOR_MODE_XY
    assert light_3.attributes["dynamics"] == "dynamic_palette"

    # test light which supports on/off only
    light_4 = hass.states.get("light.hue_on_off_light")
    assert light_4 is not None
    assert light_4.attributes["friendly_name"] == "Hue on/off light"
    assert light_4.state == "off"
    assert light_4.attributes["mode"] == "normal"
    assert light_4.attributes["supported_color_modes"] == []
tests/components/hue/test_light_v2.py
729
core
{ "docstring": "Test if all v2 lights get created with correct features.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
264
Python
124
dbef90654f3693401a2df88fa00afbbffbdffcd2
test_light_v2.py
294,272
52
423
test_lights
https://github.com/home-assistant/core.git
Add effects feature to Hue lights (#68567)
458
0
93,309
11
1
5
def get_devices(self) -> dict[str, dict]:
    return self.devices
tests/components/lutron_caseta/__init__.py
28
core
{ "docstring": "Will return all known devices connected to the Smart Bridge.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
7
Python
7
8b1713a691bd0c90824261be785f1998ad89f66f
__init__.py
304,501
3
17
get_devices
https://github.com/home-assistant/core.git
Add support for non-serialized devices (light, switch, cover, fan in RA3 Zones) (#75323) Co-authored-by: J. Nick Koston <[email protected]>
21
0
103,308
6
5
14
def SearchBackend(params):
    if connection.vendor == 'postgresql':
        from .postgres.postgres import PostgresSearchBackend
        return PostgresSearchBackend(params)
    elif connection.vendor == 'mysql':
        from .mysql.mysql import MySQLSearchBackend
        return MySQLSearchBackend(params)
    elif connection.vendor == 'sqlite':
        from .sqlite.utils import fts5_available
        if fts5_available():
            from .sqlite.sqlite import SQLiteSearchBackend
            return SQLiteSearchBackend(params)
        else:
            from .fallback import DatabaseSearchBackend
            return DatabaseSearchBackend(params)
    else:
        from .fallback import DatabaseSearchBackend
        return DatabaseSearchBackend(params)
wagtail/search/backends/database/__init__.py
177
wagtail
{ "docstring": "\n Returns the appropriate search backend for the current 'default' database system\n ", "language": "en", "n_whitespaces": 18, "n_words": 11, "vocab_size": 10 }
52
Python
28
4248d406c011d6ba6207bb0e0e9b885813d961be
__init__.py
70,498
18
99
SearchBackend
https://github.com/wagtail/wagtail.git
Test for presence of fts5 extension in sqlite backend initialisation and migration
174
0
15,513
13
3
13
def at_time(self, time, asof=False, axis=None):  # noqa: PR01, RT01, D200
    axis = self._get_axis_number(axis)
    idx = self.index if axis == 0 else self.columns
    indexer = pandas.Series(index=idx).at_time(time, asof=asof).index
    return self.loc[indexer] if axis == 0 else self.loc[:, indexer]
modin/pandas/base.py
118
modin
{ "docstring": "\n Select values at particular time of day (e.g., 9:30AM).\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
35
Python
27
605efa618e7994681f57b11d04d417f353ef8d50
base.py
153,617
5
78
at_time
https://github.com/modin-project/modin.git
DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333) Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Alexander Myskov <[email protected]>
71
0
35,498
12
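A hedged usage sketch based on the matching pandas API (modin.pandas mirrors pandas here); the data and frequency are illustrative only.

import pandas as pd

idx = pd.date_range('2018-04-09', periods=4, freq='12H')
ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=idx)
print(ts.at_time('12:00'))  # only the rows whose timestamps fall exactly at 12:00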
1
7
def autocorrelation_plot(series, ax=None, **kwargs) -> Axes:
    plot_backend = _get_plot_backend("matplotlib")
    return plot_backend.autocorrelation_plot(series=series, ax=ax, **kwargs)
pandas/plotting/_misc.py
60
pandas
{ "docstring": "\n Autocorrelation plot for time series.\n\n Parameters\n ----------\n series : Time series\n ax : Matplotlib axis object, optional\n **kwargs\n Options to pass to matplotlib plotting method.\n\n Returns\n -------\n class:`matplotlib.axis.Axes`\n\n Examples\n --------\n\n The horizontal lines in the plot correspond to 95% and 99% confidence bands.\n\n The dashed line is 99% confidence band.\n\n .. plot::\n :context: close-figs\n\n >>> spacing = np.linspace(-9 * np.pi, 9 * np.pi, num=1000)\n >>> s = pd.Series(0.7 * np.random.rand(1000) + 0.3 * np.sin(spacing))\n >>> pd.plotting.autocorrelation_plot(s)\n <AxesSubplot:title={'center':'width'}, xlabel='Lag', ylabel='Autocorrelation'>\n ", "language": "en", "n_whitespaces": 167, "n_words": 79, "vocab_size": 64 }
13
Python
12
4bb1fd50a63badd38b5d96d9c4323dae7bc36d8d
_misc.py
167,386
32
37
autocorrelation_plot
https://github.com/pandas-dev/pandas.git
TYP: Missing return annotations in util/tseries/plotting (#47510) * TYP: Missing return annotations in util/tseries/plotting * the more tricky parts
22
0
39,990
9
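A usage sketch taken from this record's docstring; it assumes pandas, numpy, and matplotlib are installed.

import numpy as np
import pandas as pd

spacing = np.linspace(-9 * np.pi, 9 * np.pi, num=1000)
s = pd.Series(0.7 * np.random.rand(1000) + 0.3 * np.sin(spacing))
ax = pd.plotting.autocorrelation_plot(s)  # returns a matplotlib Axes with confidence bands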
9
20
def _atomic(e, recursive=False):
    pot = _preorder_traversal(e)
    seen = set()
    if isinstance(e, Basic):
        free = getattr(e, "free_symbols", None)
        if free is None:
            return {e}
    else:
        return set()
    from .symbol import Symbol
    from .function import Derivative, Function
    atoms = set()
    for p in pot:
        if p in seen:
            pot.skip()
            continue
        seen.add(p)
        if isinstance(p, Symbol) and p in free:
            atoms.add(p)
        elif isinstance(p, (Derivative, Function)):
            if not recursive:
                pot.skip()
            atoms.add(p)
    return atoms
sympy/core/basic.py
232
sympy
{ "docstring": "Return atom-like quantities as far as substitution is\n concerned: Derivatives, Functions and Symbols. Do not\n return any 'atoms' that are inside such quantities unless\n they also appear outside, too, unless `recursive` is True.\n\n Examples\n ========\n\n >>> from sympy import Derivative, Function, cos\n >>> from sympy.abc import x, y\n >>> from sympy.core.basic import _atomic\n >>> f = Function('f')\n >>> _atomic(x + y)\n {x, y}\n >>> _atomic(x + f(y))\n {x, f(y)}\n >>> _atomic(Derivative(f(x), x) + cos(x) + y)\n {y, cos(x), Derivative(f(x), x)}\n\n ", "language": "en", "n_whitespaces": 128, "n_words": 80, "vocab_size": 60 }
68
Python
46
65be461082dda54c8748922f9c29a19af1279fe1
basic.py
197,309
24
140
_atomic
https://github.com/sympy/sympy.git
Remove abbreviations in documentation
228
0
48,452
14
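A usage sketch taken from this record's docstring; note that _atomic is a private SymPy helper, so the import path may change between versions.

from sympy import Derivative, Function, cos
from sympy.abc import x, y
from sympy.core.basic import _atomic

f = Function('f')
print(_atomic(x + y))                             # {x, y}
print(_atomic(x + f(y)))                          # {x, f(y)}
print(_atomic(Derivative(f(x), x) + cos(x) + y))  # {y, cos(x), Derivative(f(x), x)}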
6
14
def ode_order(expr, func):
    a = Wild('a', exclude=[func])
    if expr.match(a):
        return 0

    if isinstance(expr, Derivative):
        if expr.args[0] == func:
            return len(expr.variables)
        else:
            return max(ode_order(arg, func) for arg in expr.args[0].args) + len(expr.variables)
    else:
        return max(ode_order(arg, func) for arg in expr.args)
sympy/solvers/deutils.py
161
sympy
{ "docstring": "\n Returns the order of a given differential\n equation with respect to func.\n\n This function is implemented recursively.\n\n Examples\n ========\n\n >>> from sympy import Function\n >>> from sympy.solvers.deutils import ode_order\n >>> from sympy.abc import x\n >>> f, g = map(Function, ['f', 'g'])\n >>> ode_order(f(x).diff(x, 2) + f(x).diff(x)**2 +\n ... f(x).diff(x), f(x))\n 2\n >>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), f(x))\n 2\n >>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), g(x))\n 3\n\n ", "language": "en", "n_whitespaces": 119, "n_words": 67, "vocab_size": 46 }
38
Python
26
bd9f607176c58dfba01e27c05c2b7d49ff97c901
deutils.py
198,418
11
103
ode_order
https://github.com/sympy/sympy.git
Improve loop performance in solvers
103
0
48,925
17
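A usage sketch taken from this record's docstring; it assumes SymPy is installed.

from sympy import Function
from sympy.abc import x
from sympy.solvers.deutils import ode_order

f, g = map(Function, ['f', 'g'])
print(ode_order(f(x).diff(x, 2) + f(x).diff(x)**2 + f(x).diff(x), f(x)))  # 2
print(ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), f(x)))                 # 2
print(ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), g(x)))                 # 3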
1
9
def add_to_apply_calls(self, func, *args, **kwargs):
    return type(self)(
        self.list_of_partitions_to_combine,
        full_axis=self.full_axis,
        call_queue=self.call_queue + [(func, args, kwargs)],
    )
modin/core/execution/ray/implementations/pandas_on_ray/partitioning/virtual_partition.py
69
modin
{ "docstring": "\n Add a function to the call queue.\n\n Parameters\n ----------\n func : callable or ray.ObjectRef\n Function to be added to the call queue.\n *args : iterable\n Additional positional arguments to be passed in `func`.\n **kwargs : dict\n Additional keyword arguments to be passed in `func`.\n\n Returns\n -------\n PandasOnRayDataframeVirtualPartition\n A new ``PandasOnRayDataframeVirtualPartition`` object.\n\n Notes\n -----\n It does not matter if `func` is callable or an ``ray.ObjectRef``. Ray will\n handle it correctly either way. The keyword arguments are sent as a dictionary.\n ", "language": "en", "n_whitespaces": 222, "n_words": 79, "vocab_size": 58 }
15
Python
15
8d1004fdbdaa05700613c8e6287641a732acf606
virtual_partition.py
153,194
6
47
add_to_apply_calls
https://github.com/modin-project/modin.git
FIX-#3675: Expand virtual partitioning utility (#3886) Co-authored-by: mvashishtha <[email protected]> Co-authored-by: jeffreykennethli <[email protected]> Co-authored-by: Anatoly Myachev <[email protected]> Co-authored-by: Vasily Litvinov <[email protected]> Co-authored-by: Alexey Prutskov <[email protected]> Co-authored-by: Mahesh Vashishtha <[email protected]> Co-authored-by: Naren Krishna <[email protected]> Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Dmitry Chigarev <[email protected]> Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Doris Lee <[email protected]> Co-authored-by: Aditya Parameswaran <[email protected]> Co-authored-by: Rehan Sohail Durrani <[email protected]> Co-authored-by: Susmit Vengurlekar <[email protected]> Signed-off-by: Devin Petersohn <[email protected]>
69
0
35,294
11
2
5
def is_active_loop_rejected(self) -> bool: return self.active_loop is not None and self.active_loop.rejected
rasa/shared/core/trackers.py
35
rasa
{ "docstring": "Return True if there is an active loop and it's rejected.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
11
Python
11
e798bf049f036a5865c14d4343ed8a833864aabe
trackers.py
159,565
3
21
is_active_loop_rejected
https://github.com/RasaHQ/rasa.git
convert TrackerActiveLoop to a dataclass
25
0
38,337
8
1
3
def clear(self): self._value = False
python3.10.4/Lib/asyncio/locks.py
21
XX-Net
{ "docstring": "Reset the internal flag to false. Subsequently, coroutines calling\n wait() will block until set() is called to set the internal flag\n to true again.", "language": "en", "n_whitespaces": 37, "n_words": 24, "vocab_size": 19 }
5
Python
5
8198943edd73a363c266633e1aa5b2a9e9c9f526
locks.py
220,548
2
11
clear
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
19
0
56,047
7
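The `clear` record above is the standard-library `asyncio.Event.clear()`: once the flag is reset, later `wait()` calls block until `set()` runs again. A minimal, self-contained sketch of that behaviour (standard library only, illustrative timings):

```python
import asyncio

async def main():
    event = asyncio.Event()

    event.set()            # internal flag is now True
    await event.wait()     # returns immediately while the flag is set

    event.clear()          # reset the flag; subsequent wait() calls block again

    async def release_later():
        await asyncio.sleep(0.1)
        event.set()        # unblocks the waiter below

    asyncio.create_task(release_later())
    await event.wait()     # blocks until release_later() calls set()

asyncio.run(main())
```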
2
13
def unmap(self) -> BaseOperator: dag = self.get_dag() if not dag: raise RuntimeError("Cannot unmap a task unless it has a DAG") dag._remove_task(self.task_id) return self.create_unmapped_operator(dag) # TODO: Deprecate for Airflow 3.0 Chainable = Union[DependencyMixin, Sequence[DependencyMixin]]
airflow/models/baseoperator.py
85
airflow
{ "docstring": "Get the \"normal\" Operator after applying the current mapping", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
33
Python
31
fded2ca0b9c995737b401896b89e5c9fd7f24c91
baseoperator.py
44,593
7
39
unmap
https://github.com/apache/airflow.git
Rewrite decorated task mapping (#21328)
77
0
8,308
10
2
5
def get_admin_urls_for_registration(self): urls = () for instance in self.modeladmin_instances: urls += instance.get_admin_urls_for_registration() return urls
wagtail/contrib/modeladmin/options.py
45
wagtail
{ "docstring": "\n Utilised by Wagtail's 'register_admin_urls' hook to register urls for use\n by any associated ModelAdmin instances\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 14 }
14
Python
12
de3fcba9e95818e9634ab7de6bfcb1f4221f2775
options.py
70,994
5
26
get_admin_urls_for_registration
https://github.com/wagtail/wagtail.git
Fix warnings from flake8-comprehensions.
53
0
15,593
10
1
9
def print_help(self): help_text = f console.print(text=help_text, menu="Stocks - Backtesting")
gamestonk_terminal/stocks/backtesting/bt_controller.py
54
OpenBBTerminal
{ "docstring": "Print help\n[param]Ticker: [/param]{self.ticker.upper()}[cmds]\n\n whatif what if you had bought X shares on day Y\n\n ema buy when price exceeds EMA(l)\n ema_cross buy when EMA(short) > EMA(long)\n rsi buy when RSI < low and sell when RSI > high[/cmds]\n ", "language": "en", "n_whitespaces": 80, "n_words": 39, "vocab_size": 32 }
9
Python
9
82747072c511beb1b2672846ae2ee4aec53eb562
bt_controller.py
281,535
11
22
print_help
https://github.com/OpenBB-finance/OpenBBTerminal.git
Terminal Wide Rich (#1161) * My idea for how we handle Rich moving forward * remove independent consoles * FIxed pylint issues * add a few vars * Switched print to console * More transitions * Changed more prints * Replaced all prints * Fixing tabulate * Finished replace tabulate * Finished removing rich from Tabulate * add Panel around menu * add GST watermark under feature flag * Fixed 46 tests * Delete test_screener[False].yaml * Delete test_screener[True].yaml * Fixed the rest of the tests * add help and source color vars and use rgb * rich on stocks/options * update rich on disc, dps, sia * rich in gov, ins and scr menus * ba and ca menus with rich * Fixed import issue * Fixed some tests * removed termcolor * Removed prettytable * add rich to remaining stocks menus * FIxed linting issue * Added James' changes * Updated dependencies * Add rich to cryptocurrency menu * refactor economy and forex * refactor etf with rich * refactor mfunds * refactor rich rest * not specify style so default color works well on any background * Fixing mypy issues * Updated tests * More test fixes * James' test fixes * Updating tests : stocks/screener - fix cassettes using BR * Updating tests : crypto * Updating tests : disable DEBUG_MODE * Updating tests : stocks/fa/yfinance * minor fixes that escape * Improve the rich table function (that replaces tabulate :D ) * Fixed bad code * delete rogue file + dcf fix + NoConsole * sia mypy * fuck you linter * fuck you linter pt 2 * skip hehe * i hate the black linter * ubuntu mypy attempt * Update : rich_config + gtff * Updating tests : conftest * Updating tests : stocks * Update : rich_config * Updating : rich_config * make panel configurable for Theodore :b * colors update * Merged * Updating : rich_config + feature_flags * Updating : rich_config * Updating tests : stocks * Updating : feature_flags Co-authored-by: DidierRLopes <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]> Co-authored-by: james <[email protected]> Co-authored-by: jose-donato <[email protected]>
30
0
83,833
11
4
17
def resolve_url(root, info): open_as = root.get("open_as", AppExtensionOpenAs.POPUP) app_url = root["app_url"] url = root["url"] if url.startswith("/") and app_url and open_as == AppExtensionOpenAs.POPUP: parsed_url = urlparse(app_url) new_path = urljoin(parsed_url.path, url[1:]) return parsed_url._replace(path=new_path).geturl() return url
saleor/graphql/app/types.py
139
saleor
{ "docstring": "Return an extension url.\n\n Apply url stitching when these 3 conditions are met:\n - url starts with /\n - openAs == \"POPUP\"\n - appUrl is defined\n ", "language": "en", "n_whitespaces": 73, "n_words": 26, "vocab_size": 23 }
32
Python
23
4e6dca3085479e0ed0c471fe64dbd4ccd7a77a12
types.py
25,584
9
83
resolve_url
https://github.com/saleor/saleor.git
Add new type of target and include openAs option
107
0
4,908
12
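The `resolve_url` record above stitches a relative extension `url` onto `app_url` when the extension opens as a popup. A standalone sketch of the same `urlparse`/`urljoin` stitching, using made-up URLs rather than anything from the record:

```python
from urllib.parse import urljoin, urlparse

def stitch_extension_url(app_url: str, url: str) -> str:
    # Mirrors the stitching branch: a leading "/" in `url` is resolved against
    # the path of `app_url`, keeping the app's scheme and host.
    parsed = urlparse(app_url)
    new_path = urljoin(parsed.path, url[1:])
    return parsed._replace(path=new_path).geturl()

print(stitch_extension_url("https://shop.example.com/app/", "/orders/42"))
# https://shop.example.com/app/orders/42
```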
2
13
def get_train_dataloader(self): if self.train_dataset is None: raise ValueError("Trainer: training requires a train_dataset.") train_dataset = self.train_dataset train_sampler = self._get_train_sampler() return DataLoader( train_dataset, batch_sampler=train_sampler, collate_fn=self.data_collator, num_workers=self.args.dataloader_num_workers, )
paddlenlp/trainer/trainer_base.py
87
PaddleNLP
{ "docstring": "\n Returns the training [`~paddle.io.DataLoader`].\n\n Will use no sampler if `self.train_dataset` does not implement `__len__`, a random sampler (adapted to\n distributed training if necessary) otherwise.\n\n Subclass and override this method if you want to inject some custom behavior.\n ", "language": "en", "n_whitespaces": 73, "n_words": 37, "vocab_size": 32 }
25
Python
23
44a290e94d1becd1f09fddc3d873f9e19c9d6919
trainer_base.py
323,137
10
54
get_train_dataloader
https://github.com/PaddlePaddle/PaddleNLP.git
[Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761) * add some datasets for finetune. * support fine tune for all tastks. * add trainer prototype. * init verison for paddlenlp trainer. * refine trainer. * update for some details. * support multi-cards training evaluation. * support load from ckpt. * support for export inference model. * first version of trainer. * seq cls support clue. * trainer support for token classification and question answersing tasks. * fix as reviews. Co-authored-by: Zeyu Chen <[email protected]>
115
0
118,380
10
13
38
def __call__(self, samples, context=None): coarsest_stride = self.pad_to_stride # multi scale input is nested list if isinstance(samples, typing.Sequence) and len(samples) > 0 and isinstance( samples[0], typing.Sequence): inner_samples = samples[0] else: inner_samples = samples max_shape = np.array( [data['image'].shape for data in inner_samples]).max(axis=0) if coarsest_stride > 0: max_shape[1] = int( np.ceil(max_shape[1] / coarsest_stride) * coarsest_stride) max_shape[2] = int( np.ceil(max_shape[2] / coarsest_stride) * coarsest_stride) for data in inner_samples: im = data['image'] im_c, im_h, im_w = im.shape[:] padding_im = np.zeros( (im_c, max_shape[1], max_shape[2]), dtype=np.float32) padding_im[:, :im_h, :im_w] = im data['image'] = padding_im if 'semantic' in data and data['semantic'] is not None: semantic = data['semantic'] padding_sem = np.zeros( (1, max_shape[1], max_shape[2]), dtype=np.float32) padding_sem[:, :im_h, :im_w] = semantic data['semantic'] = padding_sem if 'gt_segm' in data and data['gt_segm'] is not None: gt_segm = data['gt_segm'] padding_segm = np.zeros( (gt_segm.shape[0], max_shape[1], max_shape[2]), dtype=np.uint8) padding_segm[:, :im_h, :im_w] = gt_segm data['gt_segm'] = padding_segm if 'gt_rbox2poly' in data and data['gt_rbox2poly'] is not None: # ploy to rbox polys = data['gt_rbox2poly'] rbox = bbox_utils.poly2rbox(polys) data['gt_rbox'] = rbox return samples @register_op
ppdet/data/transform/batch_operators.py
575
@register_op
PaddleDetection
{ "docstring": "\n Args:\n samples (list): a batch of sample, each is dict.\n ", "language": "en", "n_whitespaces": 36, "n_words": 10, "vocab_size": 10 }
166
Python
85
0a3d768ce3464fca945ba58f0742fbe003930ec7
batch_operators.py
210,019
40
363
__call__
https://github.com/PaddlePaddle/PaddleDetection.git
[dev] update assigner and tood_head (#5169)
699
1
52,850
15
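The `__call__` record above pads every image in a batch to the largest height and width present, rounded up to a multiple of `pad_to_stride`. The rounding is the key step; a NumPy-only sketch with illustrative shapes:

```python
import numpy as np

def padded_hw(heights_widths, coarsest_stride):
    # Largest (H, W) in the batch, rounded up to a multiple of the stride.
    max_hw = np.array(heights_widths).max(axis=0)
    return [int(np.ceil(v / coarsest_stride) * coarsest_stride) for v in max_hw]

print(padded_hw([(427, 640), (480, 512)], coarsest_stride=32))  # [480, 640]
```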
2
11
def is_categorical(arr) -> bool: warnings.warn( "is_categorical is deprecated and will be removed in a future version. " "Use is_categorical_dtype instead.", FutureWarning, stacklevel=find_stack_level(), ) return isinstance(arr, ABCCategorical) or is_categorical_dtype(arr)
pandas/core/dtypes/common.py
62
pandas
{ "docstring": "\n Check whether an array-like is a Categorical instance.\n\n .. deprecated:: 1.1.0\n Use ``is_categorical_dtype`` instead.\n\n Parameters\n ----------\n arr : array-like\n The array-like to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like is of a Categorical instance.\n\n Examples\n --------\n >>> is_categorical([1, 2, 3])\n False\n\n Categoricals, Series Categoricals, and CategoricalIndex will return True.\n\n >>> cat = pd.Categorical([1, 2, 3])\n >>> is_categorical(cat)\n True\n >>> is_categorical(pd.Series(cat))\n True\n >>> is_categorical(pd.CategoricalIndex([1, 2, 3]))\n True\n ", "language": "en", "n_whitespaces": 153, "n_words": 68, "vocab_size": 51 }
28
Python
28
0106c26529900bad0561efb9c9180f7f016365b0
common.py
169,777
39
36
is_categorical
https://github.com/pandas-dev/pandas.git
REVERT caching in find_stack_level (#49053) Revert "PERF cache find_stack_level (#48023)" This reverts commit 2f8d0a36703e81e4dca52ca9fe4f58c910c1b304. Co-authored-by: MarcoGorelli <>
68
0
40,464
10
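The `is_categorical` record above deprecates the helper in favour of a dtype check. A short sketch of the replacement pattern with the public pandas API:

```python
import pandas as pd

ser = pd.Series(["a", "b", "a"], dtype="category")

# Deprecated: pd.api.types.is_categorical(ser)
print(isinstance(ser.dtype, pd.CategoricalDtype))  # True
print(pd.api.types.is_categorical_dtype(ser))      # True (itself deprecated in later pandas)
```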
1
18
def regularization_terms(self) -> torch.Tensor: off_diagonal_entries = torch.masked_select(self.w, ~torch.eye(self.num_classes, dtype=bool)) weight_matrix_loss = self.off_diagonal_l2 * torch.linalg.vector_norm(off_diagonal_entries) bias_vector_loss = self.mu * torch.linalg.vector_norm(self.b, 2) return bias_vector_loss + weight_matrix_loss
ludwig/utils/calibration.py
110
ludwig
{ "docstring": "Off-Diagonal and Intercept Regularisation (ODIR).\n\n Described in \"Beyond temperature scaling: Obtaining well-calibrated multiclass probabilities with Dirichlet\n calibration\"\n https://proceedings.neurips.cc/paper/2019/file/8ca01ea920679a0fe3728441494041b9-Paper.pdf\n ", "language": "en", "n_whitespaces": 46, "n_words": 18, "vocab_size": 18 }
24
Python
19
e65f74e87e8e29922f4e9f9d839978ffb2c5b029
calibration.py
7,068
11
70
regularization_terms
https://github.com/ludwig-ai/ludwig.git
Adds mechanism for calibrating probabilities for category and binary features (#1949) * Started adding files for calibration implementation. * Adds option to return logits and labels in predictor. * Pre-commit fixes * First pass temperature scaling working. * Fixes calibration for categorical feature. * Separate calibrated logits from logits. * Adds option to revert temperature scaling. * Refactoring, move binary prediction logic into calibration class. * Reverted accidental commit to simple_model_training.py * Adds checks and comments. * Fixes matrix scaling, convert pandas series to numpy arrays. * Fixes number of classes for categorical features. * Adds structured calibration result, unit tests. * Make create_calibration_module not abstract, default implementation returns None. * Relax precision requirement for calibration test. * Save weights after calibration, so calibration results are included in save file. * Implemented dirichlet scaling with l2 off-diagonal regularization. * Adds masked_select off_diagonal method. * Change back to matrix scaling. * Updates test expectations to reflect learning rate settings. * Tuned default regularization weight. * Comments. * Set random seed, testing to see if that makes a difference. * Remove checks for exact NLL, ECE values post calibration. * Restored LOGITS to EXCLUDE_PRED_SET, added another option to return logits in batch_predict. * Factor calibration method out of Trainer into Calibrator * Removed horovod argument from calibrator. * Return batch_size if eval_batch_size not specified. * Fix calibration_module docstring. * Updates comment, adds fallback method of calibrating on training set if no validation set available. * Adds calibration registry, replaces if statements for instantiating calibration. * Raise ValueError if unsupported calibration method specified. * Remove calibrate method from Trainer * f string * Use backend to create predictor for calibration. * Moves saving out of calibrator * Fix comment. * Adds ray test of calibration. * Implements collect_logits in ray predictor. * First pass implementation of collect_labels. * Implements collect_logits and collect_labels in ray backend. * Merge predictions and labels in ray backend * Reverts collect_labels, get labels from dataset in calibrate. * Allow overriding EXCLUDE_PRED_SET when getting preds. * Changes 'calibration' config option to binary. * Test both binary and category output features in ray test. * Comments/ * Adds type hints. Co-authored-by: Daniel Treiman <[email protected]>
59
0
1,113
12
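The `regularization_terms` record above penalises the off-diagonal entries of the calibration weight matrix plus the bias vector (ODIR). The boolean-mask trick is the non-obvious part; a minimal PyTorch sketch with made-up sizes and coefficients:

```python
import torch

num_classes = 4
w = torch.randn(num_classes, num_classes)  # calibration weight matrix
b = torch.randn(num_classes)               # calibration bias vector
off_diagonal_l2, mu = 0.01, 0.01           # illustrative coefficients

# ~eye(...) selects only the off-diagonal entries of w.
off_diag = torch.masked_select(w, ~torch.eye(num_classes, dtype=torch.bool))
penalty = off_diagonal_l2 * torch.linalg.vector_norm(off_diag) + mu * torch.linalg.vector_norm(b, 2)
print(penalty)
```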
7
38
def get_video_input_devices_names() -> List[str]: # based on https://docs.microsoft.com/ru-ru/windows/win32/directshow/selecting-a-capture-device names = [] sys_dev_enum = strmif.ICreateDevEnum() if ole32.CoCreateInstance(uuids.CLSID_SystemDeviceEnum, None, ole32.CLSCTX.CLSCTX_INPROC_SERVER, strmif.ICreateDevEnum.IID, sys_dev_enum) == wintypes.ERROR.SUCCESS: pEnumCat = objidl.IEnumMoniker() if sys_dev_enum.CreateClassEnumerator(uuids.CLSID_VideoInputDeviceCategory, pEnumCat, 0) == wintypes.ERROR.SUCCESS: moniker = objidl.IMoniker() while pEnumCat.Next(1, moniker, None) == wintypes.ERROR.SUCCESS: prop_bag = oaidl.IPropertyBag() if moniker.BindToStorage(None, None, oaidl.IPropertyBag.IID, prop_bag) == wintypes.ERROR.SUCCESS: var = wintypes.VARIANT() hr = prop_bag.Read(wintypes.LPCOLESTR('Description'), var, None ) if hr != wintypes.ERROR.SUCCESS: hr = prop_bag.Read(wintypes.LPCOLESTR('FriendlyName'), var, None ) names.append(var.value.bstrVal.value if hr == wintypes.ERROR.SUCCESS else 'unnamed') prop_bag.Release() moniker.Release() pEnumCat.Release() sys_dev_enum.Release() return names
xlib/api/win32/dshow/helper.py
363
DeepFaceLive
{ "docstring": "\n Returns a list of available names of VideoInputDevices.\n\n ole32 should be initialized before use.\n ", "language": "en", "n_whitespaces": 24, "n_words": 14, "vocab_size": 13 }
82
Python
55
2be32787538f1b0ef83f648ee60d2d4d4868d3fd
helper.py
179,091
25
230
get_video_input_devices_names
https://github.com/iperov/DeepFaceLive.git
update xlib.api.win32
317
0
42,899
21
1
2
def more_better_error_messages(func):
src/sentry/db/postgres/decorators.py
13
sentry
{ "docstring": "\n Wraps functions where the first param is a SQL statement and enforces\n any exceptions thrown will also contain the statement in the message.\n ", "language": "en", "n_whitespaces": 33, "n_words": 23, "vocab_size": 20 }
2
Python
2
71583b888a5c079749333875a0bbb277188ef693
decorators.py
96,697
4
15
more_better_error_messages
https://github.com/getsentry/sentry.git
ref(lang): 🙊 (#32292)
5
0
19,339
6
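The `more_better_error_messages` record above shows only the decorator's signature; per its docstring it wraps functions whose first argument is a SQL statement so that raised exceptions also carry that statement. A generic sketch of a decorator with that behaviour, written from the docstring rather than Sentry's code:

```python
import functools

def attach_sql_to_errors(func):
    """Re-raise exceptions from `func` with the SQL statement appended to the message."""
    @functools.wraps(func)
    def inner(self, sql, *args, **kwargs):
        try:
            return func(self, sql, *args, **kwargs)
        except Exception as exc:
            first = exc.args[0] if exc.args else ""
            exc.args = (f"{first}\nSQL: {sql}",) + exc.args[1:]
            raise
    return inner
```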
40
42
def meta_from_array(x, ndim=None, dtype=None): # If using x._meta, x must be a Dask Array, some libraries (e.g. zarr) # implement a _meta attribute that are incompatible with Dask Array._meta if hasattr(x, "_meta") and isinstance(x, Array): x = x._meta if dtype is None and x is None: raise ValueError("You must specify the meta or dtype of the array") if np.isscalar(x): x = np.array(x) if x is None: x = np.ndarray elif dtype is None and hasattr(x, "dtype"): dtype = x.dtype if isinstance(x, type): x = x(shape=(0,) * (ndim or 0), dtype=dtype) if isinstance(x, list) or isinstance(x, tuple): ndims = [ 0 if isinstance(a, numbers.Number) else a.ndim if hasattr(a, "ndim") else len(a) for a in x ] a = [a if nd == 0 else meta_from_array(a, nd) for a, nd in zip(x, ndims)] return a if isinstance(x, list) else tuple(x) if ( not hasattr(x, "shape") or not hasattr(x, "dtype") or not isinstance(x.shape, tuple) ): return x if ndim is None: ndim = x.ndim try: meta = x[tuple(slice(0, 0, None) for _ in range(x.ndim))] if meta.ndim != ndim: if ndim > x.ndim: meta = meta[(Ellipsis,) + tuple(None for _ in range(ndim - meta.ndim))] meta = meta[tuple(slice(0, 0, None) for _ in range(meta.ndim))] elif ndim == 0: meta = meta.sum() else: meta = meta.reshape((0,) * ndim) if meta is np.ma.masked: meta = np.ma.array(np.empty((0,) * ndim, dtype=dtype or x.dtype), mask=True) except Exception: meta = np.empty((0,) * ndim, dtype=dtype or x.dtype) if np.isscalar(meta): meta = np.array(meta) if dtype and meta.dtype != dtype: try: meta = meta.astype(dtype) except ValueError as e: if ( any( s in str(e) for s in [ "invalid literal", "could not convert string to float", ] ) and meta.dtype.kind in "SU" ): meta = np.array([]).astype(dtype) else: raise e return meta
dask/array/utils.py
816
dask
{ "docstring": "Normalize an array to appropriate meta object\n\n Parameters\n ----------\n x: array-like, callable\n Either an object that looks sufficiently like a Numpy array,\n or a callable that accepts shape and dtype keywords\n ndim: int\n Number of dimensions of the array\n dtype: Numpy dtype\n A valid input for ``np.dtype``\n\n Returns\n -------\n array-like with zero elements of the correct dtype\n ", "language": "en", "n_whitespaces": 112, "n_words": 57, "vocab_size": 45 }
287
Python
149
7471eb3d1e9ccf085b70b219413aa891c8c2c167
utils.py
156,257
66
524
meta_from_array
https://github.com/dask/dask.git
masked scalars input to da.from_array (#8895)
887
0
36,621
21
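The `meta_from_array` record above normalises an array-like into an empty "meta" array with the requested `ndim` and `dtype`. A short usage sketch against the import path given in the record (assumes dask and NumPy are installed):

```python
import numpy as np
from dask.array.utils import meta_from_array

meta = meta_from_array(np.ones((4, 4), dtype="f8"), ndim=1)
print(meta.shape, meta.dtype)      # (0,) float64

meta32 = meta_from_array(np.ones(3), ndim=2, dtype="i4")
print(meta32.shape, meta32.dtype)  # (0, 0) int32
```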
14
49
def generate_deleted_models(self): new_keys = self.new_model_keys | self.new_unmanaged_keys deleted_models = self.old_model_keys - new_keys deleted_unmanaged_models = self.old_unmanaged_keys - new_keys all_deleted_models = chain( sorted(deleted_models), sorted(deleted_unmanaged_models) ) for app_label, model_name in all_deleted_models: model_state = self.from_state.models[app_label, model_name] # Gather related fields related_fields = {} for field_name, field in model_state.fields.items(): if field.remote_field: if field.remote_field.model: related_fields[field_name] = field if getattr(field.remote_field, "through", None): related_fields[field_name] = field # Generate option removal first unique_together = model_state.options.pop("unique_together", None) index_together = model_state.options.pop("index_together", None) if unique_together: self.add_operation( app_label, operations.AlterUniqueTogether( name=model_name, unique_together=None, ), ) if index_together: self.add_operation( app_label, operations.AlterIndexTogether( name=model_name, index_together=None, ), ) # Then remove each related field for name in sorted(related_fields): self.add_operation( app_label, operations.RemoveField( model_name=model_name, name=name, ), ) # Finally, remove the model. # This depends on both the removal/alteration of all incoming fields # and the removal of all its own related fields, and if it's # a through model the field that references it. dependencies = [] relations = self.from_state.relations for ( related_object_app_label, object_name, ), relation_related_fields in relations[app_label, model_name].items(): for field_name, field in relation_related_fields.items(): dependencies.append( (related_object_app_label, object_name, field_name, False), ) if not field.many_to_many: dependencies.append( ( related_object_app_label, object_name, field_name, "alter", ), ) for name in sorted(related_fields): dependencies.append((app_label, model_name, name, False)) # We're referenced in another field's through= through_user = self.through_users.get((app_label, model_state.name_lower)) if through_user: dependencies.append( (through_user[0], through_user[1], through_user[2], False) ) # Finally, make the operation, deduping any dependencies self.add_operation( app_label, operations.DeleteModel( name=model_state.name, ), dependencies=list(set(dependencies)), )
django/db/migrations/autodetector.py
593
django
{ "docstring": "\n Find all deleted models (managed and unmanaged) and make delete\n operations for them as well as separate operations to delete any\n foreign key or M2M relationships (these are optimized later, if\n possible).\n\n Also bring forward removal of any model options that refer to\n collections of fields - the inverse of generate_created_models().\n ", "language": "en", "n_whitespaces": 101, "n_words": 51, "vocab_size": 43 }
223
Python
130
9c19aff7c7561e3a82978a272ecdaad40dda5c00
autodetector.py
205,280
75
389
generate_deleted_models
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
1,559
0
51,062
17
3
7
def all_subclasses(cls): return set(cls.__subclasses__()).union([s for c in cls.__subclasses__() for s in all_subclasses(c)])
scripts/extract_schema.py
61
ludwig
{ "docstring": "Returns recursively-generated list of all children classes inheriting from given `cls`.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
12
Python
10
23a33eef3bc7ea3ba33ec56dc9b56ba38462648a
extract_schema.py
6,557
2
37
all_subclasses
https://github.com/ludwig-ai/ludwig.git
feat: Modify Trainer to use marshmallow_dataclass syntax for handling hyperparameters. Add basic scripting for docstring extraction to marshmallow schema. Fix some existing marshmallow issues. (#1606)
18
0
1,029
11
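The `all_subclasses` record above recurses through `__subclasses__()`. A tiny self-contained example (illustrative class names) showing why the recursion matters:

```python
def all_subclasses(cls):
    return set(cls.__subclasses__()).union(
        [s for c in cls.__subclasses__() for s in all_subclasses(c)]
    )

class Base: ...
class Child(Base): ...
class GrandChild(Child): ...

print(Base.__subclasses__())   # [Child] -- direct children only
print(all_subclasses(Base))    # {Child, GrandChild}
```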
1
11
def test_second_upgrade_after_delay(self) -> None: channel1 = self._upgrade_room() self.assertEqual(200, channel1.code, channel1.result) channel2 = self._upgrade_room(expire_cache=True) self.assertEqual(200, channel2.code, channel2.result) self.assertNotEqual( channel1.json_body["replacement_room"], channel2.json_body["replacement_room"], )
tests/rest/client/test_upgrade_room.py
115
synapse
{ "docstring": "A second room upgrade is not deduplicated after some time has passed.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
20
Python
18
99d3931974e65865d1102ee79d7b7e2b017a3180
test_upgrade_room.py
248,621
10
72
test_second_upgrade_after_delay
https://github.com/matrix-org/synapse.git
Add more tests for room upgrades (#13074) Signed-off-by: Sean Quah <[email protected]>
91
0
72,379
9
1
8
def test_setup_connection_for_dialect_sqlite(sqlite_version, db_supports_row_number): instance_mock = MagicMock(_db_supports_row_number=True) execute_args = [] close_mock = MagicMock()
tests/components/recorder/test_util.py
44
core
{ "docstring": "Test setting up the connection for a sqlite dialect.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
12
Python
10
a4c1bcefb9d2a6f2aa0bc189fca496d46c78e3b0
test_util.py
300,937
22
143
test_setup_connection_for_dialect_sqlite
https://github.com/home-assistant/core.git
Tune sqlite based on configured settings (#72016)
24
0
99,791
9
1
10
def dry_run(self) -> None: pod = self.build_pod_request_obj() print(yaml.dump(prune_dict(pod.to_dict(), mode='strict')))
airflow/providers/cncf/kubernetes/operators/kubernetes_pod.py
62
airflow
{ "docstring": "\n Prints out the pod definition that would be created by this operator.\n Does not include labels specific to the task instance (since there isn't\n one in a dry_run) and excludes all empty elements.\n ", "language": "en", "n_whitespaces": 62, "n_words": 33, "vocab_size": 32 }
9
Python
9
04082ac091e92587b22c8323170ebe38bc68a19a
kubernetes_pod.py
46,963
8
35
dry_run
https://github.com/apache/airflow.git
Cleanup dup code now that k8s provider requires 2.3.0+ (#22845)
30
0
9,046
13
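The `dry_run` record above dumps the pod spec as YAML after stripping empty elements with `prune_dict(..., mode='strict')`. A generic sketch of that kind of pruning on a plain nested dict — not Airflow's helper, just the idea (assumes PyYAML):

```python
import yaml

def prune_empty(obj):
    # Recursively drop None values and empty dicts/lists.
    if isinstance(obj, dict):
        pruned = {k: prune_empty(v) for k, v in obj.items()}
        return {k: v for k, v in pruned.items() if v not in (None, {}, [])}
    if isinstance(obj, list):
        pruned = [prune_empty(v) for v in obj]
        return [v for v in pruned if v not in (None, {}, [])]
    return obj

pod = {"metadata": {"labels": {}}, "spec": {"containers": [{"name": "base", "env": []}]}}
print(yaml.dump(prune_empty(pod)), end="")
# spec:
#   containers:
#   - name: base
```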
2
19
def get_2d_sincos_pos_embed(embed_dim, grid_size, add_cls_token=False): grid_h = tf.range(grid_size, dtype=tf.float32) grid_w = tf.range(grid_size, dtype=tf.float32) grid = tf.meshgrid(grid_w, grid_h) # here w goes first grid = tf.stack(grid, axis=0) grid = tf.reshape(grid, [2, 1, grid_size, grid_size]) pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) if add_cls_token: pos_embed = tf.concat([tf.zeros((1, embed_dim)), pos_embed], axis=0) return pos_embed
src/transformers/models/vit_mae/modeling_tf_vit_mae.py
176
transformers
{ "docstring": "\n Create 2D sin/cos positional embeddings.\n\n Args:\n embed_dim (`int`):\n Embedding dimension.\n grid_size (`int`):\n The grid height and width.\n add_cls_token (`bool`, *optional*, defaults to `False`):\n Whether or not to add a classification (CLS) token.\n\n Returns:\n (`tf.Tensor` of shape (grid_size*grid_size, embed_dim) or (1+grid_size*grid_size, embed_dim): the position\n embeddings (with or without classification token)\n ", "language": "en", "n_whitespaces": 130, "n_words": 49, "vocab_size": 44 }
46
Python
32
5b40a37bc4da9dc6cd33876ce9bb3f7f48450a03
modeling_tf_vit_mae.py
36,650
10
118
get_2d_sincos_pos_embed
https://github.com/huggingface/transformers.git
Add TF ViT MAE (#16255) * ported TFViTMAEIntermediate and TFViTMAEOutput. * added TFViTMAEModel and TFViTMAEDecoder. * feat: added a noise argument in the implementation for reproducibility. * feat: vit mae models with an additional noise argument for reproducibility. Co-authored-by: ariG23498 <[email protected]> Co-authored-by: ydshieh <[email protected]>
81
0
6,659
14
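The `get_2d_sincos_pos_embed` record above builds fixed 2-D sin/cos position embeddings over a grid. A NumPy sketch of the standard 1-D sin/cos block that such 2-D builders concatenate per axis (generic formulation, not the exact helper referenced in the record):

```python
import numpy as np

def sincos_1d(embed_dim, positions):
    # Half the channels get sin, half cos, with geometrically spaced frequencies.
    assert embed_dim % 2 == 0
    omega = 1.0 / 10000 ** (np.arange(embed_dim // 2) / (embed_dim / 2.0))
    angles = np.outer(positions, omega)            # (num_positions, embed_dim // 2)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)

grid_size, embed_dim = 14, 768
one_axis = sincos_1d(embed_dim // 2, np.arange(grid_size))
print(one_axis.shape)  # (14, 384); stacking both axes over all 14*14 points gives (196, 768)
```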
5
26
def _test_for_audio_stream(self) -> bool: exe = im_ffm.get_ffmpeg_exe() cmd = [exe, "-hide_banner", "-i", self._source_video, "-f", "ffmetadata", "-"] try: out = check_output(cmd, stderr=STDOUT) except CalledProcessError as err: out = err.output.decode(errors="ignore") raise ValueError("Error checking audio stream. Status: " f"{err.returncode}\n{out}") from err retval = False for line in out.splitlines(): if not line.strip().startswith(b"Stream #"): continue logger.debug("scanning Stream line: %s", line.decode(errors="ignore").strip()) if b"Audio" in line: retval = True break logger.debug("Audio found: %s", retval) return retval
plugins/convert/writer/ffmpeg.py
251
faceswap
{ "docstring": " Check whether the source video file contains an audio stream.\n\n If we attempt to mux audio from a source video that does not contain an audio stream\n ffmpeg will crash faceswap in a fairly ugly manner.\n\n Returns\n -------\n bool\n ``True if an audio stream is found in the source video file, otherwise ``False``\n\n Raises\n ------\n RuntimeError\n If a subprocess error is raised scanning the input video file\n ", "language": "en", "n_whitespaces": 153, "n_words": 67, "vocab_size": 48 }
69
Python
57
5a8b5d7b3c6b0b413fe2b4d9247b9dd0cd692fa0
ffmpeg.py
100,688
34
138
_test_for_audio_stream
https://github.com/deepfakes/faceswap.git
bugfix: ffmpeg writer - prevent crash if no audio in source
271
0
20,144
15
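The `_test_for_audio_stream` record above shells out to ffmpeg and scans its metadata output for a `Stream ... Audio` line. A stripped-down sketch of the same probe against a plain `ffmpeg` binary on `$PATH` (assumes ffmpeg is installed; error handling reduced to the essentials):

```python
from subprocess import STDOUT, CalledProcessError, check_output

def has_audio_stream(video_path: str) -> bool:
    cmd = ["ffmpeg", "-hide_banner", "-i", video_path, "-f", "ffmetadata", "-"]
    try:
        out = check_output(cmd, stderr=STDOUT)
    except CalledProcessError as err:  # ffmpeg exits non-zero for some inputs
        out = err.output
    return any(
        line.strip().startswith(b"Stream #") and b"Audio" in line
        for line in out.splitlines()
    )
```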
4
12
def process_dict_for_yaml_dump(data): for k, v in data.items(): if isinstance(v, dict): data[k] = process_dict_for_yaml_dump(v) elif isinstance(v, str): data[k] = remove_ansi_escape_sequences(v) return data @click.group(help="CLI for managing Serve instances on a Ray cluster.")
python/ray/serve/scripts.py
102
@click.group(help="CLI for managing Serve instances on a Ray cluster.")
ray
{ "docstring": "\n Removes ANSI escape sequences recursively for all strings in dict.\n\n We often need to use yaml.dump() to print dictionaries that contain exception\n tracebacks, which can contain ANSI escape sequences that color printed text. However\n yaml.dump() will format the tracebacks incorrectly if ANSI escape sequences are\n present, so we need to remove them before dumping.\n ", "language": "en", "n_whitespaces": 73, "n_words": 54, "vocab_size": 42 }
30
Python
26
b856daebbdc923a216ce412be477c61e6cc5707e
scripts.py
125,444
7
53
process_dict_for_yaml_dump
https://github.com/ray-project/ray.git
[Serve] Fix Formatting of Error Messages printed in `serve status` (#26578)
74
1
27,872
13
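The `process_dict_for_yaml_dump` record above recursively strips ANSI colour codes from every string in a nested dict before `yaml.dump()`. A self-contained sketch with the escape-sequence regex written out (the record's `remove_ansi_escape_sequences` helper is assumed to do essentially this):

```python
import re

ANSI_ESCAPE = re.compile(r"\x1b\[[0-9;]*m")

def strip_ansi_recursive(data):
    if isinstance(data, dict):
        return {k: strip_ansi_recursive(v) for k, v in data.items()}
    if isinstance(data, str):
        return ANSI_ESCAPE.sub("", data)
    return data

status = {"app": {"message": "\x1b[31mTraceback (most recent call last):\x1b[0m ..."}}
print(strip_ansi_recursive(status))
# {'app': {'message': 'Traceback (most recent call last): ...'}}
```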
1
2
async def test_inject_db(db):
tests/orion/database/test_dependencies.py
14
prefect
{ "docstring": "\n Regression test for async-mangling behavior of inject_db() decorator.\n\n Previously, when wrapping a coroutine function, the decorator returned\n that function's coroutine object, instead of the coroutine function.\n\n This worked fine in most cases because both a coroutine function and a\n coroutine object can be awaited, but it broke our Pytest setup because\n we were auto-marking coroutine functions as async, and any async test\n wrapped by inject_db() was no longer a coroutine function, but instead\n a coroutine object, so we skipped marking it.\n ", "language": "en", "n_whitespaces": 109, "n_words": 81, "vocab_size": 59 }
3
Python
3
86956bde0a7efe9699703c5a318afdc76a59efab
test_dependencies.py
52,989
5
25
test_inject_db
https://github.com/PrefectHQ/prefect.git
Expand on regression test description
6
0
10,682
6
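The `test_inject_db` record above guards against a decorator that hands back a coroutine *object* instead of a coroutine *function*. The distinction is easy to show in isolation (generic example, not Prefect's `inject_db`):

```python
import asyncio
import functools
import inspect

async def fetch():
    return 42

def good_decorator(fn):
    @functools.wraps(fn)
    async def wrapper(*args, **kwargs):
        return await fn(*args, **kwargs)
    return wrapper                     # still a coroutine function

def bad_decorator(fn):
    return fn()                        # a coroutine object: awaitable once, not callable

print(inspect.iscoroutinefunction(good_decorator(fetch)))  # True
bad = bad_decorator(fetch)
print(inspect.iscoroutinefunction(bad))                    # False
print(asyncio.run(good_decorator(fetch)()), asyncio.run(bad))  # 42 42
```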
1
23
def build_agent_spaces(self) -> Tuple[Space, Space]: # noqa: E501 action_space = Discrete(19) # The football field's corners are [+-1., +-0.42]. However, the players # and balls may get out of the field. Thus we multiply those limits by # a factor of 2. xlim = 1.0 * 2 ylim = 0.42 * 2 num_players: int = 11 xy_space = Box( np.array([-xlim, -ylim], dtype=np.float32), np.array([xlim, ylim], dtype=np.float32), ) xyz_space = Box( np.array([-xlim, -ylim, 0], dtype=np.float32), np.array([xlim, ylim, np.inf], dtype=np.float32), ) observation_space = DictSpace( { "controlled_players": Discrete(2), "players_raw": TupleSpace( [ DictSpace( { # ball information "ball": xyz_space, "ball_direction": Box(-np.inf, np.inf, (3,)), "ball_rotation": Box(-np.inf, np.inf, (3,)), "ball_owned_team": Discrete(3), "ball_owned_player": Discrete(num_players + 1), # left team "left_team": TupleSpace([xy_space] * num_players), "left_team_direction": TupleSpace( [xy_space] * num_players ), "left_team_tired_factor": Box(0.0, 1.0, (num_players,)), "left_team_yellow_card": MultiBinary(num_players), "left_team_active": MultiBinary(num_players), "left_team_roles": MultiDiscrete([10] * num_players), # right team "right_team": TupleSpace([xy_space] * num_players), "right_team_direction": TupleSpace( [xy_space] * num_players ), "right_team_tired_factor": Box( 0.0, 1.0, (num_players,) ), "right_team_yellow_card": MultiBinary(num_players), "right_team_active": MultiBinary(num_players), "right_team_roles": MultiDiscrete([10] * num_players), # controlled player information "active": Discrete(num_players), "designated": Discrete(num_players), "sticky_actions": MultiBinary(10), # match state "score": Box(-np.inf, np.inf, (2,)), "steps_left": Box(0, np.inf, (1,)), "game_mode": Discrete(7), } ) ] ), } ) return action_space, observation_space
rllib/env/wrappers/kaggle_wrapper.py
627
ray
{ "docstring": "Construct the action and observation spaces\n\n Description of actions and observations:\n https://github.com/google-research/football/blob/master/gfootball/doc/\n observation.md\n ", "language": "en", "n_whitespaces": 41, "n_words": 13, "vocab_size": 12 }
192
Python
129
8e680c483ce326cefc62e44f68ab1a6948b1c3d2
kaggle_wrapper.py
137,755
62
408
build_agent_spaces
https://github.com/ray-project/ray.git
[RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). (#28369)
1,613
0
31,237
21
1
5
def column_names(self) -> List[str]: return self._data.column_names
src/datasets/arrow_dataset.py
29
datasets
{ "docstring": "Names of the columns in the dataset.\n\n Example:\n\n ```py\n >>> from datasets import load_dataset\n >>> ds = load_dataset(\"rotten_tomatoes\", split=\"validation\")\n >>> ds.column_names\n ['text', 'label']\n ```\n ", "language": "en", "n_whitespaces": 80, "n_words": 24, "vocab_size": 21 }
6
Python
6
445107bae3fcd6ac9eeae503232960fa4ba8ccfd
arrow_dataset.py
104,761
13
17
column_names
https://github.com/huggingface/datasets.git
Add code examples to API docs (#4168) * add code examples for functions related to the base dataset class * ✨ make style * 🖍 make each code example fully reproducible where applicable * 🖍 show parameter usage for some functions * 🖍 add examples for DatasetInfo functions
20
0
21,956
7
2
10
def clean_copy(self) -> "StorableObject": if self.is_proxy: self._data.generate_presigned_url() return StorableObject( id=self.id, data=self._data, tags=self.tags, description=self.description, ) else: return StorableObject( id=self.id, data=self.data, tags=self.tags, description=self.description )
packages/syft/src/syft/core/store/storeable_object.py
119
PySyft
{ "docstring": "\n This method returns a copy of self, but cleans up the search_permissions and\n read_permissions attributes.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 15 }
22
Python
17
2260fe77c69381a2c815a7213562115969cbf8a3
storeable_object.py
745
17
77
clean_copy
https://github.com/OpenMined/PySyft.git
syft: integrate upload to s3 in send method - data proxy property in storable object class - add method to get presigned GET url in ProxyDataClass - update .get method to support s3 presigned url in case of proxy data class Co-authored-by: IonesioJunior <[email protected]>
173
0
110
12
7
47
def test_cluster_rllib_restore(start_connected_cluster, tmpdir): cluster = start_connected_cluster dirpath = str(tmpdir) script = .format( address=cluster.address, checkpoint_dir=dirpath ) run_string_as_driver_nonblocking(script) # Wait until the right checkpoint is saved. # The trainable returns every 0.5 seconds, so this should not miss # the checkpoint. local_checkpoint_dir = os.path.join(dirpath, "experiment") for i in range(100): if TrialRunner.checkpoint_exists(local_checkpoint_dir): # Inspect the internal trialrunner runner = TrialRunner( resume="LOCAL", local_checkpoint_dir=local_checkpoint_dir ) trials = runner.get_trials() last_res = trials[0].last_result if last_res and last_res.get("training_iteration"): break time.sleep(0.3) if not TrialRunner.checkpoint_exists(local_checkpoint_dir): raise RuntimeError("Checkpoint file didn't appear.") ray.shutdown() cluster.shutdown() cluster = _start_new_cluster() cluster.wait_for_nodes() # Restore properly from checkpoint trials2 = tune.run_experiments( { "experiment": { "run": "PG", "checkpoint_config": CheckpointConfig(checkpoint_frequency=1), "local_dir": dirpath, } }, resume=True, ) assert all(t.status == Trial.TERMINATED for t in trials2) ray.shutdown() cluster.shutdown() # TODO(ujvl): Fix test. @pytest.mark.skip(reason="Not very consistent.")
python/ray/tune/tests/test_cluster.py
365
@pytest.mark.skip(reason="Not very consistent.")
ray
{ "docstring": "\nimport time\nimport ray\nfrom ray import tune\n\nray.init(address=\"{address}\")\n\n\ntune.run(\n \"PG\",\n name=\"experiment\",\n config=dict(env=\"CartPole-v1\", framework=\"tf\"),\n stop=dict(training_iteration=10),\n local_dir=\"{checkpoint_dir}\",\n checkpoint_freq=1,\n max_failures=1,\n dict(experiment=kwargs),\n raise_on_failed_trial=False)\n", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 17 }
124
Python
95
e142be077f0c727ab11ba51ecaba9a98b7bfe474
test_cluster.py
128,591
55
204
test_cluster_rllib_restore
https://github.com/ray-project/ray.git
[tune] Store sync config/checkpoint config in experiment, trial (#29019) This is some clean-up required for future changes to the syncing/checkpointing behavior. At the moment we pass single attributes of these configs to the Experiment class, and then subsequently to the Trial class, from which it is passed on to the trainable. If we extend the configurability in the future (e.g. provide fallback mechanisms in the checkpoint config, or make retry wait times configurable in the sync config), we would have to add more and more attributes to these intermediate classes. Instead, we should just pass and store the full config. As a next follow-up, we can pass these configs to the Trainable. Signed-off-by: Kai Fricke <[email protected]>
396
1
28,755
15
3
15
def predict_snli(net, vocab, premise, hypothesis): premise = np.array(vocab[premise], ctx=d2l.try_gpu()) hypothesis = np.array(vocab[hypothesis], ctx=d2l.try_gpu()) label = np.argmax(net([premise.reshape((1, -1)), hypothesis.reshape((1, -1))]), axis=1) return 'entailment' if label == 0 else 'contradiction' if label == 1 \ else 'neutral' d2l.DATA_HUB['ml-100k'] = ( 'https://files.grouplens.org/datasets/movielens/ml-100k.zip', 'cd4dcac4241c8a4ad7badc7ca635da8a69dddb83')
d2l/mxnet.py
183
d2l-zh
{ "docstring": "Predict the logical relationship between the premise and hypothesis.\n\n Defined in :numref:`sec_natural-language-inference-attention`", "language": "en", "n_whitespaces": 14, "n_words": 12, "vocab_size": 11 }
40
Python
31
b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2
mxnet.py
158,223
7
104
predict_snli
https://github.com/d2l-ai/d2l-zh.git
[PaddlePaddle] Merge master into Paddle branch (#1186) * change 15.2 title in chinese version (#1109) change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘ * 修改部分语义表述 (#1105) * Update r0.17.5 (#1120) * Bump versions in installation * 94行typo: (“bert.mall”)->(“bert.small”) (#1129) * line 313: "bert.mall" -> "bert.small" (#1130) * fix: update language as native reader (#1114) * Fix the translation of "stride" (#1115) * Update index.md (#1118) 修改部分语义表述 * Update self-attention-and-positional-encoding.md (#1133) 依照本书的翻译习惯,将pooling翻译成汇聚 * maybe a comment false (#1149) * maybe a little false * maybe a little false * A minor bug in the rcnn section (Chinese edition) (#1148) * Update bert.md (#1137) 一个笔误 # 假设batch_size=2,num_pred_positions=3 # 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1] * Update calculus.md (#1135) * fix typo in git documentation (#1106) * fix: Update the Chinese translation in lr-scheduler.md (#1136) * Update lr-scheduler.md * Update chapter_optimization/lr-scheduler.md Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * fix translation for kaggle-house-price.md (#1107) * fix translation for kaggle-house-price.md * fix translation for kaggle-house-price.md Signed-off-by: sunhaizhou <[email protected]> * Update weight-decay.md (#1150) * Update weight-decay.md 关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解 关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。 并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释 解释为何会增加复杂性以及为何需要细粒度工具。 * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Fix a spelling error (#1161) * Update gru.md (#1152) The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state. 翻译错误 * Unify the function naming (#1113) Unify naming of the function 'init_xavier()'. * Update mlp-concise.md (#1166) * Update mlp-concise.md 语句不通顺 * Update environment.md 语序异常 * Update config.ini * fix the imprecise description (#1168) Co-authored-by: yuande <yuande> * fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175) * Fix some typos. (#1163) * Update batch-norm.md (#1170) fixing typos u->x in article * Update linear-regression.md (#1090) We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that 原译文把who也直接翻译出来了。 * Update mlp.md (#1117) * Update mlp.md 修改部分语义表述 * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Correct a translation error. (#1091) * Correct a translation error. 
* Update chapter_computer-vision/image-augmentation.md Co-authored-by: Aston Zhang <[email protected]> * Update aws.md (#1121) * Update aws.md * Update chapter_appendix-tools-for-deep-learning/aws.md Co-authored-by: Aston Zhang <[email protected]> * Update image-augmentation.md (#1093) * Update anchor.md (#1088) fix a minor issue in code * Update anchor.md * Update image-augmentation.md * fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087) * Avoid `torch.meshgrid` user warning (#1174) Avoids the following user warning: ```python ~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.) return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] ``` * bump to 2.0.0-beta1 * Update sequence.md * bump beta1 on readme * Add latex code block background to config * BLD: Bump python support version 3.9 (#1183) * BLD: Bump python support version 3.9 * Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4 * BLD: Bump torch and tensorflow * Update Jenkinsfile * Update chapter_installation/index.md * Update chapter_installation/index.md Co-authored-by: Aston Zhang <[email protected]> * Update config.ini * Update INFO.md * Update INFO.md * Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187) * resolve the conflicts * revise from publisher (#1089) * revise from publisher * d2l api * post_latex * revise from publisher * revise ch11 * Delete d2l-Copy1.bib * clear cache * rm d2lbook clear * debug anchor * keep original d2l doc Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: Aston Zhang <[email protected]> * 重复语句 (#1188) Co-authored-by: Aston Zhang <[email protected]> * Improve expression for chapter_preliminaries/pandas.md (#1184) * Update pandas.md * Improve expression * Improve expression * Update chapter_preliminaries/pandas.md Co-authored-by: Aston Zhang <[email protected]> * Improce expression for chapter_preliminaries/linear-algebra.md (#1185) * Improce expression * Improve code comments * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md Co-authored-by: Aston Zhang <[email protected]> * Fix multibox_detection bugs * Update d2l to 0.17.5 version * restore older version * Upgrade pandas * change to python3.8 * Test warning log * relocate warning log * test logs filtering * Update gru.md * Add DeprecationWarning filter * Test warning log * Update attention mechanisms & computational performance * Update multilayer perceptron& linear & convolution networks & computer vision * Update recurrent&optimition&nlp pretraining & nlp applications * ignore warnings * Update index.md * Update linear networks * Update multilayer perceptrons&deep learning computation * Update preliminaries * Check and Add warning filter * Update kaggle-cifar10.md * Update object-detection-dataset.md * Update ssd.md fcn.md * Update hybridize.md * Update hybridize.md Signed-off-by: sunhaizhou <[email protected]> Co-authored-by: zhou201505013 <[email protected]> Co-authored-by: Xinwei Liu <[email protected]> Co-authored-by: Anirudh Dagar <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: hugo_han <[email protected]> 
Co-authored-by: gyro永不抽风 <[email protected]> Co-authored-by: CanChengZheng <[email protected]> Co-authored-by: linlin <[email protected]> Co-authored-by: iuk <[email protected]> Co-authored-by: yoos <[email protected]> Co-authored-by: Mr. Justice Lawrence John Wargrave <[email protected]> Co-authored-by: Chiyuan Fu <[email protected]> Co-authored-by: Sunhuashan <[email protected]> Co-authored-by: Haiker Sun <[email protected]> Co-authored-by: Ming Liu <[email protected]> Co-authored-by: goldmermaid <[email protected]> Co-authored-by: silenceZheng66 <[email protected]> Co-authored-by: Wenchao Yan <[email protected]> Co-authored-by: Kiki2049 <[email protected]> Co-authored-by: Krahets <[email protected]> Co-authored-by: friedmainfunction <[email protected]> Co-authored-by: Jameson <[email protected]> Co-authored-by: P. Yao <[email protected]> Co-authored-by: Yulv-git <[email protected]> Co-authored-by: Liu,Xiao <[email protected]> Co-authored-by: YIN, Gang <[email protected]> Co-authored-by: Joe-HZ <[email protected]> Co-authored-by: lybloveyou <[email protected]> Co-authored-by: VigourJiang <[email protected]> Co-authored-by: zxhd863943427 <[email protected]> Co-authored-by: LYF <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: xiaotinghe <[email protected]> Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Holly-Max <[email protected]> Co-authored-by: HinGwenWoong <[email protected]> Co-authored-by: Shuai Zhang <[email protected]>
97
0
37,391
15
10
59
def _prepare_output_docstrings(output_type, config_class, min_indent=None): output_docstring = output_type.__doc__ # Remove the head of the docstring to keep the list of args only lines = output_docstring.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None: i += 1 if i < len(lines): params_docstring = "\n".join(lines[(i + 1) :]) params_docstring = _convert_output_args_doc(params_docstring) # Add the return introduction full_output_type = f"{output_type.__module__}.{output_type.__name__}" intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION intro = intro.format(full_output_type=full_output_type, config_class=config_class) result = intro + params_docstring # Apply minimum indent if necessary if min_indent is not None: lines = result.split("\n") # Find the indent of the first nonempty line i = 0 while len(lines[i]) == 0: i += 1 indent = len(_get_indent(lines[i])) # If too small, add indentation to all nonempty lines if indent < min_indent: to_add = " " * (min_indent - indent) lines = [(f"{to_add}{line}" if len(line) > 0 else line) for line in lines] result = "\n".join(lines) return result PT_TOKEN_CLASSIFICATION_SAMPLE = r PT_QUESTION_ANSWERING_SAMPLE = r PT_SEQUENCE_CLASSIFICATION_SAMPLE = r PT_MASKED_LM_SAMPLE = r PT_BASE_MODEL_SAMPLE = r PT_MULTIPLE_CHOICE_SAMPLE = r PT_CAUSAL_LM_SAMPLE = r PT_SPEECH_BASE_MODEL_SAMPLE = r PT_SPEECH_CTC_SAMPLE = r PT_SPEECH_SEQ_CLASS_SAMPLE = r PT_SPEECH_FRAME_CLASS_SAMPLE = r PT_SPEECH_XVECTOR_SAMPLE = r PT_VISION_BASE_MODEL_SAMPLE = r PT_VISION_SEQ_CLASS_SAMPLE = r PT_SAMPLE_DOCSTRINGS = { "SequenceClassification": PT_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": PT_QUESTION_ANSWERING_SAMPLE, "TokenClassification": PT_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": PT_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": PT_MASKED_LM_SAMPLE, "LMHead": PT_CAUSAL_LM_SAMPLE, "BaseModel": PT_BASE_MODEL_SAMPLE, "SpeechBaseModel": PT_SPEECH_BASE_MODEL_SAMPLE, "CTC": PT_SPEECH_CTC_SAMPLE, "AudioClassification": PT_SPEECH_SEQ_CLASS_SAMPLE, "AudioFrameClassification": PT_SPEECH_FRAME_CLASS_SAMPLE, "AudioXVector": PT_SPEECH_XVECTOR_SAMPLE, "VisionBaseModel": PT_VISION_BASE_MODEL_SAMPLE, "ImageClassification": PT_VISION_SEQ_CLASS_SAMPLE, } TF_TOKEN_CLASSIFICATION_SAMPLE = r TF_QUESTION_ANSWERING_SAMPLE = r TF_SEQUENCE_CLASSIFICATION_SAMPLE = r TF_MASKED_LM_SAMPLE = r TF_BASE_MODEL_SAMPLE = r TF_MULTIPLE_CHOICE_SAMPLE = r TF_CAUSAL_LM_SAMPLE = r TF_SAMPLE_DOCSTRINGS = { "SequenceClassification": TF_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": TF_QUESTION_ANSWERING_SAMPLE, "TokenClassification": TF_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": TF_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": TF_MASKED_LM_SAMPLE, "LMHead": TF_CAUSAL_LM_SAMPLE, "BaseModel": TF_BASE_MODEL_SAMPLE, } FLAX_TOKEN_CLASSIFICATION_SAMPLE = r FLAX_QUESTION_ANSWERING_SAMPLE = r FLAX_SEQUENCE_CLASSIFICATION_SAMPLE = r FLAX_MASKED_LM_SAMPLE = r FLAX_BASE_MODEL_SAMPLE = r FLAX_MULTIPLE_CHOICE_SAMPLE = r FLAX_CAUSAL_LM_SAMPLE = r FLAX_SAMPLE_DOCSTRINGS = { "SequenceClassification": FLAX_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": FLAX_QUESTION_ANSWERING_SAMPLE, "TokenClassification": FLAX_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": FLAX_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": FLAX_MASKED_LM_SAMPLE, "BaseModel": FLAX_BASE_MODEL_SAMPLE, "LMHead": FLAX_CAUSAL_LM_SAMPLE, }
src/transformers/utils/doc.py
784
transformers
{ "docstring": "\n Prepares the return part of the docstring using `output_type`.\n \n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import torch\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> labels = torch.tensor([1] * inputs[\"input_ids\"].size(1)).unsqueeze(0) # Batch size 1\n\n >>> outputs = model(**inputs, labels=labels)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import torch\n\n >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> question, text = \"Who was Jim Henson?\", \"Jim Henson was a nice puppet\"\n >>> inputs = tokenizer(question, text, return_tensors=\"pt\")\n >>> start_positions = torch.tensor([1])\n >>> end_positions = torch.tensor([3])\n\n >>> outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions)\n >>> loss = outputs.loss\n >>> round(loss.item(), 2)\n {expected_loss}\n\n >>> start_scores = outputs.start_logits\n >>> list(start_scores.shape)\n {expected_output}\n\n >>> end_scores = outputs.end_logits\n >>> list(end_scores.shape)\n {expected_output}\n ```\n\n Example of single-label classification:\n\n ```python\n >>> import torch\n >>> from transformers import {processor_class}, {model_class}\n\n >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\", num_labels=2)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1\n >>> outputs = model(**inputs, labels=labels)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n >>> list(logits.shape)\n {expected_output}\n ```\n\n Example of multi-label classification:\n\n ```python\n >>> import torch\n >>> from transformers import {processor_class}, {model_class}\n\n >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\", problem_type=\"multi_label_classification\", num_labels=2)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss\n >>> outputs = model(**inputs, labels=labels)\n >>> loss = outputs.loss\n >>> list(logits.shape)\n {expected_output}\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import torch\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"The capital of France is {mask}.\", return_tensors=\"pt\")\n >>> labels = tokenizer(\"The capital of France is Paris.\", return_tensors=\"pt\")[\"input_ids\"]\n\n >>> outputs = model(**inputs, labels=labels)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import torch\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model 
= {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> last_hidden_states = outputs.last_hidden_state\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import torch\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> prompt = \"In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.\"\n >>> choice0 = \"It is eaten with a fork and a knife.\"\n >>> choice1 = \"It is eaten while held in the hand.\"\n >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1\n\n >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=\"pt\", padding=True)\n >>> outputs = model(**{{k: v.unsqueeze(0) for k, v in encoding.items()}}, labels=labels) # batch size is 1\n\n >>> # the linear classifier still needs to be trained\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> import torch\n >>> from transformers import {processor_class}, {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs, labels=inputs[\"input_ids\"])\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import torch\n >>> from datasets import load_dataset\n\n >>> dataset = load_dataset(\"hf-internal-testing/librispeech_asr_demo\", \"clean\", split=\"validation\")\n >>> dataset = dataset.sort(\"id\")\n >>> sampling_rate = dataset.features[\"audio\"].sampling_rate\n\n >>> processor = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> # audio file is decoded on the fly\n >>> inputs = processor(dataset[0][\"audio\"][\"array\"], sampling_rate=sampling_rate, return_tensors=\"pt\")\n >>> with torch.no_grad():\n ... outputs = model(**inputs)\n\n >>> last_hidden_states = outputs.last_hidden_state\n >>> list(last_hidden_states.shape)\n {expected_output}\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> from datasets import load_dataset\n >>> import torch\n\n >>> dataset = load_dataset(\"hf-internal-testing/librispeech_asr_demo\", \"clean\", split=\"validation\")\n >>> dataset = dataset.sort(\"id\")\n >>> sampling_rate = dataset.features[\"audio\"].sampling_rate\n\n >>> processor = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> # audio file is decoded on the fly\n >>> inputs = processor(dataset[0][\"audio\"][\"array\"], sampling_rate=sampling_rate, return_tensors=\"pt\")\n >>> with torch.no_grad():\n ... logits = model(**inputs).logits\n >>> predicted_ids = torch.argmax(logits, dim=-1)\n\n >>> # transcribe speech\n >>> transcription = processor.batch_decode(predicted_ids)\n >>> transcription[0]\n {expected_output}\n ```\n\n ```python\n >>> with processor.as_target_processor():\n ... 
inputs[\"labels\"] = processor(dataset[0][\"text\"], return_tensors=\"pt\").input_ids\n\n >>> # compute loss\n >>> loss = model(**inputs).loss\n >>> round(loss.item(), 2)\n {expected_loss}\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> from datasets import load_dataset\n >>> import torch\n\n >>> dataset = load_dataset(\"hf-internal-testing/librispeech_asr_demo\", \"clean\", split=\"validation\")\n >>> dataset = dataset.sort(\"id\")\n >>> sampling_rate = dataset.features[\"audio\"].sampling_rate\n\n >>> feature_extractor = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> # audio file is decoded on the fly\n >>> inputs = feature_extractor(dataset[0][\"audio\"][\"array\"], sampling_rate=sampling_rate, return_tensors=\"pt\")\n\n >>> with torch.no_grad():\n ... logits = model(**inputs).logits\n\n >>> predicted_class_ids = torch.argmax(logits, dim=-1).item()\n >>> predicted_label = model.config.id2label[predicted_class_ids]\n >>> predicted_label\n {expected_output}\n ```\n\n ```python\n >>> # compute loss - target_label is e.g. \"down\"\n >>> target_label = model.config.id2label[0]\n >>> inputs[\"labels\"] = torch.tensor([model.config.label2id[target_label]])\n >>> loss = model(**inputs).loss\n >>> round(loss.item(), 2)\n {expected_loss}\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> from datasets import load_dataset\n >>> import torch\n\n >>> dataset = load_dataset(\"hf-internal-testing/librispeech_asr_demo\", \"clean\", split=\"validation\")\n >>> dataset = dataset.sort(\"id\")\n >>> sampling_rate = dataset.features[\"audio\"].sampling_rate\n\n >>> feature_extractor = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> # audio file is decoded on the fly\n >>> inputs = feature_extractor(dataset[0][\"audio\"][\"array\"], return_tensors=\"pt\", sampling_rate=sampling_rate)\n >>> with torch.no_grad():\n ... logits = model(**inputs).logits\n\n >>> probabilities = torch.sigmoid(logits[0])\n >>> # labels is a one-hot array of shape (num_frames, num_speakers)\n >>> labels = (probabilities > 0.5).long()\n >>> labels[0].tolist()\n {expected_output}\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> from datasets import load_dataset\n >>> import torch\n\n >>> dataset = load_dataset(\"hf-internal-testing/librispeech_asr_demo\", \"clean\", split=\"validation\")\n >>> dataset = dataset.sort(\"id\")\n >>> sampling_rate = dataset.features[\"audio\"].sampling_rate\n\n >>> feature_extractor = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> # audio file is decoded on the fly\n >>> inputs = feature_extractor(\n ... [d[\"array\"] for d in dataset[:2][\"audio\"]], sampling_rate=sampling_rate, return_tensors=\"pt\", padding=True\n ... )\n >>> with torch.no_grad():\n ... embeddings = model(**inputs).embeddings\n\n >>> embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu()\n\n >>> # the resulting embeddings can be used for cosine similarity-based retrieval\n >>> cosine_sim = torch.nn.CosineSimilarity(dim=-1)\n >>> similarity = cosine_sim(embeddings[0], embeddings[1])\n >>> threshold = 0.7 # the optimal threshold is dataset-dependent\n >>> if similarity < threshold:\n ... 
print(\"Speakers are not the same!\")\n >>> round(similarity.item(), 2)\n {expected_output}\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import torch\n >>> from datasets import load_dataset\n\n >>> dataset = load_dataset(\"huggingface/cats-image\")\n >>> image = dataset[\"test\"][\"image\"][0]\n\n >>> feature_extractor = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = feature_extractor(image, return_tensors=\"pt\")\n\n >>> with torch.no_grad():\n ... outputs = model(**inputs)\n\n >>> last_hidden_states = outputs.last_hidden_state\n >>> list(last_hidden_states.shape)\n {expected_output}\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import torch\n >>> from datasets import load_dataset\n\n >>> dataset = load_dataset(\"huggingface/cats-image\")\n >>> image = dataset[\"test\"][\"image\"][0]\n\n >>> feature_extractor = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = feature_extractor(image, return_tensors=\"pt\")\n\n >>> with torch.no_grad():\n ... logits = model(**inputs).logits\n\n >>> # model predicts one of the 1000 ImageNet classes\n >>> predicted_label = logits.argmax(-1).item()\n >>> print(model.config.id2label[predicted_label])\n {expected_output}\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"tf\")\n >>> input_ids = inputs[\"input_ids\"]\n >>> inputs[\"labels\"] = tf.reshape(\n ... 
tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))\n >>> ) # Batch size 1\n\n >>> outputs = model(inputs)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> question, text = \"Who was Jim Henson?\", \"Jim Henson was a nice puppet\"\n >>> input_dict = tokenizer(question, text, return_tensors=\"tf\")\n >>> outputs = model(input_dict)\n >>> start_logits = outputs.start_logits\n >>> end_logits = outputs.end_logits\n\n >>> all_tokens = tokenizer.convert_ids_to_tokens(input_dict[\"input_ids\"].numpy()[0])\n >>> answer = \" \".join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0] + 1])\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"tf\")\n >>> inputs[\"labels\"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1\n\n >>> outputs = model(inputs)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"The capital of France is {mask}.\", return_tensors=\"tf\")\n >>> inputs[\"labels\"] = tokenizer(\"The capital of France is Paris.\", return_tensors=\"tf\")[\"input_ids\"]\n\n >>> outputs = model(inputs)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"tf\")\n >>> outputs = model(inputs)\n\n >>> last_hidden_states = outputs.last_hidden_state\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> prompt = \"In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.\"\n >>> choice0 = \"It is eaten with a fork and a knife.\"\n >>> choice1 = \"It is eaten while held in the hand.\"\n\n >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=\"tf\", padding=True)\n >>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}}\n >>> outputs = model(inputs) # batch size is 1\n\n >>> # the linear classifier still needs to be trained\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"tf\")\n >>> outputs = 
model(inputs)\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"jax\")\n\n >>> outputs = model(**inputs)\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> question, text = \"Who was Jim Henson?\", \"Jim Henson was a nice puppet\"\n >>> inputs = tokenizer(question, text, return_tensors=\"jax\")\n\n >>> outputs = model(**inputs)\n >>> start_scores = outputs.start_logits\n >>> end_scores = outputs.end_logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"jax\")\n\n >>> outputs = model(**inputs)\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"The capital of France is {mask}.\", return_tensors=\"jax\")\n\n >>> outputs = model(**inputs)\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"jax\")\n >>> outputs = model(**inputs)\n\n >>> last_hidden_states = outputs.last_hidden_state\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> prompt = \"In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.\"\n >>> choice0 = \"It is eaten with a fork and a knife.\"\n >>> choice1 = \"It is eaten while held in the hand.\"\n\n >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=\"jax\", padding=True)\n >>> outputs = model(**{{k: v[None, :] for k, v in encoding.items()}})\n\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"np\")\n >>> outputs = model(**inputs)\n\n >>> # retrieve logts for next token\n >>> next_token_logits = outputs.logits[:, -1]\n ```\n", "language": "en", "n_whitespaces": 3163, "n_words": 1837, "vocab_size": 302 }
304
Python
165
4975002df50c472cbb6f8ac3580e475f570606ab
doc.py
36,379
24
209
_prepare_output_docstrings
https://github.com/huggingface/transformers.git
Reorganize file utils (#16264) * Split file_utils in several submodules * Fixes * Add back more objects * More fixes * Who exactly decided to import that from there? * Second suggestion to code with code review * Revert wront move * Fix imports * Adapt all imports * Adapt all imports everywhere * Revert this import, will fix in a separate commit
513
0
6,603
16
1
13
def test_medium_does_not_exist(self) -> None: # test for unknown medium url = "/_synapse/admin/v1/threepid/publickey/users/unknown-key" channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) # test for unknown user with a known medium url = "/_synapse/admin/v1/threepid/email/users/unknown" channel = self.make_request( "GET", url, access_token=self.admin_user_tok, ) self.assertEqual(404, channel.code, msg=channel.json_body) self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
tests/rest/admin/test_user.py
178
synapse
{ "docstring": "Tests that both a lookup for a medium that does not exist and a user that\n doesn't exist with that third party ID returns a 404", "language": "en", "n_whitespaces": 32, "n_words": 26, "vocab_size": 19 }
48
Python
28
a3623af74e0af0d2f6cbd37b47dc54a1acd314d5
test_user.py
249,805
19
110
test_medium_does_not_exist
https://github.com/matrix-org/synapse.git
Add an Admin API endpoint for looking up users based on 3PID (#14405)
205
0
73,140
10
1
2
async def _async_poll(self) -> None:
homeassistant/components/sonos/binary_sensor.py
17
core
{ "docstring": "Stub for abstract class implementation. Not a pollable attribute.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
5
Python
5
8a8ffa1c0844106a8827dd28b1d42792b366c5ee
binary_sensor.py
308,638
2
8
_async_poll
https://github.com/home-assistant/core.git
Add support for Sonos microphone binary_sensor (#63097) Co-authored-by: J. Nick Koston <[email protected]>
12
0
107,383
6
1
7
def argpartition(a, kth, axis=-1, kind='introselect', order=None): return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
numpy/core/fromnumeric.py
63
numpy
{ "docstring": "\n Perform an indirect partition along the given axis using the\n algorithm specified by the `kind` keyword. It returns an array of\n indices of the same shape as `a` that index data along the given\n axis in partitioned order.\n\n .. versionadded:: 1.8.0\n\n Parameters\n ----------\n a : array_like\n Array to sort.\n kth : int or sequence of ints\n Element index to partition by. The k-th element will be in its\n final sorted position and all smaller elements will be moved\n before it and all larger elements behind it. The order all\n elements in the partitions is undefined. If provided with a\n sequence of k-th it will partition all of them into their sorted\n position at once.\n\n .. deprecated:: 1.22.0\n Passing booleans as index is deprecated.\n axis : int or None, optional\n Axis along which to sort. The default is -1 (the last axis). If\n None, the flattened array is used.\n kind : {'introselect'}, optional\n Selection algorithm. Default is 'introselect'\n order : str or list of str, optional\n When `a` is an array with fields defined, this argument\n specifies which fields to compare first, second, etc. A single\n field can be specified as a string, and not all fields need be\n specified, but unspecified fields will still be used, in the\n order in which they come up in the dtype, to break ties.\n\n Returns\n -------\n index_array : ndarray, int\n Array of indices that partition `a` along the specified axis.\n If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`.\n More generally, ``np.take_along_axis(a, index_array, axis)``\n always yields the partitioned `a`, irrespective of dimensionality.\n\n See Also\n --------\n partition : Describes partition algorithms used.\n ndarray.partition : Inplace partition.\n argsort : Full indirect sort.\n take_along_axis : Apply ``index_array`` from argpartition\n to an array as if by calling partition.\n\n Notes\n -----\n See `partition` for notes on the different selection algorithms.\n\n Examples\n --------\n One dimensional array:\n\n >>> x = np.array([3, 4, 2, 1])\n >>> x[np.argpartition(x, 3)]\n array([2, 1, 3, 4])\n >>> x[np.argpartition(x, (1, 3))]\n array([1, 2, 3, 4])\n\n >>> x = [3, 4, 2, 1]\n >>> np.array(x)[np.argpartition(x, 3)]\n array([2, 1, 3, 4])\n\n Multi-dimensional array:\n\n >>> x = np.array([[3, 4, 2], [1, 3, 1]])\n >>> index_array = np.argpartition(x, kth=1, axis=-1)\n >>> np.take_along_axis(x, index_array, axis=-1) # same as np.partition(x, kth=1)\n array([[2, 3, 4],\n [1, 1, 3]])\n\n ", "language": "en", "n_whitespaces": 681, "n_words": 374, "vocab_size": 213 }
13
Python
12
95a7bb4746197a05fd23dbe39c7b3dbb105a18d9
fromnumeric.py
160,087
2
42
argpartition
https://github.com/numpy/numpy.git
DOC: typo corrected in numpy.argpartition (#21201) * DOC: numpy.argpartition typo corrected Co-authored-by: Matti Picus <[email protected]>
19
0
38,481
8
1
5
def get_ylim(self): return tuple(self.viewLim.intervaly)
lib/matplotlib/axes/_base.py
27
matplotlib
{ "docstring": "\n Return the y-axis view limits.\n\n Returns\n -------\n bottom, top : (float, float)\n The current y-axis limits in data coordinates.\n\n See Also\n --------\n .Axes.set_ylim\n set_ybound, get_ybound\n invert_yaxis, yaxis_inverted\n\n Notes\n -----\n The y-axis may be inverted, in which case the *bottom* value\n will be greater than the *top* value.\n ", "language": "en", "n_whitespaces": 157, "n_words": 47, "vocab_size": 40 }
4
Python
4
c6e43ff4cfd3cb583b30f9882d6228041edc0fd6
_base.py
107,579
2
15
get_ylim
https://github.com/matplotlib/matplotlib.git
Fix ambiguous link targets in docs.
18
0
22,826
9
3
9
def remove(self, key): self._vertices.remove(key) for f in self._forwards.pop(key): self._backwards[f].remove(key) for t in self._backwards.pop(key): self._forwards[t].remove(key)
.venv/lib/python3.8/site-packages/pip/_vendor/resolvelib/structs.py
98
transferlearning
{ "docstring": "Remove a vertex from the graph, disconnecting all edges from/to it.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
14
Python
12
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
structs.py
63,709
6
62
remove
https://github.com/jindongwang/transferlearning.git
upd; format
64
0
13,480
11
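A minimal sketch of the two-index (forwards/backwards) adjacency bookkeeping that the `remove` record above relies on; the `DiGraph` class, its `add`/`connect` helpers, and the plain-set storage are simplified stand-ins assumed for illustration, not the resolvelib implementation.

```python
class DiGraph:
    def __init__(self):
        self._forwards = {}   # key -> set of successors
        self._backwards = {}  # key -> set of predecessors

    def add(self, key):
        self._forwards[key] = set()
        self._backwards[key] = set()

    def connect(self, u, v):
        self._forwards[u].add(v)
        self._backwards[v].add(u)

    def remove(self, key):
        # Drop the vertex and detach every incident edge in both indexes.
        for f in self._forwards.pop(key):
            self._backwards[f].remove(key)
        for t in self._backwards.pop(key):
            self._forwards[t].remove(key)


g = DiGraph()
for k in "abc":
    g.add(k)
g.connect("a", "b")
g.connect("b", "c")
g.remove("b")
print(g._forwards)   # {'a': set(), 'c': set()}
print(g._backwards)  # {'a': set(), 'c': set()}
```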
3
27
def adaptive_parzen_normal(args, history_mus, prior_mu, prior_sigma): mus = np.append(history_mus, prior_mu) order = np.argsort(mus) mus = mus[order] prior_index = np.searchsorted(mus, prior_mu) if len(mus) == 1: sigmas = np.asarray([prior_sigma]) elif len(mus) == 2: sigmas = np.asarray([prior_sigma * 0.5, prior_sigma * 0.5]) sigmas[prior_index] = prior_sigma else: l_delta = mus[1:-1] - mus[:-2] r_delta = mus[2:] - mus[1:-1] sigmas_mid = np.maximum(l_delta, r_delta) sigmas = np.concatenate([[mus[1] - mus[0]], sigmas_mid, [mus[-1] - mus[-2]]]) sigmas[prior_index] = prior_sigma # "magic formula" in official implementation n = min(100, len(mus) + 1) sigmas = np.clip(sigmas, prior_sigma / n, prior_sigma) weights = np.append(linear_forgetting_weights(args, len(mus)), args.prior_weight) weights = weights[order] return weights / np.sum(weights), mus, sigmas
nni/algorithms/hpo/tpe_tuner.py
373
nni
{ "docstring": "\n The \"Adaptive Parzen Estimator\" described in paper section 4.2, for normal distribution.\n\n Because TPE internally only supports categorical and normal distributed space (domain),\n this function is used for everything other than \"choice\" and \"randint\".\n\n Parameters\n ----------\n args: TpeArguments\n Algorithm arguments.\n history_mus: 1-d array of float\n Parameter values evaluated in history.\n These are the \"observations\" in paper section 4.2. (\"placing density in the vicinity of K observations\")\n prior_mu: float\n µ value of normal search space.\n piror_sigma: float\n σ value of normal search space.\n\n Returns\n -------\n Tuple of three 1-d float arrays: (weight, µ, σ).\n\n The tuple represents N+1 \"vicinity of observations\" and each one's weight,\n calculated from \"N\" history and \"1\" user provided prior.\n\n The result is sorted by µ.\n ", "language": "en", "n_whitespaces": 203, "n_words": 119, "vocab_size": 91 }
101
Python
65
b52f7756fbcf6669dbe92e97e11415c4084cf881
tpe_tuner.py
111,911
21
249
adaptive_parzen_normal
https://github.com/microsoft/nni.git
HPO doc (#4579)
199
0
24,508
16
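A standalone sketch of the bandwidth (sigma) construction described in the `adaptive_parzen_normal` record above, assuming plain NumPy; the weighting, `prior_weight`, and linear-forgetting parts are left out, and `parzen_sigmas` is a hypothetical helper name, not the NNI API.

```python
import numpy as np


def parzen_sigmas(history_mus, prior_mu, prior_sigma):
    # Merge observed values with the prior mean and sort them.
    mus = np.sort(np.append(history_mus, prior_mu))
    prior_index = np.searchsorted(mus, prior_mu)
    if len(mus) == 1:
        sigmas = np.asarray([prior_sigma])
    elif len(mus) == 2:
        sigmas = np.asarray([prior_sigma * 0.5, prior_sigma * 0.5])
        sigmas[prior_index] = prior_sigma
    else:
        # Each interior point's sigma is the larger gap to its neighbours.
        l_delta = mus[1:-1] - mus[:-2]
        r_delta = mus[2:] - mus[1:-1]
        sigmas_mid = np.maximum(l_delta, r_delta)
        sigmas = np.concatenate([[mus[1] - mus[0]], sigmas_mid, [mus[-1] - mus[-2]]])
        sigmas[prior_index] = prior_sigma
    # Clip so kernels neither collapse nor get wider than the prior.
    n = min(100, len(mus) + 1)
    return mus, np.clip(sigmas, prior_sigma / n, prior_sigma)


mus, sigmas = parzen_sigmas(np.array([0.1, 0.4, 0.35]), prior_mu=0.5, prior_sigma=1.0)
print(mus)     # sorted kernel centers, including the prior mean
print(sigmas)  # per-center bandwidths
```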
1
26
def test_tqdm_progress_bar_print_no_train(tqdm_write, tmpdir): model = PrintModel() bar = TQDMProgressBar() trainer = Trainer( default_root_dir=tmpdir, num_sanity_val_steps=0, limit_val_batches=1, limit_test_batches=1, limit_predict_batches=1, max_steps=1, callbacks=[bar], ) trainer.validate(model) trainer.test(model) trainer.predict(model) assert tqdm_write.call_args_list == [ call("validation_step", file=sys.stderr), call("test_step"), call("predict_step"), ] @mock.patch("builtins.print") @mock.patch("pytorch_lightning.callbacks.progress.tqdm_progress.Tqdm.write")
tests/callbacks/test_tqdm_progress_bar.py
183
@mock.patch("builtins.print") @mock.patch("pytorch_lightning.callbacks.progress.tqdm_progress.Tqdm.write")
lightning
{ "docstring": "Test that printing in the LightningModule redirects arguments to the progress bar without training.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
34
Python
32
97710406210a64f94b135500742165d40ef69cf8
test_tqdm_progress_bar.py
241,747
20
99
test_tqdm_progress_bar_print_no_train
https://github.com/Lightning-AI/lightning.git
Add typing to `TQDMProgressBar` (#11369)
132
1
69,681
11
1
9
async def get_appservice_last_pos(self) -> int: return await self.db_pool.simple_select_one_onecol( table="appservice_stream_position", retcol="stream_ordering", keyvalues={}, desc="get_appservice_last_pos", )
synapse/storage/databases/main/appservice.py
60
synapse
{ "docstring": "\n Get the last stream ordering position for the appservice process.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 9 }
13
Python
13
21eeacc99551febcddcef21db96a2bd82166fc7e
appservice.py
248,735
10
34
get_appservice_last_pos
https://github.com/matrix-org/synapse.git
Federation Sender & Appservice Pusher Stream Optimisations (#13251) * Replace `get_new_events_for_appservice` with `get_all_new_events_stream` The functions were near identical and this brings the AS worker closer to the way federation senders work which can allow for multiple workers to handle AS traffic. * Pull received TS alongside events when processing the stream This avoids an extra query -per event- when both federation sender and appservice pusher process events.
78
0
72,433
10
1
14
def test_from_estimator_not_fitted(pyplot): regressor = Ridge() with pytest.raises(NotFittedError, match="is not fitted yet."): PredictionErrorDisplay.from_estimator(regressor, X, y) @pytest.mark.parametrize("class_method", ["from_estimator", "from_predictions"]) @pytest.mark.parametrize("kind", ["actual_vs_predicted", "residual_vs_predicted"])
sklearn/metrics/_plot/tests/test_predict_error_display.py
113
@pytest.mark.parametrize("class_method", ["from_estimator", "from_predictions"]) @pytest.mark.parametrize("kind", ["actual_vs_predicted", "residual_vs_predicted"])
scikit-learn
{ "docstring": "Check that we raise a `NotFittedError` when the passed regressor is not\n fit.", "language": "en", "n_whitespaces": 15, "n_words": 13, "vocab_size": 13 }
20
Python
20
40d7d880eddaf3a9a5e37ba2a8206caf22744926
test_predict_error_display.py
261,659
4
33
test_from_estimator_not_fitted
https://github.com/scikit-learn/scikit-learn.git
FEA add PredictionErrorDisplay (#18020) Co-authored-by: jeremie du boisberranger <[email protected]> Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Christian Lorentzen <[email protected]>
34
1
76,920
11
5
32
def test_pagination_from_sync_and_messages(self): channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A") self.assertEquals(200, channel.code, channel.json_body) annotation_id = channel.json_body["event_id"] # Send an event after the relation events. self.helper.send(self.room, body="Latest event", tok=self.user_token) # Request /sync, limiting it such that only the latest event is returned # (and not the relation). filter = urllib.parse.quote_plus( '{"room": {"timeline": {"limit": 1}}}'.encode() ) channel = self.make_request( "GET", f"/sync?filter={filter}", access_token=self.user_token ) self.assertEquals(200, channel.code, channel.json_body) room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] sync_prev_batch = room_timeline["prev_batch"] self.assertIsNotNone(sync_prev_batch) # Ensure the relation event is not in the batch returned from /sync. self.assertNotIn( annotation_id, [ev["event_id"] for ev in room_timeline["events"]] ) # Request /messages, limiting it such that only the latest event is # returned (and not the relation). channel = self.make_request( "GET", f"/rooms/{self.room}/messages?dir=b&limit=1", access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) messages_end = channel.json_body["end"] self.assertIsNotNone(messages_end) # Ensure the relation event is not in the chunk returned from /messages. self.assertNotIn( annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]] ) # Request /relations with the pagination tokens received from both the # /sync and /messages responses above, in turn. # # This is a tiny bit silly since the client wouldn't know the parent ID # from the requests above; consider the parent ID to be known from a # previous /sync. for from_token in (sync_prev_batch, messages_end): channel = self.make_request( "GET", f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?from={from_token}", access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) # The relation should be in the returned chunk. self.assertIn( annotation_id, [ev["event_id"] for ev in channel.json_body["chunk"]] )
tests/rest/client/test_relations.py
505
synapse
{ "docstring": "Pagination tokens from /sync and /messages can be used to paginate /relations.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
226
Python
111
df36945ff0e4a293a9dac0da07e2c94256835b32
test_relations.py
246,307
39
289
test_pagination_from_sync_and_messages
https://github.com/matrix-org/synapse.git
Support pagination tokens from /sync and /messages in the relations API. (#11952)
681
0
71,142
13
2
8
def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None): # type: (str, str, str, Optional[str]) -> str egg_project_name = project_name.replace("-", "_") req = f'{repo_url}@{rev}#egg={egg_project_name}' if subdir: req += f'&subdirectory={subdir}' return req
.venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py
80
transferlearning
{ "docstring": "\n Return the URL for a VCS requirement.\n\n Args:\n repo_url: the remote VCS url, with any needed VCS prefix (e.g. \"git+\").\n project_name: the (unescaped) project name.\n ", "language": "en", "n_whitespaces": 45, "n_words": 25, "vocab_size": 21 }
27
Python
23
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
versioncontrol.py
61,401
6
37
make_vcs_requirement_url
https://github.com/jindongwang/transferlearning.git
upd; format
52
0
12,547
10
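A quick illustration of the URL shape produced by the `make_vcs_requirement_url` record above; the repository URL, revision, and project name used here are made up.

```python
def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None):
    # Dashes are not valid in egg names, so they become underscores.
    egg_project_name = project_name.replace("-", "_")
    req = f"{repo_url}@{rev}#egg={egg_project_name}"
    if subdir:
        req += f"&subdirectory={subdir}"
    return req


print(make_vcs_requirement_url("git+https://example.com/pkg.git", "abc123", "my-pkg", "src"))
# git+https://example.com/pkg.git@abc123#egg=my_pkg&subdirectory=src
```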
1
14
def test_alter_value(self): altered_raw_data = apply_changes_to_raw_data( raw_data=self.raw_data, block_path_str="char1", operation=AlterBlockValueOperation(new_value="foo"), streamfield=models.SampleModel.content, ) self.assertEqual(altered_raw_data[0]["value"], "foo") self.assertEqual(altered_raw_data[1]["value"], self.raw_data[1]["value"]) self.assertEqual(altered_raw_data[2]["value"], "foo") self.assertEqual(altered_raw_data[3]["value"], self.raw_data[3]["value"])
wagtail/tests/streamfield_migrations/test_simple_structures.py
181
wagtail
{ "docstring": "Change the value of each `char1` block to `foo`\n\n Check whether the value of each `char1` block has changed to `foo`.\n Check whether the values of other blocks are intact.\n ", "language": "en", "n_whitespaces": 51, "n_words": 30, "vocab_size": 19 }
18
Python
17
ad65741b94f36fbe793cf15f0ab002482070cdb6
test_simple_structures.py
80,146
11
110
test_alter_value
https://github.com/wagtail/wagtail.git
Add tests for streamfield migration helpers Currently failing due to wagtail-factories being broken on Wagtail 4.1: https://github.com/wagtail/wagtail-factories/issues/65
111
0
17,024
13
3
17
def upgrade(): conn = op.get_bind() is_sqlite = bool(conn.dialect.name == "sqlite") if is_sqlite: op.execute("PRAGMA foreign_keys=off") with op.batch_alter_table('dag') as batch_op: batch_op.alter_column( 'concurrency', new_column_name='max_active_tasks', type_=sa.Integer(), nullable=False, ) if is_sqlite: op.execute("PRAGMA foreign_keys=on")
airflow/migrations/versions/30867afad44a_rename_concurrency_column_in_dag_table_.py
137
airflow
{ "docstring": "Apply Rename concurrency column in dag table to max_active_tasks", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
28
Python
24
6e5c9c845f7f0975178dbeb76d4ccfe95d0ed803
30867afad44a_rename_concurrency_column_in_dag_table_.py
45,122
14
75
upgrade
https://github.com/apache/airflow.git
Fix some migrations (#21670) In the xcom migration, there's a bad join. The clauses need to be wrapped in and_. And in both, for sqlite we need to temporarily suspend FK enforcement before dropping the tables.
118
0
8,488
12
1
2
def try_except(f): # pylint: disable=inconsistent-return-statements
gamestonk_terminal/decorators.py
14
OpenBBTerminal
{ "docstring": "Adds a try except block if the user is not in development mode\n\n Parameters\n ----------\n f: function\n The function to be wrapped\n ", "language": "en", "n_whitespaces": 41, "n_words": 22, "vocab_size": 21 }
5
Python
5
006b3570b795215a17c64841110b649b03db9a98
decorators.py
281,227
4
17
try_except
https://github.com/OpenBB-finance/OpenBBTerminal.git
Baseclass (#1141) * A working decorator * Basic intro * Added more * Refactor * Refactor * Cleaned code * Simplified function (thanks Chavi) * Small change * Updating tests : fix issue with mock * Updating tests : fix remaining mocks after merging * Updating tests : black * Cleaned up * Finished base cases * Notes * Slight changes * Added dynamic options handling, error persists * Fixed pylint issues * Fixed mock * fix decorator with dynamic dictionary of args * move choices from dynamic to const in crypto/ov * Updated var names * Check * Moved decorators * Fixed import issues * Fixed tests, update payoff controller * Fixed tests * Fixed pylint * Updated files * Added base class * Added reset * Improved base class * For James * More menues converted * Added contexts * 24 controllers left * 18 Controllers left * Changes choices * 9 controllers left * Added all controllers * Fixed glitch * Replaced all improper callings of class * Removed menu decorator * refactored try_except * Last commit * Black fix * Bug fix * Added James' new menus * Fixed tests * Fixed 8 tests * Fixing mypy issue * Updating tests : stocks/options * Fixed options * Fixed tests * Updating tests : stocks/options * Fixed tests * More test fixes * Updating tests : stocks/ba * Fixed options test * More bug fixes * Fixed tests * fixed pylint * Skipped test_call_load * Add typings to base class * Fix issue with appending auto completer options + bugfixes * Add typings to base class * Terminal throws error for bad path * sexy solution to auto completer in runtime * more sexy reset with reset_level stored * no so sexy jump between indirect menus * Removing choices argument * refactor custom_reset * Fixed tests * Theo fixes * Added back function * Fixed tests Co-authored-by: Chavithra PARANA <[email protected]> Co-authored-by: DidierRLopes <[email protected]>
11
0
83,631
6
6
28
def feature_extra_checks(self, name): assert isinstance(name, str) d = self.feature_supported[name] extra_checks = d.get("extra_checks", []) if not extra_checks: return [] self.dist_log("Testing extra checks for feature '%s'" % name, extra_checks) flags = self.feature_flags(name) available = [] not_available = [] for chk in extra_checks: test_path = os.path.join( self.conf_check_path, "extra_%s.c" % chk.lower() ) if not os.path.exists(test_path): self.dist_fatal("extra check file does not exist", test_path) is_supported = self.dist_test(test_path, flags + self.cc_flags["werror"]) if is_supported: available.append(chk) else: not_available.append(chk) if not_available: self.dist_log("testing failed for checks", not_available, stderr=True) return available
numpy/distutils/ccompiler_opt.py
267
numpy
{ "docstring": "\n Return a list of supported extra checks after testing them against\n the compiler.\n\n Parameters\n ----------\n names : str\n CPU feature name in uppercase.\n ", "language": "en", "n_whitespaces": 77, "n_words": 23, "vocab_size": 23 }
79
Python
59
f404e9e92e87a3990712d723d5c562a89300ac01
ccompiler_opt.py
160,177
24
162
feature_extra_checks
https://github.com/numpy/numpy.git
Add space after argument name
311
0
38,549
13
5
4
def is_reachable(G, s, t):
networkx/algorithms/tournament.py
17
networkx
{ "docstring": "Decides whether there is a path from `s` to `t` in the\n tournament.\n\n This function is more theoretically efficient than the reachability\n checks than the shortest path algorithms in\n :mod:`networkx.algorithms.shortest_paths`.\n\n The given graph **must** be a tournament, otherwise this function's\n behavior is undefined.\n\n Parameters\n ----------\n G : NetworkX graph\n A directed graph representing a tournament.\n\n s : node\n A node in the graph.\n\n t : node\n A node in the graph.\n\n Returns\n -------\n bool\n Whether there is a path from `s` to `t` in `G`.\n\n Examples\n --------\n >>> from networkx.algorithms import tournament\n >>> G = nx.DiGraph([(1, 0), (1, 3), (1, 2), (2, 3), (2, 0), (3, 0)])\n >>> tournament.is_reachable(G, 1, 3)\n True\n >>> tournament.is_reachable(G, 3, 2)\n False\n\n Notes\n -----\n Although this function is more theoretically efficient than the\n generic shortest path functions, a speedup requires the use of\n parallelism. Though it may in the future, the current implementation\n does not use parallelism, thus you may not see much of a speedup.\n\n This algorithm comes from [1].\n\n References\n ----------\n .. [1] Tantau, Till.\n \"A note on the complexity of the reachability problem for\n tournaments.\"\n *Electronic Colloquium on Computational Complexity*. 2001.\n <http://eccc.hpi-web.de/report/2001/092/>\n ", "language": "en", "n_whitespaces": 357, "n_words": 190, "vocab_size": 119 }
4
Python
4
5a7985fc41bc0c686c035de43c66cf4fb5fcc94f
tournament.py
176,547
5
54
is_reachable
https://github.com/networkx/networkx.git
Added examples in tournament and tree functions (#5536) * examples * examples * examples * Example changed * improved styling * revised * edge labels * improved styling * spacing * error testing * examples * styling * add_nodes removed * spaceing * spacing * spacing * added examples * removed random_tournament example * added examples in branching and aborescence * error removed
7
0
41,956
6
1
9
def forward(self, inputs, labels): logits = self.nsp(inputs) loss = F.cross_entropy(logits, labels) return loss
modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/ernie_modeling.py
48
PaddleHub
{ "docstring": "\n Args:\n start_pos (optional, `Variable` of shape [batch_size]):\n token index of start of answer span in `context`\n end_pos (optional, `Variable` of shape [batch_size]):\n token index of end of answer span in `context`\n Returns:\n loss (`Variable` of shape []):\n Cross entropy loss mean over batch and time, ignore positions where label == -100\n if labels not set, returns None\n start_logits (`Variable` of shape [batch_size, hidden_size]):\n output logits of start position\n end_logits (`Variable` of shape [batch_size, hidden_size]):\n output logits of end position\n ", "language": "en", "n_whitespaces": 246, "n_words": 79, "vocab_size": 46 }
13
Python
11
ffcde21305c61d950a9f93e57e6180c9a9665b87
ernie_modeling.py
50,263
4
30
forward
https://github.com/PaddlePaddle/PaddleHub.git
add disco_diffusion_ernievil_base
41
0
10,073
8
1
4
def identify(self, requirement_or_candidate): # type: (Candidate | Requirement) -> str return requirement_or_candidate.canonical_package_id
lib/ansible/galaxy/dependency_resolution/providers.py
22
ansible
{ "docstring": "Given requirement or candidate, return an identifier for it.\n\n This is used to identify a requirement or candidate, e.g.\n whether two requirements should have their specifier parts\n (version ranges or pins) merged, whether two candidates would\n conflict with each other (because they have same name but\n different versions).\n ", "language": "en", "n_whitespaces": 90, "n_words": 48, "vocab_size": 41 }
12
Python
12
8b2e6285650ec42ec4a19075a8567047e8304ba2
providers.py
266,878
2
12
identify
https://github.com/ansible/ansible.git
galaxy - Clean up type hints and imports.
33
0
78,637
6
6
17
def detect_indentation(self) -> int: _indentations = { len(match.group(1)) for match in re.finditer(r"^( *)(.*)$", self.plain, flags=re.MULTILINE) } try: indentation = ( reduce(gcd, [indent for indent in _indentations if not indent % 2]) or 1 ) except TypeError: indentation = 1 return indentation
pipenv/patched/notpip/_vendor/rich/text.py
116
pipenv
{ "docstring": "Auto-detect indentation of code.\n\n Returns:\n int: Number of spaces used to indent code.\n ", "language": "en", "n_whitespaces": 38, "n_words": 13, "vocab_size": 11 }
41
Python
32
f3166e673fe8d40277b804d35d77dcdb760fc3b3
text.py
20,864
17
74
detect_indentation
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
153
0
3,602
16
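A self-contained version of the gcd-of-even-indent-widths idea from the `detect_indentation` record above; the free-function form and the sample string are assumptions for illustration, not the rich API.

```python
import re
from functools import reduce
from math import gcd


def detect_indentation(text):
    # Collect the leading-space count of every line.
    widths = {len(m.group(1)) for m in re.finditer(r"^( *)(.*)$", text, flags=re.MULTILINE)}
    try:
        # gcd of the even widths gives the indent unit; fall back to 1.
        return reduce(gcd, [w for w in widths if not w % 2]) or 1
    except TypeError:
        return 1


sample = "def f():\n    if True:\n        return 1\n"
print(detect_indentation(sample))  # 4
```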
9
54
def save_model(model, filepath, weights_format="h5"): if not filepath.endswith(".keras"): raise ValueError( "Invalid filename: expected a `.keras` extension. " f"Received: filepath={filepath}" ) if weights_format == "h5" and h5py is None: raise ImportError("h5py must be installed in order to save a model.") if not model.built: warnings.warn( "You are saving a model that has not yet been built. " "It might not contain any weights yet. " "Consider building the model first by calling it " "on some data.", stacklevel=2, ) saving_v3_enabled_value = getattr(_SAVING_V3_ENABLED, "value", False) _SAVING_V3_ENABLED.value = True serialized_model_dict = serialize_keras_object(model) config_json = json.dumps(serialized_model_dict) metadata_json = json.dumps( { "keras_version": keras.__version__, "date_saved": datetime.datetime.now().strftime("%Y-%m-%d@%H:%M:%S"), } ) try: with zipfile.ZipFile(filepath, "w") as zf: with zf.open(_METADATA_FILENAME, "w") as f: f.write(metadata_json.encode()) with zf.open(_CONFIG_FILENAME, "w") as f: f.write(config_json.encode()) if weights_format == "h5": weights_store = H5IOStore( _VARS_FNAME + ".h5", archive=zf, mode="w" ) elif weights_format == "npz": weights_store = NpzIOStore( _VARS_FNAME + ".npz", archive=zf, mode="w" ) else: raise ValueError( "Unknown weights_format. Expected 'h5' or 'npz'. " f"Received: {weights_format}" ) asset_store = DiskIOStore(_ASSETS_DIRNAME, archive=zf, mode="w") _save_state( model, weights_handler=weights_store, assets_handler=asset_store, inner_path="", visited_trackables=set(), ) weights_store.close() asset_store.close() except Exception as e: raise e finally: _SAVING_V3_ENABLED.value = saving_v3_enabled_value
keras/saving/experimental/saving_lib.py
521
keras
{ "docstring": "Save a zip-archive representing a Keras model to the given filepath.\n\n The zip-based archive contains the following structure:\n\n - JSON-based configuration file (config.json): Records of model, layer, and\n other trackables' configuration.\n - NPZ-based trackable state files, found in respective directories, such as\n model/states.npz, model/dense_layer/states.npz, etc.\n - Metadata file.\n\n The states of Keras trackables (layers, optimizers, loss, and metrics) are\n automatically saved as long as they can be discovered through the attributes\n returned by `dir(Model)`. Typically, the state includes the variables\n associated with the trackable, but some specially purposed layers may\n contain more such as the vocabularies stored in the hashmaps. The trackables\n define how their states are saved by exposing `save_state()` and\n `load_state()` APIs.\n\n For the case of layer states, the variables will be visited as long as\n they are either 1) referenced via layer attributes, or 2) referenced via a\n container (list, tuple, or dict), and the container is referenced via a\n layer attribute.\n ", "language": "en", "n_whitespaces": 217, "n_words": 155, "vocab_size": 106 }
181
Python
129
e6f739a31247c43a86c37c33b0b8b2ba6be6a5f6
saving_lib.py
280,200
59
291
save_model
https://github.com/keras-team/keras.git
- Add standalone weights file saving/loading functionality. - Switch to in-memory, single write / single read archive saving for better performance. - Remove ability to pick between zipping or not zipping a Keras saved artifact: it's always a zip archive now. PiperOrigin-RevId: 483705728
770
0
83,285
17
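To make the archive layout in the `save_model` record above concrete, here is a toy zip written and read back with only the standard library; the member names `metadata.json` and `config.json` follow the constants referenced in the record's code but are assumed values, and nothing here calls Keras itself.

```python
import json
import zipfile

# Write a toy archive with the metadata/config layout described above.
with zipfile.ZipFile("toy.keras", "w") as zf:
    zf.writestr("metadata.json", json.dumps({"keras_version": "0.0", "date_saved": "2022-01-01@00:00:00"}))
    zf.writestr("config.json", json.dumps({"class_name": "ToyModel"}))

with zipfile.ZipFile("toy.keras") as zf:
    print(zf.namelist())  # ['metadata.json', 'config.json']
```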
5
23
def process_deferred_accounting(posting_date=None): if not posting_date: posting_date = today() if not cint( frappe.db.get_singles_value( "Accounts Settings", "automatically_process_deferred_accounting_entry" ) ): return start_date = add_months(today(), -1) end_date = add_days(today(), -1) companies = frappe.get_all("Company") for company in companies: for record_type in ("Income", "Expense"): doc = frappe.get_doc( dict( doctype="Process Deferred Accounting", company=company.name, posting_date=posting_date, start_date=start_date, end_date=end_date, type=record_type, ) ) doc.insert() doc.submit()
erpnext/accounts/deferred_revenue.py
207
erpnext
{ "docstring": "Converts deferred income/expense into income/expense\n\tExecuted via background jobs on every month end", "language": "en", "n_whitespaces": 11, "n_words": 13, "vocab_size": 12 }
54
Python
43
494bd9ef78313436f0424b918f200dab8fc7c20b
deferred_revenue.py
64,731
26
124
process_deferred_accounting
https://github.com/frappe/erpnext.git
style: format code with black
28
0
13,709
16
1
21
def test_disabled(self) -> None: fake_oidc_server = self.helper.fake_oidc_server() user = "john" login_resp, grant = self.helper.login_via_oidc( fake_oidc_server, user, with_sid=True ) access_token: str = login_resp["access_token"] self.helper.whoami(access_token, expect_code=HTTPStatus.OK) # Logging out shouldn't work logout_token = fake_oidc_server.generate_logout_token(grant) channel = self.submit_logout_token(logout_token) self.assertEqual(channel.code, 400) # And the token should still be valid self.helper.whoami(access_token, expect_code=HTTPStatus.OK)
tests/rest/client/test_auth.py
163
synapse
{ "docstring": "\n Receiving a logout token should do nothing if it is disabled in the config\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 14 }
47
Python
39
cc3a52b33df72bb4230367536b924a6d1f510d36
test_auth.py
249,779
15
100
test_disabled
https://github.com/matrix-org/synapse.git
Support OIDC backchannel logouts (#11414) If configured an OIDC IdP can log a user's session out of Synapse when they log out of the identity provider. The IdP sends a request directly to Synapse (and must be configured with an endpoint) when a user logs out.
149
0
73,122
9
2
2
def test_literal_slice_boxing(self):
numba/tests/test_slices.py
13
numba
{ "docstring": "\n Tests that a literal slice can be used\n as an argument to a JIT function.\n ", "language": "en", "n_whitespaces": 45, "n_words": 15, "vocab_size": 14 }
2
Python
2
0294ef37a19ecd995823678462faedbe10a09b22
test_slices.py
161,979
13
77
test_literal_slice_boxing
https://github.com/numba/numba.git
support for boxing SliceLiteral type
9
0
39,117
6
1
4
def _expects_training_arg(self): return self._call_spec.expects_training_arg
keras/engine/base_layer.py
22
keras
{ "docstring": "Whether the call function uses 'training' as a parameter.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
4
Python
4
84afc5193d38057e2e2badf9c889ea87d80d8fbf
base_layer.py
270,679
2
12
_expects_training_arg
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
18
0
80,522
7
1
5
def __array_wrap__(self, result, context=None): # TODO: This is very inefficient. __array__ and as_matrix have been # changed to call the more efficient to_numpy, but this has been left # unchanged since we are not sure of its purpose. return self._default_to_pandas("__array_wrap__", result, context=context)
modin/pandas/base.py
43
modin
{ "docstring": "\n Get called after a ufunc and other functions.\n\n Parameters\n ----------\n result : np.ndarray\n The result of the ufunc or other function called on the NumPy array\n returned by __array__.\n context : tuple of (func, tuple, int), optional\n This parameter is returned by ufuncs as a 3-element tuple: (name of the\n ufunc, arguments of the ufunc, domain of the ufunc), but is not set by\n other NumPy functions.\n\n Returns\n -------\n BasePandasDataset\n Wrapped Modin object.\n ", "language": "en", "n_whitespaces": 203, "n_words": 73, "vocab_size": 51 }
42
Python
38
605efa618e7994681f57b11d04d417f353ef8d50
base.py
153,621
2
25
__array_wrap__
https://github.com/modin-project/modin.git
DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333) Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Alexander Myskov <[email protected]>
77
0
35,502
8
2
8
def get_primary_key_column(self, cursor, table_name): cursor.execute( , [table_name], ) row = cursor.fetchone() return self.identifier_converter(row[0]) if row else None
django/db/backends/oracle/introspection.py
63
django
{ "docstring": "\n SELECT\n cols.column_name\n FROM\n user_constraints,\n user_cons_columns cols\n WHERE\n user_constraints.constraint_name = cols.constraint_name AND\n user_constraints.constraint_type = 'P' AND\n user_constraints.table_name = UPPER(%s) AND\n cols.position = 1\n ", "language": "en", "n_whitespaces": 172, "n_words": 22, "vocab_size": 17 }
17
Python
16
c5cd8783825b5f6384417dac5f3889b4210b7d08
introspection.py
203,224
18
41
get_primary_key_column
https://github.com/django/django.git
Refs #33476 -- Refactored problematic code before reformatting by Black. In these cases Black produces unexpected results, e.g. def make_random_password( self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789', ): or cursor.execute(""" SELECT ... """, [table name], )
66
0
50,258
9
3
8
def get_extra_loggers(self) -> List[str]: return ( [name.strip() for name in self.extra_loggers.split(",")] if self.extra_loggers else [] )
src/prefect/utilities/settings.py
64
prefect
{ "docstring": "\n Parse the `extra_loggers` CSV and trim whitespace from logger names\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
16
Python
16
a452d8b8917000774302411a7aeb949f7e326814
settings.py
53,193
9
39
get_extra_loggers
https://github.com/PrefectHQ/prefect.git
Strip logger name to prevent accidental spaces
70
0
10,735
12
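The CSV parsing in the `get_extra_loggers` record above boils down to one expression; the sample setting value below is made up.

```python
extra_loggers = " uvicorn , sqlalchemy.engine,botocore "
names = [name.strip() for name in extra_loggers.split(",")] if extra_loggers else []
print(names)  # ['uvicorn', 'sqlalchemy.engine', 'botocore']
```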
3
16
def _get_input_from_iterator(iterator, model): next_element = iterator.get_next() # `len(nest.flatten(x))` is going to not count empty elements such as {}. # len(nest.flatten([[0,1,2], {}])) is 3 and not 4. The `next_element` is # going to get flattened in `_prepare_feed_values` to work around that. Empty # elements are going to get filtered out as part of the flattening. if len(tf.nest.flatten(next_element)) == len(model.inputs): x = next_element y = None sample_weights = None elif len(tf.nest.flatten(next_element)) == ( len(model.inputs) + len(model.outputs) ): x, y = next_element sample_weights = None else: x, y, sample_weights = next_element # Validate that all the elements in x and y are of the same type and shape. validate_distributed_dataset_inputs( model._distribution_strategy, x, y, sample_weights ) return x, y, sample_weights
keras/distribute/distributed_training_utils_v1.py
176
keras
{ "docstring": "Get elements from the iterator and verify the input shape and type.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 10 }
115
Python
67
84afc5193d38057e2e2badf9c889ea87d80d8fbf
distributed_training_utils_v1.py
270,328
17
108
_get_input_from_iterator
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
215
0
80,431
12
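The branch selection in the `_get_input_from_iterator` record above hinges on `tf.nest.flatten` dropping empty containers; a one-line check, assuming TensorFlow is installed.

```python
import tensorflow as tf

# Empty structures contribute nothing to the flattened list,
# so [[0, 1, 2], {}] flattens to three elements, not four.
print(len(tf.nest.flatten([[0, 1, 2], {}])))  # 3
```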
7
28
def _get_target_nodes(self) -> List[Tuple[str, str]]: location = self._config.location target_nodes = get_all_node_ids() if location == DeploymentMode.NoServer: return [] if location == DeploymentMode.HeadOnly: head_node_resource_key = get_current_node_resource_key() return [ (node_id, node_resource) for node_id, node_resource in target_nodes if node_resource == head_node_resource_key ][:1] if location == DeploymentMode.FixedNumber: num_replicas = self._config.fixed_number_replicas if num_replicas > len(target_nodes): logger.warning( "You specified fixed_number_replicas=" f"{num_replicas} but there are only " f"{len(target_nodes)} total nodes. Serve will start one " "HTTP proxy per node." ) num_replicas = len(target_nodes) # Seed the random state so sample is deterministic. # i.e. it will always return the same set of nodes. random.seed(self._config.fixed_number_selection_seed) return random.sample(sorted(target_nodes), k=num_replicas) return target_nodes
python/ray/serve/http_state.py
235
ray
{ "docstring": "Return the list of (id, resource_key) to deploy HTTP servers on.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
101
Python
72
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
http_state.py
130,910
26
137
_get_target_nodes
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
426
0
29,422
16
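A pure-Python sketch of the node-selection behaviour documented in the `_get_target_nodes` record above; `pick_nodes`, the mode strings, and the sample node tuples are hypothetical stand-ins, the warning/logging path is omitted, and no Ray APIs are used.

```python
import random


def pick_nodes(nodes, mode, head_key=None, fixed_number=None, seed=0):
    if mode == "NoServer":
        return []
    if mode == "HeadOnly":
        # Keep at most the one node whose resource key matches the head node.
        return [(nid, res) for nid, res in nodes if res == head_key][:1]
    if mode == "FixedNumber":
        # Seeded sampling keeps the selection deterministic across calls.
        random.seed(seed)
        k = min(fixed_number, len(nodes))
        return random.sample(sorted(nodes), k=k)
    return nodes  # every node


nodes = [("n1", "node:10.0.0.1"), ("n2", "node:10.0.0.2"), ("n3", "node:10.0.0.3")]
print(pick_nodes(nodes, "HeadOnly", head_key="node:10.0.0.2"))
print(pick_nodes(nodes, "FixedNumber", fixed_number=2, seed=42))
```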
3
9
async def cancel_triggers(self): while self.to_cancel: trigger_id = self.to_cancel.popleft() if trigger_id in self.triggers: # We only delete if it did not exit already self.triggers[trigger_id]["task"].cancel() await asyncio.sleep(0)
airflow/jobs/triggerer_job.py
83
airflow
{ "docstring": "\n Drain the to_cancel queue and ensure all triggers that are not in the\n DB are cancelled, so the cleanup job deletes them.\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 19 }
25
Python
23
c20ad79b40ea2b213f6dca221221c6dbd55bd08f
triggerer_job.py
43,650
6
47
cancel_triggers
https://github.com/apache/airflow.git
Rename `to_delete` to `to_cancel` in TriggerRunner (#20658) The queue's purpose is to track triggers that need to be canceled. The language `to_delete` was a bit confusing because for one it does not actually delete them but cancel them. The deletion work is actually in `cleanup_finished_triggers`. It seems that this method will usually not do anything and it's only for cancelling triggers that are currently running but for whatever reason no longer should be. E.g. when a task is killed and therefore the trigger is no longer needed, or some multi-triggerer scenarios. So putting cancel in the name also highlights that this is about stopping running triggers, not e.g. purging completed ones.
102
0
8,018
14
2
5
def set_url_path(self, parent): if parent: self.url_path = parent.url_path + self.slug + "/" else: # a page without a parent is the tree root, which always has a url_path of '/' self.url_path = "/" return self.url_path
wagtail/core/models/__init__.py
63
wagtail
{ "docstring": "\n Populate the url_path field based on this page's slug and the specified parent page.\n (We pass a parent in here, rather than retrieving it via get_parent, so that we can give\n new unsaved pages a meaningful URL when previewing them; at that point the page has not\n been assigned a position in the tree, as far as treebeard is concerned.\n ", "language": "en", "n_whitespaces": 96, "n_words": 60, "vocab_size": 51 }
35
Python
28
d10f15e55806c6944827d801cd9c2d53f5da4186
__init__.py
73,844
6
35
set_url_path
https://github.com/wagtail/wagtail.git
Reformat with black
96
0
16,139
11
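A toy illustration of how `url_path` accumulates parent slugs, per the `set_url_path` record above; the `Node` class is a made-up stand-in for a page, not the Wagtail model.

```python
class Node:
    def __init__(self, slug, parent=None):
        self.slug = slug
        # A node without a parent is the tree root, whose url_path is "/".
        self.url_path = (parent.url_path + slug + "/") if parent else "/"


root = Node("")
blog = Node("blog", root)
post = Node("hello-world", blog)
print(post.url_path)  # /blog/hello-world/
```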
2
6
def order(self): if self.characteristic == 0: raise NotImplementedError("Still not implemented") return len(self.points())
sympy/ntheory/elliptic_curve.py
49
sympy
{ "docstring": "\n Number of points in Finite field.\n\n Examples\n ========\n\n >>> from sympy.ntheory.elliptic_curve import EllipticCurve\n >>> e2 = EllipticCurve(1, 0, modulus=19)\n >>> e2.order\n 19\n\n ", "language": "en", "n_whitespaces": 79, "n_words": 22, "vocab_size": 20 }
12
Python
12
8fc835bcd86ea080644783a363e47adca6dff3a7
elliptic_curve.py
200,253
4
27
order
https://github.com/sympy/sympy.git
Remove redundant list calls
44
0
49,567
10
1
33
async def test_fossil_energy_consumption(hass, hass_ws_client, recorder_mock): now = dt_util.utcnow() later = dt_util.as_utc(dt_util.parse_datetime("2022-09-01 00:00:00")) await async_setup_component(hass, "history", {}) await async_setup_component(hass, "sensor", {}) await async_recorder_block_till_done(hass) period1 = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00")) period2 = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 23:00:00")) period2_day_start = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 00:00:00")) period3 = dt_util.as_utc(dt_util.parse_datetime("2021-10-01 00:00:00")) period4 = dt_util.as_utc(dt_util.parse_datetime("2021-10-31 23:00:00")) period4_day_start = dt_util.as_utc(dt_util.parse_datetime("2021-10-31 00:00:00")) external_energy_statistics_1 = ( { "start": period1, "last_reset": None, "state": 0, "sum": 2, }, { "start": period2, "last_reset": None, "state": 1, "sum": 3, }, { "start": period3, "last_reset": None, "state": 2, "sum": 4, }, { "start": period4, "last_reset": None, "state": 3, "sum": 5, }, ) external_energy_metadata_1 = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import_tariff_1", "unit_of_measurement": "kWh", } external_energy_statistics_2 = ( { "start": period1, "last_reset": None, "state": 0, "sum": 20000, }, { "start": period2, "last_reset": None, "state": 1, "sum": 30000, }, { "start": period3, "last_reset": None, "state": 2, "sum": 40000, }, { "start": period4, "last_reset": None, "state": 3, "sum": 50000, }, ) external_energy_metadata_2 = { "has_mean": False, "has_sum": True, "name": "Total imported energy", "source": "test", "statistic_id": "test:total_energy_import_tariff_2", "unit_of_measurement": "Wh", } external_co2_statistics = ( { "start": period1, "last_reset": None, "mean": 10, }, { "start": period2, "last_reset": None, "mean": 30, }, { "start": period3, "last_reset": None, "mean": 60, }, { "start": period4, "last_reset": None, "mean": 90, }, ) external_co2_metadata = { "has_mean": True, "has_sum": False, "name": "Fossil percentage", "source": "test", "statistic_id": "test:fossil_percentage", "unit_of_measurement": "%", } async_add_external_statistics( hass, external_energy_metadata_1, external_energy_statistics_1 ) async_add_external_statistics( hass, external_energy_metadata_2, external_energy_statistics_2 ) async_add_external_statistics(hass, external_co2_metadata, external_co2_statistics) await async_wait_recording_done(hass) client = await hass_ws_client() await client.send_json( { "id": 1, "type": "energy/fossil_energy_consumption", "start_time": now.isoformat(), "end_time": later.isoformat(), "energy_statistic_ids": [ "test:total_energy_import_tariff_1", "test:total_energy_import_tariff_2", ], "co2_statistic_id": "test:fossil_percentage", "period": "hour", } ) response = await client.receive_json() assert response["success"] assert response["result"] == { period2.isoformat(): pytest.approx((33.0 - 22.0) * 0.3), period3.isoformat(): pytest.approx((44.0 - 33.0) * 0.6), period4.isoformat(): pytest.approx((55.0 - 44.0) * 0.9), } await client.send_json( { "id": 2, "type": "energy/fossil_energy_consumption", "start_time": now.isoformat(), "end_time": later.isoformat(), "energy_statistic_ids": [ "test:total_energy_import_tariff_1", "test:total_energy_import_tariff_2", ], "co2_statistic_id": "test:fossil_percentage", "period": "day", } ) response = await client.receive_json() assert response["success"] assert response["result"] == { 
period2_day_start.isoformat(): pytest.approx((33.0 - 22.0) * 0.3), period3.isoformat(): pytest.approx((44.0 - 33.0) * 0.6), period4_day_start.isoformat(): pytest.approx((55.0 - 44.0) * 0.9), } await client.send_json( { "id": 3, "type": "energy/fossil_energy_consumption", "start_time": now.isoformat(), "end_time": later.isoformat(), "energy_statistic_ids": [ "test:total_energy_import_tariff_1", "test:total_energy_import_tariff_2", ], "co2_statistic_id": "test:fossil_percentage", "period": "month", } ) response = await client.receive_json() assert response["success"] assert response["result"] == { period1.isoformat(): pytest.approx((33.0 - 22.0) * 0.3), period3.isoformat(): pytest.approx( ((44.0 - 33.0) * 0.6) + ((55.0 - 44.0) * 0.9) ), }
tests/components/energy/test_websocket_api.py
1,475
core
{ "docstring": "Test fossil_energy_consumption with co2 sensor data.", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
397
Python
139
5d7756885be0fd044d86e60ec0d2639f9d114ea3
test_websocket_api.py
288,579
183
904
test_fossil_energy_consumption
https://github.com/home-assistant/core.git
Normalize to kWh when handling WS energy/fossil_energy_consumption (#79649) * Normalize to kWh when handling WS energy/fossil_energy_consumption * Improve test
1,806
0
87,736
15
7
54
def test_application_services_receive_bursts_of_to_device(self): # Register two application services with exclusive interest in a user interested_appservices = [] for _ in range(2): appservice = self._register_application_service( namespaces={ ApplicationService.NS_USERS: [ { "regex": "@exclusive_as_user:.+", "exclusive": True, } ], }, ) interested_appservices.append(appservice) # ...and an application service which does not have any user interest. self._register_application_service() to_device_message_content = { "some key": "some interesting value", } # We need to send a large burst of to-device messages. We also would like to # include them all in the same application service transaction so that we can # test large transactions. # # To do this, we can send a single to-device message to many user devices at # once. # # We insert number_of_messages - 1 messages into the database directly. We'll then # send a final to-device message to the real device, which will also kick off # an AS transaction (as just inserting messages into the DB won't). number_of_messages = 150 fake_device_ids = [f"device_{num}" for num in range(number_of_messages - 1)] messages = { self.exclusive_as_user: { device_id: to_device_message_content for device_id in fake_device_ids } } # Create a fake device per message. We can't send to-device messages to # a device that doesn't exist. self.get_success( self.hs.get_datastore().db_pool.simple_insert_many( desc="test_application_services_receive_burst_of_to_device", table="devices", keys=("user_id", "device_id"), values=[ ( self.exclusive_as_user, device_id, ) for device_id in fake_device_ids ], ) ) # Seed the device_inbox table with our fake messages self.get_success( self.hs.get_datastore().add_messages_to_device_inbox(messages, {}) ) # Now have local_user send a final to-device message to exclusive_as_user. All unsent # to-device messages should be sent to any application services # interested in exclusive_as_user. chan = self.make_request( "PUT", "/_matrix/client/r0/sendToDevice/m.room_key_request/4", content={ "messages": { self.exclusive_as_user: { self.exclusive_as_user_device_id: to_device_message_content } } }, access_token=self.local_user_token, ) self.assertEqual(chan.code, 200, chan.result) self.send_mock.assert_called() # Count the total number of to-device messages that were sent out per-service. # Ensure that we only sent to-device messages to interested services, and that # each interested service received the full count of to-device messages. service_id_to_message_count: Dict[str, int] = {} for call in self.send_mock.call_args_list: service, _events, _ephemeral, to_device_messages = call[0] # Check that this was made to an interested service self.assertIn(service, interested_appservices) # Add to the count of messages for this application service service_id_to_message_count.setdefault(service.id, 0) service_id_to_message_count[service.id] += len(to_device_messages) # Assert that each interested service received the full count of messages for count in service_id_to_message_count.values(): self.assertEqual(count, number_of_messages)
tests/handlers/test_appservice.py
514
synapse
{ "docstring": "\n Test that when a user sends >100 to-device messages at once, any\n interested AS's will receive them in separate transactions.\n\n Also tests that uninterested application services do not receive messages.\n ", "language": "en", "n_whitespaces": 59, "n_words": 30, "vocab_size": 28 }
373
Python
199
64ec45fc1b0856dc7daacca7d3ab75d50bd89f84
test_appservice.py
246,233
64
308
test_application_services_receive_bursts_of_to_device
https://github.com/matrix-org/synapse.git
Send to-device messages to application services (#11215) Co-authored-by: Richard van der Hoff <[email protected]>
1,357
0
71,114
17
1
2
def imaginaryaxis(self):
    return self["imaginaryaxis"]
packages/python/plotly/plotly/graph_objs/layout/_smith.py
22
plotly.py
{ "docstring": "\n The 'imaginaryaxis' property is an instance of Imaginaryaxis\n that may be specified as:\n - An instance of :class:`plotly.graph_objs.layout.smith.Imaginaryaxis`\n - A dict of string/value properties that will be passed\n to the Imaginaryaxis constructor\n\n Supported dict properties:\n\n color\n Sets default for all colors associated with\n this axis all at once: line, font, tick, and\n grid colors. Grid color is lightened by\n blending this with the plot background\n Individual pieces can override this.\n gridcolor\n Sets the color of the grid lines.\n gridwidth\n Sets the width (in px) of the grid lines.\n hoverformat\n Sets the hover text formatting rule using d3\n formatting mini-languages which are very\n similar to those in Python. For numbers, see: h\n ttps://github.com/d3/d3-format/tree/v1.4.5#d3-f\n ormat. And for dates see:\n https://github.com/d3/d3-time-\n format/tree/v2.2.3#locale_format. We add two\n items to d3's date formatter: \"%h\" for half of\n the year as a decimal number as well as \"%{n}f\"\n for fractional seconds with n digits. For\n example, *2016-10-13 09:15:23.456* with\n tickformat \"%H~%M~%S.%2f\" would display\n \"09~15~23.46\"\n layer\n Sets the layer on which this axis is displayed.\n If *above traces*, this axis is displayed above\n all the subplot's traces If *below traces*,\n this axis is displayed below all the subplot's\n traces, but above the grid lines. Useful when\n used together with scatter-like traces with\n `cliponaxis` set to False to show markers\n and/or text nodes above this axis.\n linecolor\n Sets the axis line color.\n linewidth\n Sets the width (in px) of the axis line.\n showgrid\n Determines whether or not grid lines are drawn.\n If True, the grid lines are drawn at every tick\n mark.\n showline\n Determines whether or not a line bounding this\n axis is drawn.\n showticklabels\n Determines whether or not the tick labels are\n drawn.\n showtickprefix\n If \"all\", all tick labels are displayed with a\n prefix. If \"first\", only the first tick is\n displayed with a prefix. If \"last\", only the\n last tick is displayed with a suffix. If\n \"none\", tick prefixes are hidden.\n showticksuffix\n Same as `showtickprefix` but for tick suffixes.\n tickcolor\n Sets the tick color.\n tickfont\n Sets the tick font.\n tickformat\n Sets the tick label formatting rule using d3\n formatting mini-languages which are very\n similar to those in Python. For numbers, see: h\n ttps://github.com/d3/d3-format/tree/v1.4.5#d3-f\n ormat. And for dates see:\n https://github.com/d3/d3-time-\n format/tree/v2.2.3#locale_format. We add two\n items to d3's date formatter: \"%h\" for half of\n the year as a decimal number as well as \"%{n}f\"\n for fractional seconds with n digits. For\n example, *2016-10-13 09:15:23.456* with\n tickformat \"%H~%M~%S.%2f\" would display\n \"09~15~23.46\"\n ticklen\n Sets the tick length (in px).\n tickprefix\n Sets a tick label prefix.\n ticks\n Determines whether ticks are drawn or not. If\n \"\", this axis' ticks are not drawn. If\n \"outside\" (\"inside\"), this axis' are drawn\n outside (inside) the axis lines.\n ticksuffix\n Sets a tick label suffix.\n tickvals\n Sets the values at which ticks on this axis\n appear. Defaults to `realaxis.tickvals` plus\n the same as negatives and zero.\n tickvalssrc\n Sets the source reference on Chart Studio Cloud\n for `tickvals`.\n tickwidth\n Sets the tick width (in px).\n visible\n A single toggle to hide the axis while\n preserving interaction like dragging. 
Default\n is true when a cheater plot is present on the\n axis, otherwise false\n\n Returns\n -------\n plotly.graph_objs.layout.smith.Imaginaryaxis\n ", "language": "en", "n_whitespaces": 2370, "n_words": 517, "vocab_size": 232 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_smith.py
231,700
2
11
imaginaryaxis
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
63,144
7
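A brief usage sketch for the imaginaryaxis property documented in the record above. It assumes a plotly version that ships Smith-chart support (go.Scattersmith and layout.smith); the sample points and styling values are made up for illustration.
import plotly.graph_objects as go

# Plot a few points on a Smith chart and style the imaginary axis
# via layout.smith.imaginaryaxis (properties taken from the docstring above).
fig = go.Figure(go.Scattersmith(real=[0.5, 1, 2], imag=[0.5, 1, 2]))
fig.update_layout(
    smith=dict(
        imaginaryaxis=dict(gridcolor="lightgray", ticks="outside", showticklabels=True),
    )
)
fig.show()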
2
6
def vf2pp_is_isomorphic(G1, G2, node_label=None, default_label=None):
    if vf2pp_isomorphism(G1, G2, node_label, default_label) is not None:
        return True
    return False
networkx/algorithms/isomorphism/vf2pp.py
51
networkx
{ "docstring": "Examines whether G1 and G2 are isomorphic.\n\n Parameters\n ----------\n G1, G2 : NetworkX Graph or MultiGraph instances.\n The two graphs to check for isomorphism.\n\n node_label : str, optional\n The name of the node attribute to be used when comparing nodes.\n The default is `None`, meaning node attributes are not considered\n in the comparison. Any node that doesn't have the `node_label`\n attribute uses `default_label` instead.\n\n default_label : scalar\n Default value to use when a node doesn't have an attribute\n named `node_label`. Default is `None`.\n\n Returns\n -------\n bool\n True if the two graphs are isomorphic, False otherwise.\n ", "language": "en", "n_whitespaces": 178, "n_words": 95, "vocab_size": 71 }
17
Python
15
a796f526c7ce6a7f182aee4b81b8499feabe1a45
vf2pp.py
177,286
4
35
vf2pp_is_isomorphic
https://github.com/networkx/networkx.git
VF2++ for Directed Graphs (#5972) Modify vf2pp implementation to support directed graphs. Updates all helper functions and state/parameter objects to account for in/out degree. Includes other changes such as renaming the keyword argument from node_labels to node_label to better reflect the fact that the label kwarg expects a single value. Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Dan Schult <[email protected]>
33
0
42,325
8
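A minimal usage sketch for vf2pp_is_isomorphic as documented above. The graphs and the "color" attribute are invented for illustration, and it assumes the function is re-exported at the networkx top level (as in recent releases); otherwise it can be imported from networkx.algorithms.isomorphism.
import networkx as nx

# Two structurally identical 4-cycles with a node attribute to compare.
G1 = nx.cycle_graph(4)
G2 = nx.cycle_graph(4)
nx.set_node_attributes(G1, "red", "color")
nx.set_node_attributes(G2, "red", "color")

# Structure only, then structure plus the "color" node label.
print(nx.vf2pp_is_isomorphic(G1, G2))                      # True
print(nx.vf2pp_is_isomorphic(G1, G2, node_label="color"))  # True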
1
7
def commit_sha():
    return run_command(
        ['git', 'rev-parse', 'HEAD'], capture_output=True, text=True, check=False
    ).stdout.strip()
dev/breeze/src/airflow_breeze/utils/run_utils.py
58
airflow
{ "docstring": "Returns commit SHA of current repo. Cached for various usages.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
11
Python
11
4ffd4f09532fceb67675fce4c1f5cd383eff992e
run_utils.py
46,790
4
34
commit_sha
https://github.com/apache/airflow.git
Prepare Breeze2 for prime time :) (#22713) This is a review and clean-up for all the parameters and commands for Breeze2 in order to prepare it for being used by the contribugors. There are various small fixes here and there, removal of duplicated code, refactoring and moving code around as well as cleanup and review all the parameters used for all implemented commands. The parameters, default values and their behaviours were updated to match "new" life of Breeze rather than old one. Some improvements are made to the autocomplete and click help messages printed. Full list of choices is always displayed, parameters are groups according to their target audience, and they were sorted according to importance and frequency of use. Various messages have been colourised according to their meaning - warnings as yellow, errors as red and informational messages as bright_blue. The `dry-run` option has been added to just show what would have been run without actually running some potentially "write" commands (read commands are still executed) so that you can easily verify and manually copy and execute the commands with option to modify them before. The `dry_run` and `verbose` options are now used for all commands. The "main" command now runs "shell" by default similarly as the original Breeze. All "shortcut" parameters have been standardized - i.e common options (verbose/dry run/help) have one and all common flags that are likely to be used often have an assigned shortcute. The "stop" and "cleanup" command have been added as they are necessary for average user to complete the regular usage cycle. Documentation for all the important methods have been updated.
27
0
8,994
12
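A standalone sketch of what the commit_sha helper above does, using subprocess.run directly because Breeze's internal run_command wrapper is not shown in this record; the lru_cache decorator is an assumption based on the docstring's mention of caching.
import subprocess
from functools import lru_cache


@lru_cache(maxsize=None)  # assumed: the docstring says the SHA is cached for reuse
def commit_sha() -> str:
    # `git rev-parse HEAD` prints the SHA of the currently checked-out commit.
    result = subprocess.run(
        ["git", "rev-parse", "HEAD"], capture_output=True, text=True, check=False
    )
    return result.stdout.strip()


if __name__ == "__main__":
    print(commit_sha())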
11
23
def _copy_source_without_wildcard(self, hook, prefix):
    objects = hook.list(self.source_bucket, prefix=prefix, delimiter=self.delimiter)

    if not self.replace:
        # If we are not replacing, ignore files already existing in source buckets
        objects = self._ignore_existing_files(hook, prefix, objects=objects, delimiter=self.delimiter)

    # If objects is empty and we have prefix, let's check if prefix is a blob
    # and copy directly
    if len(objects) == 0 and prefix:
        if hook.exists(self.source_bucket, prefix):
            self._copy_single_object(
                hook=hook, source_object=prefix, destination_object=self.destination_object
            )
        elif self.source_object_required:
            msg = f"{prefix} does not exist in bucket {self.source_bucket}"
            self.log.warning(msg)
            raise AirflowException(msg)

    for source_obj in objects:
        if self.exact_match and (source_obj != prefix or not source_obj.endswith(prefix)):
            continue
        if self.destination_object is None:
            destination_object = source_obj
        else:
            destination_object = source_obj.replace(prefix, self.destination_object, 1)
        self._copy_single_object(
            hook=hook, source_object=source_obj, destination_object=destination_object
        )
airflow/providers/google/cloud/transfers/gcs_to_gcs.py
296
airflow
{ "docstring": "\n For source_objects with no wildcard, this operator would first list\n all files in source_objects, using provided delimiter if any. Then copy\n files from source_objects to destination_object and rename each source\n file.\n\n Example 1:\n\n\n The following Operator would copy all the files from ``a/``folder\n (i.e a/a.csv, a/b.csv, a/c.csv)in ``data`` bucket to the ``b/`` folder in\n the ``data_backup`` bucket (b/a.csv, b/b.csv, b/c.csv) ::\n\n copy_files = GCSToGCSOperator(\n task_id='copy_files_without_wildcard',\n source_bucket='data',\n source_objects=['a/'],\n destination_bucket='data_backup',\n destination_object='b/',\n gcp_conn_id=google_cloud_conn_id\n )\n\n Example 2:\n\n\n The following Operator would copy all avro files from ``a/``folder\n (i.e a/a.avro, a/b.avro, a/c.avro)in ``data`` bucket to the ``b/`` folder in\n the ``data_backup`` bucket (b/a.avro, b/b.avro, b/c.avro) ::\n\n copy_files = GCSToGCSOperator(\n task_id='copy_files_without_wildcard',\n source_bucket='data',\n source_objects=['a/'],\n destination_bucket='data_backup',\n destination_object='b/',\n delimiter='.avro',\n gcp_conn_id=google_cloud_conn_id\n )\n ", "language": "en", "n_whitespaces": 443, "n_words": 112, "vocab_size": 68 }
111
Python
77
ec84ffe71cfa8246155b9b4cb10bf2167e75adcf
gcs_to_gcs.py
42,927
23
185
_copy_source_without_wildcard
https://github.com/apache/airflow.git
Fix GCSToGCSOperator cannot copy a single file/folder without copying other files/folders with that prefix (#24039)
413
0
7,766
14
21
39
def _encode_files(files, data):
    if not files:
        raise ValueError("Files must be provided.")
    elif isinstance(data, basestring):
        raise ValueError("Data must not be a string.")

    new_fields = []
    fields = to_key_val_list(data or {})
    files = to_key_val_list(files or {})

    for field, val in fields:
        if isinstance(val, basestring) or not hasattr(val, "__iter__"):
            val = [val]
        for v in val:
            if v is not None:
                # Don't call str() on bytestrings: in Py3 it all goes wrong.
                if not isinstance(v, bytes):
                    v = str(v)

                new_fields.append(
                    (
                        field.decode("utf-8") if isinstance(field, bytes) else field,
                        v.encode("utf-8") if isinstance(v, str) else v,
                    )
                )

    for (k, v) in files:
        # support for explicit filename
        ft = None
        fh = None
        if isinstance(v, (tuple, list)):
            if len(v) == 2:
                fn, fp = v
            elif len(v) == 3:
                fn, fp, ft = v
            else:
                fn, fp, ft, fh = v
        else:
            fn = guess_filename(v) or k
            fp = v

        if isinstance(fp, (str, bytes, bytearray)):
            fdata = fp
        elif hasattr(fp, "read"):
            fdata = fp.read()
        elif fp is None:
            continue
        else:
            fdata = fp

        rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
        rf.make_multipart(content_type=ft)
        new_fields.append(rf)

    body, content_type = encode_multipart_formdata(new_fields)

    return body, content_type
pipenv/patched/pip/_vendor/requests/models.py
497
pipenv
{ "docstring": "Build the body for a multipart/form-data request.\n\n Will successfully encode files when passed as a dict or a list of\n tuples. Order is retained if data is a list of tuples but arbitrary\n if parameters are supplied as a dict.\n The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype)\n or 4-tuples (filename, fileobj, contentype, custom_headers).\n ", "language": "en", "n_whitespaces": 99, "n_words": 57, "vocab_size": 43 }
184
Python
106
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
models.py
22,102
49
313
_encode_files
https://github.com/pypa/pipenv.git
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
905
0
4,180
18
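The 2-, 3- and 4-tuple shapes described in the _encode_files docstring above map directly onto the public files= argument of requests; a small illustration (the URL and file names are placeholders):
import requests

files = {
    # 2-tuple: (filename, fileobj)
    "report": ("report.csv", open("report.csv", "rb")),
    # 3-tuple: adds an explicit content type
    "image": ("photo.png", open("photo.png", "rb"), "image/png"),
    # 4-tuple: adds custom headers for this multipart field
    "notes": ("notes.txt", b"some bytes", "text/plain", {"X-Example": "1"}),
}
response = requests.post("https://httpbin.org/post", files=files)
print(response.status_code)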
1
2
def sector(self):
    return self["sector"]
packages/python/plotly/plotly/graph_objs/layout/_polar.py
22
plotly.py
{ "docstring": "\n Sets angular span of this polar subplot with two angles (in\n degrees). Sector are assumed to be spanned in the\n counterclockwise direction with 0 corresponding to rightmost\n limit of the polar subplot.\n\n The 'sector' property is an info array that may be specified as:\n\n * a list or tuple of 2 elements where:\n (0) The 'sector[0]' property is a number and may be specified as:\n - An int or float\n (1) The 'sector[1]' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n list\n ", "language": "en", "n_whitespaces": 237, "n_words": 90, "vocab_size": 58 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_polar.py
231,632
2
11
sector
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
63,076
7
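A short usage sketch for the sector property above; the data points and the chosen half-plane are arbitrary examples.
import plotly.graph_objects as go

fig = go.Figure(go.Scatterpolar(r=[1, 2, 3], theta=[0, 45, 90], mode="markers"))
# Restrict the polar subplot to the right half-plane: the span runs
# counterclockwise from -90 to 90 degrees, as described in the docstring.
fig.update_layout(polar=dict(sector=[-90, 90]))
fig.show()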