Column schema (dtype and observed value or string-length range):

complexity       int64    1 to 56
n_identifiers    int64    1 to 114
code             string   lengths 19 to 12.7k
path             string   lengths 8 to 134
n_ast_nodes      int64    12 to 2.35k
ast_errors       string   lengths 0 to 4.01k
repo             string   lengths 3 to 28
documentation    dict
n_words          int64    2 to 866
language         string   1 distinct value
vocab_size       int64    2 to 323
commit_id        string   lengths 40 to 40
file_name        string   lengths 5 to 79
id               int64    243 to 338k
nloc             int64    1 to 228
token_counts     int64    5 to 1.4k
fun_name         string   lengths 1 to 77
url              string   lengths 31 to 60
commit_message   string   lengths 3 to 15.3k
n_whitespaces    int64    1 to 3.23k
n_ast_errors     int64    0 to 20
d_id             int64    74 to 121k
ast_levels       int64    4 to 29
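The columns above describe one record per extracted function. As a rough illustration of how records with this schema might be inspected, here is a minimal sketch using pandas; it assumes the rows have been exported to a local JSON Lines file, and the file name "records.jsonl" is a placeholder rather than anything stated in the source.

    # Minimal sketch, assuming a hypothetical local JSON Lines export of the rows.
    import pandas as pd

    df = pd.read_json("records.jsonl", lines=True)  # "records.jsonl" is a placeholder

    # Each row pairs a function's source ("code", "fun_name", "path", "repo", "url",
    # "commit_id") with size/complexity metrics ("complexity", "nloc", "token_counts",
    # "n_ast_nodes", "ast_levels") and docstring metadata ("documentation").
    print(df[["fun_name", "repo", "complexity", "nloc", "token_counts"]].head())

    # Example: the most complex functions recorded for a single repository.
    top = df[df["repo"] == "sympy"].sort_values("complexity", ascending=False)
    print(top[["fun_name", "complexity", "n_ast_nodes", "ast_levels"]].head())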
1
2
def tickangle(self):
    return self["tickangle"]
packages/python/plotly/plotly/graph_objs/bar/marker/_colorbar.py
22
plotly.py
{ "docstring": "\n Sets the angle of the tick labels with respect to the\n horizontal. For example, a `tickangle` of -90 draws the tick\n labels vertically.\n\n The 'tickangle' property is a angle (in degrees) that may be\n specified as a number between -180 and 180. Numeric values outside this\n range are converted to the equivalent value\n (e.g. 270 is converted to -90).\n\n Returns\n -------\n int|float\n ", "language": "en", "n_whitespaces": 140, "n_words": 62, "vocab_size": 48 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_colorbar.py
228,753
2
11
tickangle
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
60,426
7
1
11
def _reset_replica_iterator(self):
    replicas = list(self.in_flight_queries.keys())
    random.shuffle(replicas)
    self.replica_iterator = itertools.cycle(replicas)
python/ray/serve/_private/router.py
59
ray
{ "docstring": "Reset the iterator used to load balance replicas.\n\n This call is expected to be called after the replica membership has\n been updated. It will shuffle the replicas randomly to avoid multiple\n handle sending requests in the same order.\n ", "language": "en", "n_whitespaces": 66, "n_words": 38, "vocab_size": 33 }
9
Python
8
545c51609f0f55b41cf99cec95a9c21bee6846de
router.py
126,370
4
34
_reset_replica_iterator
https://github.com/ray-project/ray.git
[Serve] ServeHandle detects ActorError and drop replicas from target group (#26685)
37
0
28,152
11
5
62
def test_workflow_job_template_copy(workflow_job_template, post, get, admin, organization): workflow_job_template.organization = organization label = Label.objects.create(name="foobar", organization=organization) workflow_job_template.labels.add(label) ee = ExecutionEnvironment.objects.create(name="barfoo", organization=organization) workflow_job_template.execution_environment = ee ig = InstanceGroup.objects.create(name="bazbar", organization=organization) workflow_job_template.instance_groups.add(ig) workflow_job_template.save() jts = [JobTemplate.objects.create(name='test-jt-{}'.format(i)) for i in range(0, 5)] nodes = [WorkflowJobTemplateNode.objects.create(workflow_job_template=workflow_job_template, unified_job_template=jts[i]) for i in range(0, 5)] nodes[0].success_nodes.add(nodes[1]) nodes[1].success_nodes.add(nodes[2]) nodes[0].failure_nodes.add(nodes[3]) nodes[3].failure_nodes.add(nodes[4]) with mock.patch('awx.api.generics.trigger_delayed_deep_copy') as deep_copy_mock: wfjt_copy_id = post( reverse('api:workflow_job_template_copy', kwargs={'pk': workflow_job_template.pk}), {'name': 'new wfjt name'}, admin, expect=201 ).data['id'] wfjt_copy = type(workflow_job_template).objects.get(pk=wfjt_copy_id) args, kwargs = deep_copy_mock.call_args deep_copy_model_obj(*args, **kwargs) assert wfjt_copy.organization == organization assert wfjt_copy.created_by == admin assert wfjt_copy.name == 'new wfjt name' assert wfjt_copy.labels.count() != 0 assert wfjt_copy.labels.get(pk=label.pk) == label assert wfjt_copy.execution_environment == ee assert wfjt_copy.instance_groups.count() != 0 assert wfjt_copy.instance_groups.get(pk=ig.pk) == ig copied_node_list = [x for x in wfjt_copy.workflow_job_template_nodes.all()] copied_node_list.sort(key=lambda x: int(x.unified_job_template.name[-1])) for node, success_count, failure_count, always_count in zip(copied_node_list, [1, 1, 0, 0, 0], [1, 0, 0, 1, 0], [0, 0, 0, 0, 0]): assert node.success_nodes.count() == success_count assert node.failure_nodes.count() == failure_count assert node.always_nodes.count() == always_count assert copied_node_list[1] in copied_node_list[0].success_nodes.all() assert copied_node_list[2] in copied_node_list[1].success_nodes.all() assert copied_node_list[3] in copied_node_list[0].failure_nodes.all() assert copied_node_list[4] in copied_node_list[3].failure_nodes.all() @pytest.mark.django_db
awx/main/tests/functional/test_copy.py
837
@pytest.mark.django_db
awx
{ "docstring": "\n Tests the FIELDS_TO_PRESERVE_AT_COPY attribute on WFJTs\n ", "language": "en", "n_whitespaces": 13, "n_words": 6, "vocab_size": 6 }
169
Python
102
7de5f772626a00d31026270865276365287cbe37
test_copy.py
81,880
40
538
test_workflow_job_template_copy
https://github.com/ansible/awx.git
adding test coverage to ensure that FIELDS_TO_PRESERVE_AT_COPY is behaving as expected for WFJTs
328
1
17,273
18
1
10
def line_collection_2d_to_3d(col, zs=0, zdir='z'):
    segments3d = _paths_to_3d_segments(col.get_paths(), zs, zdir)
    col.__class__ = Line3DCollection
    col.set_segments(segments3d)
lib/mpl_toolkits/mplot3d/art3d.py
64
matplotlib
{ "docstring": "Convert a `.LineCollection` to a `.Line3DCollection` object.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 6 }
13
Python
12
df6f95703b60348e01603f98a439b133da2938a0
art3d.py
109,916
4
39
line_collection_2d_to_3d
https://github.com/matplotlib/matplotlib.git
Improve mpl_toolkit documentation
25
0
23,823
10
48
98
def rsolve_hyper(coeffs, f, n, **hints): r coeffs = list(map(sympify, coeffs)) f = sympify(f) r, kernel, symbols = len(coeffs) - 1, [], set() if not f.is_zero: if f.is_Add: similar = {} for g in f.expand().args: if not g.is_hypergeometric(n): return None for h in similar.keys(): if hypersimilar(g, h, n): similar[h] += g break else: similar[g] = S.Zero inhomogeneous = [] for g, h in similar.items(): inhomogeneous.append(g + h) elif f.is_hypergeometric(n): inhomogeneous = [f] else: return None for i, g in enumerate(inhomogeneous): coeff, polys = S.One, coeffs[:] denoms = [S.One]*(r + 1) s = hypersimp(g, n) for j in range(1, r + 1): coeff *= s.subs(n, n + j - 1) p, q = coeff.as_numer_denom() polys[j] *= p denoms[j] = q for j in range(r + 1): polys[j] *= Mul(*(denoms[:j] + denoms[j + 1:])) R = rsolve_poly(polys, Mul(*denoms), n) if not (R is None or R is S.Zero): inhomogeneous[i] *= R else: return None result = Add(*inhomogeneous) else: result = S.Zero Z = Dummy('Z') p, q = coeffs[0], coeffs[r].subs(n, n - r + 1) p_factors = [z for z in roots(p, n).keys()] q_factors = [z for z in roots(q, n).keys()] factors = [(S.One, S.One)] for p in p_factors: for q in q_factors: if p.is_integer and q.is_integer and p <= q: continue else: factors += [(n - p, n - q)] p = [(n - p, S.One) for p in p_factors] q = [(S.One, n - q) for q in q_factors] factors = p + factors + q for A, B in factors: polys, degrees = [], [] D = A*B.subs(n, n + r - 1) for i in range(r + 1): a = Mul(*[A.subs(n, n + j) for j in range(i)]) b = Mul(*[B.subs(n, n + j) for j in range(i, r)]) poly = quo(coeffs[i]*a*b, D, n) polys.append(poly.as_poly(n)) if not poly.is_zero: degrees.append(polys[i].degree()) if degrees: d, poly = max(degrees), S.Zero else: return None for i in range(r + 1): coeff = polys[i].nth(d) if coeff is not S.Zero: poly += coeff * Z**i for z in roots(poly, Z).keys(): if z.is_zero: continue recurr_coeffs = [polys[i].as_expr()*z**i for i in range(r + 1)] if d == 0 and 0 != Add(*[recurr_coeffs[j]*j for j in range(1, r + 1)]): # faster inline check (than calling rsolve_poly) for a # constant solution to a constant coefficient recurrence. sol = [Symbol("C" + str(len(symbols)))] else: sol, syms = rsolve_poly(recurr_coeffs, 0, n, len(symbols), symbols=True) sol = sol.collect(syms) sol = [sol.coeff(s) for s in syms] for C in sol: ratio = z * A * C.subs(n, n + 1) / B / C ratio = simplify(ratio) # If there is a nonnegative root in the denominator of the ratio, # this indicates that the term y(n_root) is zero, and one should # start the product with the term y(n_root + 1). n0 = 0 for n_root in roots(ratio.as_numer_denom()[1], n).keys(): if n_root.has(I): return None elif (n0 < (n_root + 1)) == True: n0 = n_root + 1 K = product(ratio, (n, n0, n - 1)) if K.has(factorial, FallingFactorial, RisingFactorial): K = simplify(K) if casoratian(kernel + [K], n, zero=False) != 0: kernel.append(K) kernel.sort(key=default_sort_key) sk = list(zip(numbered_symbols('C'), kernel)) if sk: for C, ker in sk: result += C * ker else: return None if hints.get('symbols', False): # XXX: This returns the symbols in a non-deterministic order symbols |= {s for s, k in sk} return (result, list(symbols)) else: return result
sympy/solvers/recurr.py
1,617
sympy
{ "docstring": "\n Given linear recurrence operator `\\operatorname{L}` of order `k`\n with polynomial coefficients and inhomogeneous equation\n `\\operatorname{L} y = f` we seek for all hypergeometric solutions\n over field `K` of characteristic zero.\n\n The inhomogeneous part can be either hypergeometric or a sum\n of a fixed number of pairwise dissimilar hypergeometric terms.\n\n The algorithm performs three basic steps:\n\n (1) Group together similar hypergeometric terms in the\n inhomogeneous part of `\\operatorname{L} y = f`, and find\n particular solution using Abramov's algorithm.\n\n (2) Compute generating set of `\\operatorname{L}` and find basis\n in it, so that all solutions are linearly independent.\n\n (3) Form final solution with the number of arbitrary\n constants equal to dimension of basis of `\\operatorname{L}`.\n\n Term `a(n)` is hypergeometric if it is annihilated by first order\n linear difference equations with polynomial coefficients or, in\n simpler words, if consecutive term ratio is a rational function.\n\n The output of this procedure is a linear combination of fixed\n number of hypergeometric terms. However the underlying method\n can generate larger class of solutions - D'Alembertian terms.\n\n Note also that this method not only computes the kernel of the\n inhomogeneous equation, but also reduces in to a basis so that\n solutions generated by this procedure are linearly independent\n\n Examples\n ========\n\n >>> from sympy.solvers import rsolve_hyper\n >>> from sympy.abc import x\n\n >>> rsolve_hyper([-1, -1, 1], 0, x)\n C0*(1/2 - sqrt(5)/2)**x + C1*(1/2 + sqrt(5)/2)**x\n\n >>> rsolve_hyper([-1, 1], 1 + x, x)\n C0 + x*(x + 1)/2\n\n References\n ==========\n\n .. [1] M. Petkovsek, Hypergeometric solutions of linear recurrences\n with polynomial coefficients, J. Symbolic Computation,\n 14 (1992), 243-264.\n\n .. [2] M. Petkovsek, H. S. Wilf, D. Zeilberger, A = B, 1996.\n ", "language": "en", "n_whitespaces": 443, "n_words": 270, "vocab_size": 169 }
553
Python
259
4a7c0c31501685f9d8e6572fe735b592a1fa3c33
recurr.py
198,001
164
1,051
rsolve_hyper
https://github.com/sympy/sympy.git
rsolve_hyper: take into account degenerate solutions This fixes sympy/sympy#8697: In [2]: rsolve(a(n + 3) - a(n + 2) - a(n + 1) + a(n), a(n)) Out[2]: n (-1) ⋅C₁ + C₀ + C₂⋅n Added also test from issue thread, which is not related to the problem. And from PR request diofant/diofant#442. Test for sympy/sympy#6844 was adapted.
1,808
0
48,766
20
8
22
def parse_targets(self, source):
    self.dist_log("looking for '@targets' inside -> ", source)
    # get lines between /*@targets and */
    with open(source) as fd:
        tokens = ""
        max_to_reach = 1000  # good enough, isn't?
        start_with = "@targets"
        start_pos = -1
        end_with = "*/"
        end_pos = -1
        for current_line, line in enumerate(fd):
            if current_line == max_to_reach:
                self.dist_fatal("reached the max of lines")
                break
            if start_pos == -1:
                start_pos = line.find(start_with)
                if start_pos == -1:
                    continue
                start_pos += len(start_with)
            tokens += line
            end_pos = line.find(end_with)
            if end_pos != -1:
                end_pos += len(tokens) - len(line)
                break

    if start_pos == -1:
        self.dist_fatal("expected to find '%s' within a C comment" % start_with)
    if end_pos == -1:
        self.dist_fatal("expected to end with '%s'" % end_with)

    tokens = tokens[start_pos:end_pos]
    return self._parse_target_tokens(tokens)

_parse_regex_arg = re.compile(r'\s|,|([+-])')
numpy/distutils/ccompiler_opt.py
305
numpy
{ "docstring": "\n Fetch and parse configuration statements that required for\n defining the targeted CPU features, statements should be declared\n in the top of source in between **C** comment and start\n with a special mark **@targets**.\n\n Configuration statements are sort of keywords representing\n CPU features names, group of statements and policies, combined\n together to determine the required optimization.\n\n Parameters\n ----------\n source : str\n the path of **C** source file.\n\n Returns\n -------\n - bool, True if group has the 'baseline' option\n - list, list of CPU features\n - list, list of extra compiler flags\n ", "language": "en", "n_whitespaces": 214, "n_words": 90, "vocab_size": 63 }
122
Python
78
f404e9e92e87a3990712d723d5c562a89300ac01
ccompiler_opt.py
160,174
29
165
parse_targets
https://github.com/numpy/numpy.git
Add space after argument name
511
0
38,546
15
1
8
def data_system_ping_fixture():
    return json.loads(load_fixture("system_ping_data.json", "guardian"))

@pytest.fixture(name="data_valve_status", scope="session")
tests/components/guardian/conftest.py
58
@pytest.fixture(name="data_valve_status", scope="session")
core
{ "docstring": "Define data from a successful system_ping response.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
7
Python
7
6bbe38578c74e5ecd8aadcd2cf39cddca8a59a52
conftest.py
310,457
2
17
data_system_ping_fixture
https://github.com/home-assistant/core.git
Add diagnostics to Elexa Guardian (#64599)
12
1
109,142
10
2
18
def _softmax(x, axis):
    if not dtypes.issubdtype(x.dtype, np.floating):
        raise TypeError(f"_softmax only accepts floating dtypes, got {x.dtype}")
    x_max = jnp.max(x, axis, keepdims=True)
    unnormalized = jnp.exp(x - lax.stop_gradient(x_max))
    return unnormalized / unnormalized.sum(axis, keepdims=True)
jax/_src/random.py
118
jax
{ "docstring": "Utility to compute the softmax of x along a given axis.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
30
Python
27
69969ef8031e424b19dd020a396b3fbdc25b703e
random.py
119,870
6
71
_softmax
https://github.com/google/jax.git
add random.loggamma and improve dirichlet & beta implementation
38
0
26,701
12
3
22
def fit(self, X, y=None):
    if not self.degree >= 1:
        raise ValueError(f"degree={self.degree} should be >=1.")

    X = self._validate_data(X, accept_sparse="csc")
    random_state = check_random_state(self.random_state)

    n_features = X.shape[1]
    if self.coef0 != 0:
        n_features += 1

    self.indexHash_ = random_state.randint(
        0, high=self.n_components, size=(self.degree, n_features)
    )
    self.bitHash_ = random_state.choice(a=[-1, 1], size=(self.degree, n_features))
    self._n_features_out = self.n_components
    return self
sklearn/kernel_approximation.py
202
scikit-learn
{ "docstring": "Fit the model with X.\n\n Initializes the internal variables. The method needs no information\n about the distribution of data, so we only care about n_features in X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs), \\\n default=None\n Target values (None for unsupervised transformations).\n\n Returns\n -------\n self : object\n Returns the instance itself.\n ", "language": "en", "n_whitespaces": 209, "n_words": 80, "vocab_size": 61 }
50
Python
42
d616e43947340e152e4a901931e954d699368fa9
kernel_approximation.py
259,122
14
126
fit
https://github.com/scikit-learn/scikit-learn.git
ENH Adds feature_names_out for most of kernel_approximation (#22694)
160
0
75,581
12
6
23
def _poll_with_exponential_delay(request, execute_num_retries, max_n, is_done_func, is_error_func):
    for i in range(0, max_n):
        try:
            response = request.execute(num_retries=execute_num_retries)
            if is_error_func(response):
                raise ValueError(f'The response contained an error: {response}')
            if is_done_func(response):
                log.info('Operation is done: %s', response)
                return response
            time.sleep((2 ** i) + (random.randint(0, 1000) / 1000))
        except HttpError as e:
            if e.resp.status != 429:
                log.info('Something went wrong. Not retrying: %s', format(e))
                raise
            else:
                time.sleep((2 ** i) + (random.randint(0, 1000) / 1000))

    raise ValueError(f'Connection could not be established after {max_n} retries.')
airflow/providers/google/cloud/hooks/mlengine.py
238
airflow
{ "docstring": "\n Execute request with exponential delay.\n\n This method is intended to handle and retry in case of api-specific errors,\n such as 429 \"Too Many Requests\", unlike the `request.execute` which handles\n lower level errors like `ConnectionError`/`socket.timeout`/`ssl.SSLError`.\n\n :param request: request to be executed.\n :param execute_num_retries: num_retries for `request.execute` method.\n :param max_n: number of times to retry request in this method.\n :param is_done_func: callable to determine if operation is done.\n :param is_error_func: callable to determine if operation is failed.\n :return: response\n :rtype: httplib2.Response\n ", "language": "en", "n_whitespaces": 116, "n_words": 79, "vocab_size": 58 }
75
Python
60
602abe8394fafe7de54df7e73af56de848cdf617
mlengine.py
44,110
17
144
_poll_with_exponential_delay
https://github.com/apache/airflow.git
Remove `:type` lines now sphinx-autoapi supports typehints (#20951) * Remove `:type` lines now sphinx-autoapi supports typehints Since we have no updated sphinx-autoapi to a more recent version it supports showing type hints in the documentation, so we don't need to have the type hints _and_ the `:type` lines -- which is good, as the ones in the doc strings are easy to get out of date! The following settings have been set: `autodoc_typehints = 'description'` -- show types in description (where previous `:type` used to show up) `autodoc_typehints_description_target = 'documented'` -- only link to types that are documented. (Without this we have some missing return types that aren't documented, and aren't linked to in our current python API docs, so this caused a build failure) `autodoc_typehints_format = 'short'` -- Shorten type hints where possible, i.e. `StringIO` instead of `io.StringIO` * Add argument type names to local spelling dictionary Now that we are using the type hints in the docs, sphinxcontrib-spelling picks them up as words to be checked, so we have to ignore them. I've chosen to add the provider specific ones to local dictionary files rather than the global, as for example, `mgmt` is an error in most places, but not in some of the Azure provider.
254
0
8,160
20
13
16
def get_change_message(self): if self.change_message and self.change_message[0] == "[": try: change_message = json.loads(self.change_message) except json.JSONDecodeError: return self.change_message messages = [] for sub_message in change_message: if "added" in sub_message: if sub_message["added"]: sub_message["added"]["name"] = gettext( sub_message["added"]["name"] ) messages.append( gettext("Added {name} “{object}”.").format( **sub_message["added"] ) ) else: messages.append(gettext("Added.")) elif "changed" in sub_message: sub_message["changed"]["fields"] = get_text_list( [ gettext(field_name) for field_name in sub_message["changed"]["fields"] ], gettext("and"), ) if "name" in sub_message["changed"]: sub_message["changed"]["name"] = gettext( sub_message["changed"]["name"] ) messages.append( gettext("Changed {fields} for {name} “{object}”.").format( **sub_message["changed"] ) ) else: messages.append( gettext("Changed {fields}.").format( **sub_message["changed"] ) ) elif "deleted" in sub_message: sub_message["deleted"]["name"] = gettext( sub_message["deleted"]["name"] ) messages.append( gettext("Deleted {name} “{object}”.").format( **sub_message["deleted"] ) ) change_message = " ".join(msg[0].upper() + msg[1:] for msg in messages) return change_message or gettext("No fields changed.") else: return self.change_message
django/contrib/admin/models.py
520
django
{ "docstring": "\n If self.change_message is a JSON structure, interpret it as a change\n string, properly translated.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
119
Python
64
9c19aff7c7561e3a82978a272ecdaad40dda5c00
models.py
203,402
56
289
get_change_message
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
1,263
0
50,352
22
21
64
def call_ef(self, other_args): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="ef", description=, ) parser.add_argument( "-p", "--period", default=self.params["historic_period"] if "historic_period" in self.params else "3y", dest="historic_period", help=, ) parser.add_argument( "-s", "--start", default=self.params["start_period"] if "start_period" in self.params else "", dest="start_period", help=, ) parser.add_argument( "-e", "--end", default=self.params["end_period"] if "end_period" in self.params else "", dest="end_period", help=, ) parser.add_argument( "-lr", "--log-returns", action="store_true", default=self.params["log_returns"] if "log_returns" in self.params else False, dest="log_returns", help="If use logarithmic or arithmetic returns to calculate returns", ) parser.add_argument( "-f", "--freq", default=self.params["return_frequency"] if "return_frequency" in self.params else "d", dest="return_frequency", help=, choices=self.FREQ_CHOICES, ) parser.add_argument( "-mn", "--maxnan", type=float, default=self.params["max_nan"] if "max_nan" in self.params else 0.05, dest="max_nan", help=, ) parser.add_argument( "-th", "--threshold", type=float, default=self.params["threshold_value"] if "threshold_value" in self.params else 0.30, dest="threshold_value", help=, ) parser.add_argument( "-mt", "--method", default=self.params["nan_fill_method"] if "nan_fill_method" in self.params else "time", dest="nan_fill_method", help=, ) parser.add_argument( "-rm", "--risk-measure", default=self.params["risk_measure"] if "risk_measure" in self.params else "MV", dest="risk_measure", help=, choices=self.MEAN_RISK_CHOICES, ) parser.add_argument( "-r", "--risk-free-rate", type=float, dest="risk_free", default=self.params["risk_free"] if "risk_free" in self.params else get_rf(), help=, ) parser.add_argument( "-a", "--alpha", type=float, default=self.params["significance_level"] if "significance_level" in self.params else 0.05, dest="significance_level", help="Significance level of CVaR, EVaR, CDaR and EDaR", ) parser.add_argument( "-v", "--value", dest="long_allocation", help="Amount to allocate to portfolio in long positions", type=float, default=self.params["long_allocation"] if "long_allocation" in self.params else 1, ) parser.add_argument( "-vs", "--value-short", dest="short_allocation", help="Amount to allocate to portfolio in short positions", type=float, default=self.params["short_allocation"] if "short_allocation" in self.params else 0.0, ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-n") parser.add_argument( "-n", "--number-portfolios", default=self.params["amount_portfolios"] if "amount_portfolios" in self.params else 100, type=check_non_negative, dest="amount_portfolios", help="Number of portfolios to simulate", ) parser.add_argument( "-se", "--seed", default=self.params["random_seed"] if "random_seed" in self.params else 123, type=check_non_negative, dest="random_seed", help="Seed used to generate random portfolios", ) parser.add_argument( "-t", "--tangency", action="store_true", dest="tangency", default=self.params["tangency"] if "tangency" in self.params else False, help="Adds the optimal line with the risk-free asset", ) ns_parser = parse_known_args_and_warn(parser, other_args) if ns_parser: if len(self.tickers) < 2: console.print( "Please have at least 2 loaded tickers to calculate weights.\n" ) return optimizer_view.display_ef( stocks=self.tickers, period=ns_parser.historic_period, 
start=ns_parser.start_period, end=ns_parser.end_period, log_returns=ns_parser.log_returns, freq=ns_parser.return_frequency, maxnan=ns_parser.max_nan, threshold=ns_parser.threshold_value, method=ns_parser.nan_fill_method, risk_measure=ns_parser.risk_measure.lower(), risk_free_rate=ns_parser.risk_free, alpha=ns_parser.significance_level, value=ns_parser.long_allocation, value_short=ns_parser.short_allocation, n_portfolios=ns_parser.amount_portfolios, seed=ns_parser.random_seed, tangency=ns_parser.tangency, )
openbb_terminal/portfolio/portfolio_optimization/po_controller.py
1,304
OpenBBTerminal
{ "docstring": "Process ef commandThis function plots random portfolios based on their\n risk and returns and shows the efficient frontier.Period to get yfinance data from.\n Possible frequency strings are:\n 'd': means days, for example '252d' means 252 days\n 'w': means weeks, for example '52w' means 52 weeks\n 'mo': means months, for example '12mo' means 12 months\n 'y': means years, for example '1y' means 1 year\n 'ytd': downloads data from beginning of year to today\n 'max': downloads all data available for each assetStart date to get yfinance data from. Must be in\n 'YYYY-MM-DD' formatEnd date to get yfinance data from. Must be in\n 'YYYY-MM-DD' formatFrequency used to calculate returns. Possible values are:\n 'd': for daily returns\n 'w': for weekly returns\n 'm': for monthly returns\n Max percentage of nan values accepted per asset to be\n considered in the optimization processValue used to replace outliers that are higher to threshold\n in absolute valueMethod used to fill nan values in time series, by default time.\n Possible values are:\n 'linear': linear interpolation\n 'time': linear interpolation based on time index\n 'nearest': use nearest value to replace nan values\n 'zero': spline of zeroth order\n 'slinear': spline of first order\n 'quadratic': spline of second order\n 'cubic': spline of third order\n 'barycentric': builds a polynomial that pass for all pointsRisk measure used to optimize the portfolio. Possible values are:\n 'MV' : Variance\n 'MAD' : Mean Absolute Deviation\n 'MSV' : Semi Variance (Variance of negative returns)\n 'FLPM' : First Lower Partial Moment\n 'SLPM' : Second Lower Partial Moment\n 'CVaR' : Conditional Value at Risk\n 'EVaR' : Entropic Value at Risk\n 'WR' : Worst Realization\n 'ADD' : Average Drawdown of uncompounded returns\n 'UCI' : Ulcer Index of uncompounded returns\n 'CDaR' : Conditional Drawdown at Risk of uncompounded returns\n 'EDaR' : Entropic Drawdown at Risk of uncompounded returns\n 'MDD' : Maximum Drawdown of uncompounded returns\n Risk-free rate of borrowing/lending. The period of the\n risk-free rate must be annual", "language": "en", "n_whitespaces": 1057, "n_words": 314, "vocab_size": 174 }
327
Python
194
34bc290dded1bd2418fc3c6b375a79f9cdd68d5a
po_controller.py
284,355
223
800
call_ef
https://github.com/OpenBB-finance/OpenBBTerminal.git
New portfolio optimization menu (#1642) * New-Portfolio-Optimization-Menu * New-Portfolio-Optimization-Menu * New-Portfolio-Optimization-Menu * Update _index.md * New-Portfolio-Optimization-Menu * New-Portfolio-Optimization-Menu * configure portfolio optimization parameters ini * minor improvement * Revert "New-Portfolio-Optimization-Menu" This reverts commit b4b7169cfbc8f28c379eb1920307c2cdd2e47a0f. * Add in Excel functionality and improve the capabilities * Add Excel load function * Tidying up the functions * New-Portfolio-Optimization-Menu * New-Portfolio-Optimization-Menu * New-Portfolio-Optimization-Menu * Re-add my code * Some spacing and details * Add folder structure for portfolio * Update terminal file loading * New-Portfolio-Optimization-Menu * Make it possible to move from params to po with loaded file * New-Portfolio-Optimization-Menu * New-Portfolio-Optimization-Menu * Making the connection between the parameters file and the functions * Add in allocation and new params files * Improve params default settings * New-Portfolio-Optimization-Menu * Update Portfolios and Params sheets * Update sheets * Update command to load in correct sheet * Adjust function to only read specific columns * Update portfolio * Small correction * New-Portfolio-Optimization-Menu * New-Portfolio-Optimization-Menu * New-Portfolio-Optimization-Menu * Patched up show error * Add Equity portfolio * Make functions more robust * New-Portfolio-Optimization-Menu * New-Portfolio-Optimization-Menu * Add in Params documentation * Fixing Linting * Add in Requirements and Poetry Updates * Update website * linting * Update tests * Minor fix * remove unneccesary READMEs * Remove expected variable type * Improve documentation * Clean up the code * Refractoring * Adjust names to make it OS friendly Co-authored-by: Jeroen Bouma <[email protected]> Co-authored-by: jmaslek <[email protected]> Co-authored-by: Colin Delahunty <[email protected]> Co-authored-by: DidierRLopes <[email protected]>
2,276
0
84,706
13
1
4
def get(cls):
    min_partition_size = super().get()
    assert min_partition_size > 0, "`min_partition_size` should be > 0"
    return min_partition_size
modin/config/envvars.py
42
modin
{ "docstring": "\n Get ``MinPartitionSize`` with extra checks.\n\n Returns\n -------\n int\n ", "language": "en", "n_whitespaces": 44, "n_words": 8, "vocab_size": 8 }
16
Python
13
0bdc482d6f1682e103b4c4d7ee7c4d505d2d3b1c
envvars.py
152,965
4
23
get
https://github.com/modin-project/modin.git
REFACTOR-#3768: change 'compute_chunksize' signature (#3769) Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Anatoly Myachev <[email protected]>
44
0
35,209
10
1
3
def _get_loss(self):
    return HalfSquaredError()

# TODO(1.3): remove
sklearn/linear_model/_glm/glm.py
21
scikit-learn
{ "docstring": "This is only necessary because of the link and power arguments of the\n TweedieRegressor.\n\n Note that we do not need to pass sample_weight to the loss class as this is\n only needed to set loss.constant_hessian on which GLMs do not rely.\n ", "language": "en", "n_whitespaces": 69, "n_words": 41, "vocab_size": 32 }
7
Python
7
75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc
glm.py
259,436
2
10
_get_loss
https://github.com/scikit-learn/scikit-learn.git
ENH migrate GLMs / TweedieRegressor to linear loss (#22548) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]>
24
0
75,770
7
9
24
def update_last_purchase_rate(doc, is_submit):
    import frappe.utils

    this_purchase_date = frappe.utils.getdate(doc.get("posting_date") or doc.get("transaction_date"))

    for d in doc.get("items"):
        # get last purchase details
        last_purchase_details = get_last_purchase_details(d.item_code, doc.name)

        # compare last purchase date and this transaction's date
        last_purchase_rate = None
        if last_purchase_details and (
            doc.get("docstatus") == 2 or last_purchase_details.purchase_date > this_purchase_date
        ):
            last_purchase_rate = last_purchase_details["base_net_rate"]
        elif is_submit == 1:
            # even if this transaction is the latest one, it should be submitted
            # for it to be considered for latest purchase rate
            if flt(d.conversion_factor):
                last_purchase_rate = flt(d.base_net_rate) / flt(d.conversion_factor)
            # Check if item code is present
            # Conversion factor should not be mandatory for non itemized items
            elif d.item_code:
                frappe.throw(_("UOM Conversion factor is required in row {0}").format(d.idx))

        # update last purchsae rate
        frappe.db.set_value("Item", d.item_code, "last_purchase_rate", flt(last_purchase_rate))
erpnext/buying/utils.py
263
erpnext
{ "docstring": "updates last_purchase_rate in item table for each item", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 7 }
121
Python
80
494bd9ef78313436f0424b918f200dab8fc7c20b
utils.py
65,607
16
153
update_last_purchase_rate
https://github.com/frappe/erpnext.git
style: format code with black
98
0
13,953
20
6
14
def clean_subpage_models(cls):
    if cls._clean_subpage_models is None:
        subpage_types = getattr(cls, "subpage_types", None)
        if subpage_types is None:
            # if subpage_types is not specified on the Page class, allow all page types as subpages
            cls._clean_subpage_models = get_page_models()
        else:
            cls._clean_subpage_models = [
                resolve_model_string(model_string, cls._meta.app_label)
                for model_string in subpage_types
            ]

            for model in cls._clean_subpage_models:
                if not issubclass(model, Page):
                    raise LookupError("%s is not a Page subclass" % model)

    return cls._clean_subpage_models
wagtail/core/models/__init__.py
137
wagtail
{ "docstring": "\n Returns the list of subpage types, normalised as model classes.\n Throws ValueError if any entry in subpage_types cannot be recognised as a model name,\n or LookupError if a model does not exist (or is not a Page subclass).\n ", "language": "en", "n_whitespaces": 67, "n_words": 38, "vocab_size": 31 }
64
Python
44
d10f15e55806c6944827d801cd9c2d53f5da4186
__init__.py
73,792
14
84
clean_subpage_models
https://github.com/wagtail/wagtail.git
Reformat with black
273
0
16,113
18
1
8
def test_pandas_contiguous_dtypes():
    pd = pytest.importorskip("pandas")
    df1 = pd.DataFrame([[1, 2.2], [3, 4.4]])
    df2 = pd.DataFrame([[1.1, 2.2], [3.3, 4.4]])
    assert sizeof(df2) < sizeof(df1)
dask/tests/test_sizeof.py
99
dask
{ "docstring": "2+ contiguous columns of the same dtype in the same DataFrame share the same\n surface thus have lower overhead\n ", "language": "en", "n_whitespaces": 25, "n_words": 19, "vocab_size": 15 }
21
Python
17
80dd84d46ef6b7befa1b416c4597c83ef81ef972
test_sizeof.py
157,273
5
75
test_pandas_contiguous_dtypes
https://github.com/dask/dask.git
Deflate sizeof() of duplicate references to pandas object types (#9776)
36
0
36,896
10
1
2
def namelengthsrc(self):
    return self["namelengthsrc"]
packages/python/plotly/plotly/graph_objs/bar/_hoverlabel.py
22
plotly.py
{ "docstring": "\n Sets the source reference on Chart Studio Cloud for\n `namelength`.\n\n The 'namelengthsrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ", "language": "en", "n_whitespaces": 84, "n_words": 27, "vocab_size": 25 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_hoverlabel.py
228,667
2
11
namelengthsrc
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
60,340
7
1
9
def list(self, directory='""', pattern='*'):
    name = 'LIST'
    typ, dat = self._simple_command(name, directory, pattern)
    return self._untagged_response(typ, dat, name)
python3.10.4/Lib/imaplib.py
69
XX-Net
{ "docstring": "List mailbox names in directory matching pattern.\n\n (typ, [data]) = <instance>.list(directory='\"\"', pattern='*')\n\n 'data' is list of LIST responses.\n ", "language": "en", "n_whitespaces": 39, "n_words": 18, "vocab_size": 18 }
17
Python
16
8198943edd73a363c266633e1aa5b2a9e9c9f526
imaplib.py
217,896
4
42
list
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
45
0
54,999
8
2
9
def deploy(self) -> Pipeline:
    [node.deploy() for node in self._incoming_edges]
    self._executor = create_executor_from_step_config(
        self._serialized_callable_factory, self._config
    )
    return Pipeline(self)
python/ray/serve/pipeline/node.py
65
ray
{ "docstring": "Instantiates executors for this and all dependent nodes.\n\n After the pipeline is deployed, .call() and .call_async() can be used.\n ", "language": "en", "n_whitespaces": 33, "n_words": 19, "vocab_size": 18 }
17
Python
17
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
node.py
130,920
10
40
deploy
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
63
0
29,427
9
1
15
async def test_only_migrate_once(hass, utcnow):
    entity_registry = er.async_get(hass)
    aid = get_next_aid()
    old_light_entry = entity_registry.async_get_or_create(
        "light",
        "homekit_controller",
        f"homekit-00:00:00:00:00:00-{aid}-8",
    )
    new_light_entry = entity_registry.async_get_or_create(
        "light",
        "homekit_controller",
        f"00:00:00:00:00:00_{aid}_8",
    )
    await setup_test_component(hass, create_lightbulb_service_with_color_temp)

    assert (
        entity_registry.async_get(old_light_entry.entity_id).unique_id
        == f"homekit-00:00:00:00:00:00-{aid}-8"
    )

    assert (
        entity_registry.async_get(new_light_entry.entity_id).unique_id
        == f"00:00:00:00:00:00_{aid}_8"
    )
tests/components/homekit_controller/test_light.py
163
core
{ "docstring": "Test a we handle migration happening after an upgrade and than a downgrade and then an upgrade.", "language": "en", "n_whitespaces": 16, "n_words": 17, "vocab_size": 14 }
39
Python
27
f23b1750e85f07091eb896a0b12b8f95e5646338
test_light.py
288,895
22
88
test_only_migrate_once
https://github.com/home-assistant/core.git
Migrate HomeKit Controller to use stable identifiers (#80064)
145
0
88,044
11
1
8
def expectation(self, expr, condition=None, evaluate=True, **kwargs):
    return _SubstituteRV._expectation(expr, condition, evaluate, **kwargs)
sympy/stats/stochastic_process_types.py
48
sympy
{ "docstring": "\n Computes expectation.\n\n Parameters\n ==========\n\n expr : RandomIndexedSymbol, Relational, Logic\n Condition for which expectation has to be computed. Must\n contain a RandomIndexedSymbol of the process.\n condition : Relational, Logic\n The given conditions under which computations should be done.\n\n Returns\n =======\n\n Expectation of the RandomIndexedSymbol.\n\n ", "language": "en", "n_whitespaces": 140, "n_words": 43, "vocab_size": 36 }
11
Python
11
7fe8e027ae1d7f683243c0229b961671a6cbb4c5
stochastic_process_types.py
197,542
2
33
expectation
https://github.com/sympy/sympy.git
Improved some documentation in the stats module
25
0
48,620
8
2
11
def slice_inputs(self, indices_dataset, inputs):
    dataset = tf.data.Dataset.zip(
        (indices_dataset, tf.data.Dataset.from_tensors(inputs).repeat())
    )
keras/engine/data_adapter.py
62
keras
{ "docstring": "Slice inputs into a Dataset of batches.\n\n Given a Dataset of batch indices and the unsliced inputs,\n this step slices the inputs in a parallelized fashion\n and produces a dataset of input batches.\n\n Args:\n indices_dataset: A Dataset of batched indices\n inputs: A python data structure that contains the inputs, targets,\n and possibly sample weights.\n\n Returns:\n A Dataset of input batches matching the batch indices.\n ", "language": "en", "n_whitespaces": 144, "n_words": 64, "vocab_size": 41 }
10
Python
10
84afc5193d38057e2e2badf9c889ea87d80d8fbf
data_adapter.py
271,113
14
103
slice_inputs
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
42
0
80,693
15
1
2
def operation(self):
    return self["operation"]
packages/python/plotly/plotly/graph_objs/contour/_contours.py
22
plotly.py
{ "docstring": "\n Sets the constraint operation. \"=\" keeps regions equal to\n `value` \"<\" and \"<=\" keep regions less than `value` \">\" and\n \">=\" keep regions greater than `value` \"[]\", \"()\", \"[)\", and\n \"(]\" keep regions inside `value[0]` to `value[1]` \"][\", \")(\",\n \"](\", \")[\" keep regions outside `value[0]` to value[1]` Open\n vs. closed intervals make no difference to constraint display,\n but all versions are allowed for consistency with filter\n transforms.\n\n The 'operation' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['=', '<', '>=', '>', '<=', '[]', '()', '[)', '(]', '][',\n ')(', '](', ')[']\n\n Returns\n -------\n Any\n ", "language": "en", "n_whitespaces": 232, "n_words": 101, "vocab_size": 82 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_contours.py
229,518
2
11
operation
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
61,191
7
1
2
def test_generic_inline_model_admin_bad_fk_field(self):
tests/admin_checks/tests.py
13
django
{ "docstring": "\n A GenericInlineModelAdmin errors if the ct_fk_field points to a\n nonexistent field.\n ", "language": "en", "n_whitespaces": 33, "n_words": 11, "vocab_size": 11 }
2
Python
2
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,036
15
72
test_generic_inline_model_admin_bad_fk_field
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
9
0
51,842
6
12
19
def evaluate(self, expr, context):
    if isinstance(expr, string_types):
        if expr[0] in '\'"':
            result = expr[1:-1]
        else:
            if expr not in context:
                raise SyntaxError('unknown variable: %s' % expr)
            result = context[expr]
    else:
        assert isinstance(expr, dict)
        op = expr['op']
        if op not in self.operations:
            raise NotImplementedError('op not implemented: %s' % op)
        elhs = expr['lhs']
        erhs = expr['rhs']
        if _is_literal(expr['lhs']) and _is_literal(expr['rhs']):
            raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs))

        lhs = self.evaluate(elhs, context)
        rhs = self.evaluate(erhs, context)
        if ((elhs == 'python_version' or erhs == 'python_version') and
                op in ('<', '<=', '>', '>=', '===', '==', '!=', '~=')):
            lhs = NV(lhs)
            rhs = NV(rhs)
        elif elhs == 'python_version' and op in ('in', 'not in'):
            lhs = NV(lhs)
            rhs = _get_versions(rhs)
        result = self.operations[op](lhs, rhs)
    return result
pipenv/patched/notpip/_vendor/distlib/markers.py
395
pipenv
{ "docstring": "\n Evaluate a marker expression returned by the :func:`parse_requirement`\n function in the specified context.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 12 }
123
Python
73
f3166e673fe8d40277b804d35d77dcdb760fc3b3
markers.py
20,032
28
233
evaluate
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
463
0
3,185
16
9
18
def _is_function_class_equation(func_class, f, symbol):
    if f.is_Mul or f.is_Add:
        return all(_is_function_class_equation(func_class, arg, symbol)
                   for arg in f.args)

    if f.is_Pow:
        if not f.exp.has(symbol):
            return _is_function_class_equation(func_class, f.base, symbol)
        else:
            return False

    if not f.has(symbol):
        return True

    if isinstance(f, func_class):
        try:
            g = Poly(f.args[0], symbol)
            return g.degree() <= 1
        except PolynomialError:
            return False
    else:
        return False
sympy/solvers/solveset.py
185
sympy
{ "docstring": " Tests whether the equation is an equation of the given function class.\n\n The given equation belongs to the given function class if it is\n comprised of functions of the function class which are multiplied by\n or added to expressions independent of the symbol. In addition, the\n arguments of all such functions must be linear in the symbol as well.\n\n Examples\n ========\n\n >>> from sympy.solvers.solveset import _is_function_class_equation\n >>> from sympy import tan, sin, tanh, sinh, exp\n >>> from sympy.abc import x\n >>> from sympy.functions.elementary.trigonometric import TrigonometricFunction\n >>> from sympy.functions.elementary.hyperbolic import HyperbolicFunction\n >>> _is_function_class_equation(TrigonometricFunction, exp(x) + tan(x), x)\n False\n >>> _is_function_class_equation(TrigonometricFunction, tan(x) + sin(x), x)\n True\n >>> _is_function_class_equation(TrigonometricFunction, tan(x**2), x)\n False\n >>> _is_function_class_equation(TrigonometricFunction, tan(x + 2), x)\n True\n >>> _is_function_class_equation(HyperbolicFunction, tanh(x) + sinh(x), x)\n True\n ", "language": "en", "n_whitespaces": 190, "n_words": 123, "vocab_size": 73 }
52
Python
35
e0dc14eca132f37c5f49369eb4051eae37c9b119
solveset.py
197,067
19
119
_is_function_class_equation
https://github.com/sympy/sympy.git
Refactored import ordering in functions
192
0
48,321
14
6
21
def update_connected_interfaces(instance, created, raw=False, **kwargs):
    logger = logging.getLogger('netbox.wireless.wirelesslink')
    if raw:
        logger.debug(f"Skipping endpoint updates for imported wireless link {instance}")
        return

    if instance.interface_a.wireless_link != instance:
        logger.debug(f"Updating interface A for wireless link {instance}")
        instance.interface_a.wireless_link = instance
        instance.interface_a._link_peer = instance.interface_b
        instance.interface_a.save()
    if instance.interface_b.cable != instance:
        logger.debug(f"Updating interface B for wireless link {instance}")
        instance.interface_b.wireless_link = instance
        instance.interface_b._link_peer = instance.interface_a
        instance.interface_b.save()

    # Create/update cable paths
    if created:
        for interface in (instance.interface_a, instance.interface_b):
            create_cablepath([interface])

@receiver(post_delete, sender=WirelessLink)
netbox/wireless/signals.py
244
@receiver(post_delete, sender=WirelessLink)
netbox
{ "docstring": "\n When a WirelessLink is saved, save a reference to it on each connected interface.\n ", "language": "en", "n_whitespaces": 21, "n_words": 14, "vocab_size": 13 }
69
Python
46
951627093c11584ffb73ad2be2aef40a91a90934
signals.py
264,921
18
134
update_connected_interfaces
https://github.com/netbox-community/netbox.git
Test cleanup
177
1
77,914
12
19
39
def get_payload(self, i=None, decode=False): # Here is the logic table for this code, based on the email5.0.0 code: # i decode is_multipart result # ------ ------ ------------ ------------------------------ # None True True None # i True True None # None False True _payload (a list) # i False True _payload element i (a Message) # i False False error (not a list) # i True False error (not a list) # None False False _payload # None True False _payload decoded (bytes) # Note that Barry planned to factor out the 'decode' case, but that # isn't so easy now that we handle the 8 bit data, which needs to be # converted in both the decode and non-decode path. if self.is_multipart(): if decode: return None if i is None: return self._payload else: return self._payload[i] # For backward compatibility, Use isinstance and this error message # instead of the more logical is_multipart test. if i is not None and not isinstance(self._payload, list): raise TypeError('Expected list, got %s' % type(self._payload)) payload = self._payload # cte might be a Header, so for now stringify it. cte = str(self.get('content-transfer-encoding', '')).lower() # payload may be bytes here. if isinstance(payload, str): if utils._has_surrogates(payload): bpayload = payload.encode('ascii', 'surrogateescape') if not decode: try: payload = bpayload.decode(self.get_param('charset', 'ascii'), 'replace') except LookupError: payload = bpayload.decode('ascii', 'replace') elif decode: try: bpayload = payload.encode('ascii') except UnicodeError: # This won't happen for RFC compliant messages (messages # containing only ASCII code points in the unicode input). # If it does happen, turn the string into bytes in a way # guaranteed not to fail. bpayload = payload.encode('raw-unicode-escape') if not decode: return payload if cte == 'quoted-printable': return quopri.decodestring(bpayload) elif cte == 'base64': # XXX: this is a bit of a hack; decode_b should probably be factored # out somewhere, but I haven't figured out where yet. value, defects = decode_b(b''.join(bpayload.splitlines())) for defect in defects: self.policy.handle_defect(self, defect) return value elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'): in_file = BytesIO(bpayload) out_file = BytesIO() try: uu.decode(in_file, out_file, quiet=True) return out_file.getvalue() except uu.Error: # Some decoding problem return bpayload if isinstance(payload, str): return bpayload return payload
python3.10.4/Lib/email/message.py
534
XX-Net
{ "docstring": "Return a reference to the payload.\n\n The payload will either be a list object or a string. If you mutate\n the list object, you modify the message's payload in place. Optional\n i returns that index into the payload.\n\n Optional decode is a flag indicating whether the payload should be\n decoded or not, according to the Content-Transfer-Encoding header\n (default is False).\n\n When True and the message is not a multipart, the payload will be\n decoded if this header's value is `quoted-printable' or `base64'. If\n some other encoding is used, or the header is missing, or if the\n payload has bogus data (i.e. bogus base64 or uuencoded data), the\n payload is returned as-is.\n\n If the message is a multipart and the decode flag is True, then None\n is returned.\n ", "language": "en", "n_whitespaces": 228, "n_words": 127, "vocab_size": 73 }
350
Python
187
8198943edd73a363c266633e1aa5b2a9e9c9f526
message.py
223,818
45
301
get_payload
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
1,271
0
57,083
19
1
2
def notched(self):
    return self["notched"]
packages/python/plotly/plotly/graph_objs/_box.py
22
plotly.py
{ "docstring": "\n Determines whether or not notches are drawn. Notches displays a\n confidence interval around the median. We compute the\n confidence interval as median +/- 1.57 * IQR / sqrt(N), where\n IQR is the interquartile range and N is the sample size. If two\n boxes' notches do not overlap there is 95% confidence their\n medians differ. See\n https://sites.google.com/site/davidsstatistics/home/notched-\n box-plots for more info. Defaults to False unless `notchwidth`\n or `notchspan` is set.\n\n The 'notched' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n ", "language": "en", "n_whitespaces": 191, "n_words": 85, "vocab_size": 68 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_box.py
226,302
2
11
notched
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
57,975
7
8
34
def solve_biquadratic(f, g, opt):
    G = groebner([f, g])

    if len(G) == 1 and G[0].is_ground:
        return None

    if len(G) != 2:
        raise SolveFailed

    x, y = opt.gens
    p, q = G

    if not p.gcd(q).is_ground:
        # not 0-dimensional
        raise SolveFailed

    p = Poly(p, x, expand=False)
    p_roots = [rcollect(expr, y) for expr in roots(p).keys()]

    q = q.ltrim(-1)
    q_roots = list(roots(q).keys())

    solutions = []

    for q_root in q_roots:
        for p_root in p_roots:
            solution = (p_root.subs(y, q_root), q_root)
            solutions.append(solution)

    return sorted(solutions, key=default_sort_key)
sympy/solvers/polysys.py
266
sympy
{ "docstring": "Solve a system of two bivariate quadratic polynomial equations.\n\n Parameters\n ==========\n\n f: a single Expr or Poly\n First equation\n g: a single Expr or Poly\n Second Equation\n opt: an Options object\n For specifying keyword arguments and generators\n\n Returns\n =======\n\n List[Tuple]\n A List of tuples. Solutions for symbols that satisfy the\n equations listed in seq.\n\n Examples\n ========\n\n >>> from sympy import Options, Poly\n >>> from sympy.abc import x, y\n >>> from sympy.solvers.polysys import solve_biquadratic\n >>> NewOption = Options((x, y), {'domain': 'ZZ'})\n\n >>> a = Poly(y**2 - 4 + x, y, x, domain='ZZ')\n >>> b = Poly(y*2 + 3*x - 7, y, x, domain='ZZ')\n >>> solve_biquadratic(a, b, NewOption)\n [(1/3, 3), (41/27, 11/9)]\n\n >>> a = Poly(y + x**2 - 3, y, x, domain='ZZ')\n >>> b = Poly(-y + x - 4, y, x, domain='ZZ')\n >>> solve_biquadratic(a, b, NewOption)\n [(7/2 - sqrt(29)/2, -sqrt(29)/2 - 1/2), (sqrt(29)/2 + 7/2, -1/2 + \\\n sqrt(29)/2)]\n ", "language": "en", "n_whitespaces": 258, "n_words": 149, "vocab_size": 97 }
77
Python
55
59d22b6bb7287613d598611027f640d068ca5748
polysys.py
196,425
20
170
solve_biquadratic
https://github.com/sympy/sympy.git
Moved imports to higher level
176
0
47,925
13
14
35
def _get_metric_object(self, metric, y_t, y_p): if metric is None: return None # Ok to have no metric for an output. # Convenience feature for selecting b/t binary, categorical, # and sparse categorical. if str(metric).lower() not in ["accuracy", "acc", "crossentropy", "ce"]: metric_obj = metrics_mod.get(metric) else: y_t_rank = len(y_t.shape.as_list()) y_p_rank = len(y_p.shape.as_list()) y_t_last_dim = y_t.shape.as_list()[-1] y_p_last_dim = y_p.shape.as_list()[-1] is_binary = y_p_last_dim == 1 is_sparse_categorical = ( y_t_rank < y_p_rank or y_t_last_dim == 1 and y_p_last_dim > 1 ) if str(metric).lower() in ["accuracy", "acc"]: if is_binary: metric_obj = metrics_mod.binary_accuracy elif is_sparse_categorical: metric_obj = metrics_mod.sparse_categorical_accuracy else: metric_obj = metrics_mod.categorical_accuracy else: if is_binary: metric_obj = metrics_mod.binary_crossentropy elif is_sparse_categorical: metric_obj = metrics_mod.sparse_categorical_crossentropy else: metric_obj = metrics_mod.categorical_crossentropy if isinstance(metric_obj, losses_mod.Loss): metric_obj._allow_sum_over_batch_size = ( True # pylint: disable=protected-access ) if not isinstance(metric_obj, metrics_mod.Metric): if isinstance(metric, str): metric_name = metric else: metric_name = get_custom_object_name(metric) if metric_name is None: raise ValueError( f"Metric should be a callable, received: {metric}" ) metric_obj = metrics_mod.MeanMetricWrapper( metric_obj, name=metric_name ) return metric_obj
keras/engine/compile_utils.py
428
keras
{ "docstring": "Converts user-supplied metric to a `Metric` object.\n\n Args:\n metric: A string, function, or `Metric` object.\n y_t: Sample of label.\n y_p: Sample of output.\n\n Returns:\n A `Metric` object.\n ", "language": "en", "n_whitespaces": 84, "n_words": 27, "vocab_size": 20 }
157
Python
89
84afc5193d38057e2e2badf9c889ea87d80d8fbf
compile_utils.py
271,042
45
256
_get_metric_object
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
764
0
80,679
16
1
19
def test_visibility_when_disabled(self) -> None:
    room_id = self.helper.create_room_as(self.user_id, tok=self.token)

    self.helper.send_state(
        room_id=room_id,
        event_type=EventTypes.Retention,
        body={"max_lifetime": one_day_ms},
        tok=self.token,
    )

    resp = self.helper.send(room_id=room_id, body="test", tok=self.token)

    self.reactor.advance(one_day_ms * 2 / 1000)

    self.get_event(room_id, resp["event_id"])
tests/rest/client/test_retention.py
160
synapse
{ "docstring": "Retention policies should be ignored when the retention feature is disabled.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
27
Python
25
4cc4229cd7a55d2556c798fecbb1c9660dc821c8
test_retention.py
248,358
12
102
test_visibility_when_disabled
https://github.com/matrix-org/synapse.git
Prevent expired events from being filtered out when retention is disabled (#12611) Co-authored-by: Richard van der Hoff <[email protected]> Co-authored-by: Patrick Cloke <[email protected]>
120
0
72,235
11
11
12
def __eq__(self, other):
    if self is other:
        return True

    if not isinstance(other, Basic):
        return self._do_eq_sympify(other)

    # check for pure number expr
    if not (self.is_Number and other.is_Number) and (
            type(self) != type(other)):
        return False
    a, b = self._hashable_content(), other._hashable_content()
    if a != b:
        return False
    # check number *in* an expression
    for a, b in zip(a, b):
        if not isinstance(a, Basic):
            continue
        if a.is_Number and type(a) != type(b):
            return False
    return True
sympy/core/basic.py
193
sympy
{ "docstring": "Return a boolean indicating whether a == b on the basis of\n their symbolic trees.\n\n This is the same as a.compare(b) == 0 but faster.\n\n Notes\n =====\n\n If a class that overrides __eq__() needs to retain the\n implementation of __hash__() from a parent class, the\n interpreter must be told this explicitly by setting\n __hash__ : Callable[[object], int] = <ParentClass>.__hash__.\n Otherwise the inheritance of __hash__() will be blocked,\n just as if __hash__ had been explicitly set to None.\n\n References\n ==========\n\n from http://docs.python.org/dev/reference/datamodel.html#object.__hash__\n ", "language": "en", "n_whitespaces": 179, "n_words": 81, "vocab_size": 64 }
71
Python
45
f5ef4e62e5cb5637f2bf2af0ee73e43c58c33c25
basic.py
195,887
17
120
__eq__
https://github.com/sympy/sympy.git
core/basic: Basic.__eq__ only performs user defined conversions core/evalf: no longer create unneeded Tuples with None arguments Fixes #22581 only use _sympify in __eq__ when needed defined _converter and updated Boolean comparisons removed try-except for sympify because it should always be possible at that point completely split sympy and external converters checking entire mro use the relevant part of sympify directly type from copy paste removed ambiguous try-except blocks changed resolve order for sympy/user converters and mro updated documentation typo
253
0
47,468
11
1
4
def check_for_updates():
    version_message = get_update_status()
    print(version_message)
spotdl/utils/console.py
28
spotify-downloader
{ "docstring": "\n Check for updates to the current version.\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 7 }
6
Python
6
deca40c2e26afed62e1f9ec4be14aff9e125929b
console.py
30,421
3
14
check_for_updates
https://github.com/spotDL/spotify-downloader.git
moved console actions to a new file
15
0
5,565
8
1
4
def is_packaged_application() -> bool: return cfg.LOGGING_APP_NAME == "gst_packaged"
openbb_terminal/terminal_helper.py
26
OpenBBTerminal
{ "docstring": "Tell whether or not it is a packaged version (Windows or Mac installer).\n\n\n Returns:\n bool: If the application is packaged\n ", "language": "en", "n_whitespaces": 33, "n_words": 20, "vocab_size": 17 }
8
Python
8
eb244a1d8d01e1ad93c5dc349656aa4170397f90
terminal_helper.py
286,154
8
13
is_packaged_application
https://github.com/OpenBB-finance/OpenBBTerminal.git
Docker : building + publishing (#2904) * fixed integrated test test_stocks_ba.openbb * fixed integrated test test_stocks_dd.openbb * improved and centralised the check * fix lint * Docker : update ci + build files * Docker : update build and CD * Docker : update CD * Docker : test * Docker : test CD * Docker : test CD * Docker : rename `build.sh` * Docker : tests CD * Docker : test CD * Docker : update CD + build * Docker : fix CD * Docker : fix CD * Docker : build * Docker : test CD * Docker : CD * Docker : CD * Docker : test * Docker : test CD * Docker : test CD * Docker : test CD * Docker : test CD * Docker : test CD * Docker : test CD * Docker : test CD * Docker : test CD * Docker : build + CD * Docker : test CD * Docker : test CD * Docker : test CD * Docker : test CD * Docker : build * Docker : test CD * Docker : build + cd * Moving `scripts` * Checkout `helper_funcs.py` from main * Docker : remove test file with alpine * fixing readme errors * fixing missed readme errors * Docker : build * Logging : handle docker app name * Docker : test CD * Docker : cd * Doc * Doc * Doc : linting * Doc * Docker * Doc * Fixing `terminal_controller` * Linting * Doc : fixing links * Version 1.9.1 * Docker : fix name * Doc : add volumes in command Co-authored-by: hjoaquim <[email protected]> Co-authored-by: James Simmons <[email protected]> Co-authored-by: Colin Delahunty <[email protected]>
14
0
85,600
7
4
25
def _optimize_stages(self): context = DatasetContext.get_current() if not context.optimize_fuse_stages: self._optimized_stages = self._stages return # This dummy dataset will be used to get a set of optimized stages. dummy_ds = Dataset( ExecutionPlan(BlockList([], []), DatasetStats(stages={}, parent=None)), 0, True, used_from_dataset_pipeline=True, ) # Apply all pipeline operations to the dummy dataset. for stage in self._stages: dummy_ds = stage(dummy_ds) # Get the optimized stages. _, _, stages = dummy_ds._plan._optimize() # Apply these optimized stages to the datasets underlying the pipeline. # These optimized stages will be executed by the PipelineExecutor. optimized_stages = [] for stage in stages: optimized_stages.append( lambda ds, stage=stage: Dataset( ds._plan.with_stage(stage), ds._epoch, True, used_from_dataset_pipeline=True, ) ) self._optimized_stages = optimized_stages
python/ray/data/dataset_pipeline.py
217
ray
{ "docstring": "Optimize this pipeline, fusing stages together as possible.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
105
Python
67
45ba0e3cacbf4f38b9724437798c75341c2ddc7c
dataset_pipeline.py
124,671
25
138
_optimize_stages
https://github.com/ray-project/ray.git
Object GC for block splitting inside the dataset splitting (#26196) The pipeline will spill objects when splitting the dataset into multiple equal parts. Co-authored-by: Ubuntu <[email protected]>
415
0
27,652
15
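The loop above relies on the `lambda ds, stage=stage:` default-argument trick to freeze each stage at definition time. A plain-Python sketch of that idiom with invented names, unrelated to Ray itself:

    # Without the stage=stage default, every lambda would see the loop's final value.
    fns = [lambda x, stage=stage: f"{stage}({x})" for stage in ("read", "map", "filter")]
    print([f("ds") for f in fns])    # ['read(ds)', 'map(ds)', 'filter(ds)']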
2
13
def vertices_loss(criterion_vertices, pred_vertices, gt_vertices, has_smpl): pred_vertices_with_shape = pred_vertices[has_smpl == 1] gt_vertices_with_shape = gt_vertices[has_smpl == 1] if len(gt_vertices_with_shape) > 0: return criterion_vertices(pred_vertices_with_shape, gt_vertices_with_shape) else: return paddle.to_tensor([1.]).fill_(0.) @register @serializable
ppdet/modeling/losses/pose3d_loss.py
99
@register @serializable
PaddleDetection
{ "docstring": "\n Compute per-vertex loss if vertex annotations are available.\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
27
Python
23
d4e34fe165c09db65fd00113708be1b711ac957c
pose3d_loss.py
211,434
8
61
vertices_loss
https://github.com/PaddlePaddle/PaddleDetection.git
pose3d metro modeling (#6612) * pose3d metro modeling * delete extra comments
87
1
53,098
13
3
6
def step(self): if self._implements_method("_train") and log_once("_train"): raise DeprecationWarning( "Trainable._train is deprecated and is now removed. Override " "Trainable.step instead." ) raise NotImplementedError
python/ray/tune/trainable.py
55
ray
{ "docstring": "Subclasses should override this to implement train().\n\n The return value will be automatically passed to the loggers. Users\n can also return `tune.result.DONE` or `tune.result.SHOULD_CHECKPOINT`\n as a key to manually trigger termination or checkpointing of this\n trial. Note that manual checkpointing only works when subclassing\n Trainables.\n\n .. versionadded:: 0.8.7\n\n Returns:\n A dict that describes training progress.\n\n ", "language": "en", "n_whitespaces": 122, "n_words": 55, "vocab_size": 48 }
22
Python
19
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
trainable.py
132,815
7
27
step
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
95
0
29,807
11
1
7
def print_help(self): help_text = console.print(text=help_text, menu="Cryptocurrency - Discovery")
gamestonk_terminal/cryptocurrency/discovery/discovery_controller.py
40
OpenBBTerminal
{ "docstring": "Print help[cmds]\n[src][CoinGecko][/src]\n cgtrending trending coins\n cgvoted most voted coins\n cgvisited most visited coins\n cgvolume coins with highest volume\n cgrecently recently added\n cgsentiment coins with most positive sentiment\n cggainers top gainers - coins which price gained the most in given period\n cglosers top losers - coins which price dropped the most in given period\n cgyfarms top yield farms\n cgdefi top defi protocols\n cgdex top decentralized exchanges\n cgnft top non fungible tokens\n[src][CoinPaprika][/src]\n cpsearch search for coins\n[src][CoinMarketCap][/src]\n cmctop top coins[/cmds]\n", "language": "en", "n_whitespaces": 246, "n_words": 80, "vocab_size": 55 }
8
Python
8
82747072c511beb1b2672846ae2ee4aec53eb562
discovery_controller.py
281,450
21
21
print_help
https://github.com/OpenBB-finance/OpenBBTerminal.git
Terminal Wide Rich (#1161) * My idea for how we handle Rich moving forward * remove independent consoles * FIxed pylint issues * add a few vars * Switched print to console * More transitions * Changed more prints * Replaced all prints * Fixing tabulate * Finished replace tabulate * Finished removing rich from Tabulate * add Panel around menu * add GST watermark under feature flag * Fixed 46 tests * Delete test_screener[False].yaml * Delete test_screener[True].yaml * Fixed the rest of the tests * add help and source color vars and use rgb * rich on stocks/options * update rich on disc, dps, sia * rich in gov, ins and scr menus * ba and ca menus with rich * Fixed import issue * Fixed some tests * removed termcolor * Removed prettytable * add rich to remaining stocks menus * FIxed linting issue * Added James' changes * Updated dependencies * Add rich to cryptocurrency menu * refactor economy and forex * refactor etf with rich * refactor mfunds * refactor rich rest * not specify style so default color works well on any background * Fixing mypy issues * Updated tests * More test fixes * James' test fixes * Updating tests : stocks/screener - fix cassettes using BR * Updating tests : crypto * Updating tests : disable DEBUG_MODE * Updating tests : stocks/fa/yfinance * minor fixes that escape * Improve the rich table function (that replaces tabulate :D ) * Fixed bad code * delete rogue file + dcf fix + NoConsole * sia mypy * fuck you linter * fuck you linter pt 2 * skip hehe * i hate the black linter * ubuntu mypy attempt * Update : rich_config + gtff * Updating tests : conftest * Updating tests : stocks * Update : rich_config * Updating : rich_config * make panel configurable for Theodore :b * colors update * Merged * Updating : rich_config + feature_flags * Updating : rich_config * Updating tests : stocks * Updating : feature_flags Co-authored-by: DidierRLopes <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]> Co-authored-by: james <[email protected]> Co-authored-by: jose-donato <[email protected]>
30
0
83,767
9
9
5
def _rpc_stats(self) -> Dict[str, Any]:
freqtrade/rpc/rpc.py
22
freqtrade
{ "docstring": "\n Generate generic stats for trades in database\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
5
Python
5
be84a028c18bdbfd58dea8a51b6d59b77b672a8c
rpc.py
148,776
21
272
_rpc_stats
https://github.com/freqtrade/freqtrade.git
Avoid mixed types in the api for /stats
12
0
34,332
6
4
31
def preprocess_samples(self): r # sort items based on the sequence length in ascending order text_ignore_idx, text_keep_idx = self.sort_and_filter_by_length(self.text_lengths, self.min_text_len, self.max_text_len) audio_ignore_idx, audio_keep_idx = self.sort_and_filter_by_length(self.audio_lengths, self.min_audio_len, self.max_audio_len) keep_idx = list(set(audio_keep_idx) | set(text_keep_idx)) ignore_idx = list(set(audio_ignore_idx) | set(text_ignore_idx)) samples = [] for idx in keep_idx: samples.append(self.samples[idx]) if len(samples) == 0: raise RuntimeError(" [!] No samples left") # shuffle batch groups # create batches with similar length items # the larger the `batch_group_size`, the higher the length variety in a batch. samples = self.create_buckets(samples, self.batch_group_size) # update items to the new sorted items self.samples = samples if self.verbose: print(" | > Preprocessing samples") print(" | > Max text length: {}".format(np.max(self.text_lengths))) print(" | > Min text length: {}".format(np.min(self.text_lengths))) print(" | > Avg text length: {}".format(np.mean(self.text_lengths))) print(" | ") print(" | > Max audio length: {}".format(np.max(self.audio_lengths))) print(" | > Min audio length: {}".format(np.min(self.audio_lengths))) print(" | > Avg audio length: {}".format(np.mean(self.audio_lengths))) print(f" | > Num. instances discarded samples: {len(ignore_idx)}") print(" | > Batch group size: {}.".format(self.batch_group_size))
TTS/tts/datasets/dataset.py
434
TTS
{ "docstring": "Sort `items` based on text length or audio length in ascending order. Filter out samples out or the length\n range.\n ", "language": "en", "n_whitespaces": 34, "n_words": 20, "vocab_size": 16 }
160
Python
95
176b712c1a40cf630da9a77f1826836723c40fde
dataset.py
262,050
26
250
preprocess_samples
https://github.com/coqui-ai/TTS.git
Refactor TTSDataset ⚡️
403
0
77,109
14
1
2
def selectedpoints(self): return self["selectedpoints"]
packages/python/plotly/plotly/graph_objs/_bar.py
22
plotly.py
{ "docstring": "\n Array containing integer indices of selected points. Has an\n effect only for traces that support selections. Note that an\n empty array means an empty selection where the `unselected` are\n turned on for all points, whereas, any other non-array values\n means no selection all where the `selected` and `unselected`\n styles have no effect.\n\n The 'selectedpoints' property accepts values of any type\n\n Returns\n -------\n Any\n ", "language": "en", "n_whitespaces": 141, "n_words": 63, "vocab_size": 48 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_bar.py
226,180
2
11
selectedpoints
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
57,853
7
5
24
def make_release_tree(self, base_dir, files): # Create all the directories under 'base_dir' necessary to # put 'files' there; the 'mkpath()' is just so we don't die # if the manifest happens to be empty. self.mkpath(base_dir) dir_util.create_tree(base_dir, files, dry_run=self.dry_run) # And walk over the list of files, either making a hard link (if # os.link exists) to each one that doesn't already exist in its # corresponding location under 'base_dir', or copying each file # that's out-of-date in 'base_dir'. (Usually, all files will be # out-of-date, because by default we blow away 'base_dir' when # we're done making the distribution archives.) if hasattr(os, 'link'): # can make hard links on this system link = 'hard' msg = "making hard links in %s..." % base_dir else: # nope, have to copy link = None msg = "copying files to %s..." % base_dir if not files: log.warn("no files to distribute -- empty manifest?") else: log.info(msg) for file in files: if not os.path.isfile(file): log.warn("'%s' not a regular file -- skipping", file) else: dest = os.path.join(base_dir, file) self.copy_file(file, dest, link=link) self.distribution.metadata.write_pkg_info(base_dir)
python3.10.4/Lib/distutils/command/sdist.py
235
XX-Net
{ "docstring": "Create the directory tree that will become the source\n distribution archive. All directories implied by the filenames in\n 'files' are created under 'base_dir', and then we hard link or copy\n (if hard linking is unavailable) those files into place.\n Essentially, this duplicates the developer's source tree, but in a\n directory named after the distribution, containing only the files\n to be distributed.\n ", "language": "en", "n_whitespaces": 111, "n_words": 61, "vocab_size": 51 }
175
Python
117
8198943edd73a363c266633e1aa5b2a9e9c9f526
sdist.py
222,813
20
134
make_release_tree
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
468
0
56,754
14
1
2
def run_after_hook(self): return None
wagtail/admin/views/generic/mixins.py
16
wagtail
{ "docstring": "\n Define how to run the hooks after the operation is executed.\n The `self.run_hook(hook_name, *args, **kwargs)` from HookResponseMixin\n can be utilised to call the hooks.\n\n If this method returns a response, it will be returned as the view\n response immediately after the operation finishes, skipping the default\n response.\n ", "language": "en", "n_whitespaces": 97, "n_words": 47, "vocab_size": 38 }
4
Python
4
bc1a2ab1148b0f27cfd1435f8cb0e44c2721102d
mixins.py
77,224
2
8
run_after_hook
https://github.com/wagtail/wagtail.git
Extract mixins from Snippet views and use it in generic create/edit/delete views (#8361)
18
0
16,643
6
5
17
def __setitem__(self, name, val): max_count = self.policy.header_max_count(name) if max_count: lname = name.lower() found = 0 for k, v in self._headers: if k.lower() == lname: found += 1 if found >= max_count: raise ValueError("There may be at most {} {} headers " "in a message".format(max_count, name)) self._headers.append(self.policy.header_store_parse(name, val))
python3.10.4/Lib/email/message.py
146
XX-Net
{ "docstring": "Set the value of a header.\n\n Note: this does not overwrite an existing header with the same field\n name. Use __delitem__() first to delete any existing headers.\n ", "language": "en", "n_whitespaces": 49, "n_words": 27, "vocab_size": 25 }
47
Python
39
8198943edd73a363c266633e1aa5b2a9e9c9f526
message.py
223,837
12
89
__setitem__
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
224
0
57,096
19
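The header_max_count check above is what rejects duplicate "unique" headers. A small sketch with the standard-library EmailMessage, whose modern policy (unlike the legacy compat32 default on plain Message) caps headers such as Subject at one:

    from email.message import EmailMessage

    msg = EmailMessage()
    msg["Subject"] = "first"
    try:
        msg["Subject"] = "second"    # __setitem__ appends rather than overwrites
    except ValueError as exc:
        print(exc)                   # "There may be at most 1 Subject headers in a message"
    del msg["Subject"]               # delete the existing header first, then re-set
    msg["Subject"] = "second"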
7
14
def __call__(self, *i): # list indices can be Integer or int; leave this # as it is (don't test or convert it) because this # gets called a lot and should be fast if len(i) == 1: i = i[0] if not isinstance(i, Iterable): i = as_int(i) if i < 0 or i > self.size: raise TypeError( "{} should be an integer between 0 and {}" .format(i, self.size-1)) return self._array_form[i] # P([a, b, c]) if len(i) != self.size: raise TypeError( "{} should have the length {}.".format(i, self.size)) return [i[j] for j in self._array_form] # P(1, 2, 3) return self*Permutation(Cycle(*i), size=self.size)
sympy/combinatorics/permutations.py
204
sympy
{ "docstring": "\n Allows applying a permutation instance as a bijective function.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation\n >>> p = Permutation([[2, 0], [3, 1]])\n >>> p.array_form\n [2, 3, 0, 1]\n >>> [p(i) for i in range(4)]\n [2, 3, 0, 1]\n\n If an array is given then the permutation selects the items\n from the array (i.e. the permutation is applied to the array):\n\n >>> from sympy.abc import x\n >>> p([x, 1, 0, x**2])\n [0, x**2, x, 1]\n ", "language": "en", "n_whitespaces": 181, "n_words": 75, "vocab_size": 52 }
100
Python
73
498015021131af4dbb07eb110e5badaba8250c7b
permutations.py
196,174
15
126
__call__
https://github.com/sympy/sympy.git
Updated import locations
348
0
47,674
17
12
56
def transform(self, X): check_is_fitted(self) X = self._validate_input(X, in_fit=False) statistics = self.statistics_ if X.shape[1] != statistics.shape[0]: raise ValueError( "X has %d features per sample, expected %d" % (X.shape[1], self.statistics_.shape[0]) ) # compute mask before eliminating invalid features missing_mask = _get_mask(X, self.missing_values) # Decide whether to keep missing features if self.strategy == "constant" or self.keep_empty_features: valid_statistics = statistics valid_statistics_indexes = None else: # same as np.isnan but also works for object dtypes invalid_mask = _get_mask(statistics, np.nan) valid_mask = np.logical_not(invalid_mask) valid_statistics = statistics[valid_mask] valid_statistics_indexes = np.flatnonzero(valid_mask) if invalid_mask.any(): invalid_features = np.arange(X.shape[1])[invalid_mask] if self.verbose != "deprecated" and self.verbose: # use feature names warning if features are provided if hasattr(self, "feature_names_in_"): invalid_features = self.feature_names_in_[invalid_features] warnings.warn( "Skipping features without any observed values:" f" {invalid_features}. At least one non-missing value is needed" f" for imputation with strategy='{self.strategy}'." ) X = X[:, valid_statistics_indexes] # Do actual imputation if sp.issparse(X): if self.missing_values == 0: raise ValueError( "Imputation not possible when missing_values " "== 0 and input is sparse. Provide a dense " "array instead." ) else: # if no invalid statistics are found, use the mask computed # before, else recompute mask if valid_statistics_indexes is None: mask = missing_mask.data else: mask = _get_mask(X.data, self.missing_values) indexes = np.repeat( np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr) )[mask] X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False) else: # use mask computed before eliminating invalid mask if valid_statistics_indexes is None: mask_valid_features = missing_mask else: mask_valid_features = missing_mask[:, valid_statistics_indexes] n_missing = np.sum(mask_valid_features, axis=0) values = np.repeat(valid_statistics, n_missing) coordinates = np.where(mask_valid_features.transpose())[::-1] X[coordinates] = values X_indicator = super()._transform_indicator(missing_mask) return super()._concatenate_indicator(X, X_indicator)
sklearn/impute/_base.py
642
scikit-learn
{ "docstring": "Impute all missing values in `X`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n The input data to complete.\n\n Returns\n -------\n X_imputed : {ndarray, sparse matrix} of shape \\\n (n_samples, n_features_out)\n `X` with imputed values.\n ", "language": "en", "n_whitespaces": 123, "n_words": 37, "vocab_size": 33 }
249
Python
160
d8fa96c29828e3ca79ddd5d7466521ac4d95213c
_base.py
261,576
56
388
transform
https://github.com/scikit-learn/scikit-learn.git
ENH keep features with all missing values during imputation (#24770) Co-authored-by: Chiara Marmo <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]> Co-authored-by: Vitor SRG <[email protected]> Fixes https://github.com/scikit-learn/scikit-learn/pull/16695 Fixes https://github.com/scikit-learn/scikit-learn/issues/16426 Fixes https://github.com/scikit-learn/scikit-learn/issues/16977
1,085
0
76,872
20
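A hedged usage sketch of the public API that ends up in this transform path, assuming scikit-learn and NumPy are installed; the values are illustrative:

    import numpy as np
    from sklearn.impute import SimpleImputer

    X = np.array([[1.0, np.nan],
                  [3.0, 4.0],
                  [np.nan, 6.0]])
    imp = SimpleImputer(strategy="mean")
    print(imp.fit_transform(X))
    # [[1. 5.]
    #  [3. 4.]
    #  [2. 6.]]
    # Columns that are entirely missing are dropped unless keep_empty_features=True,
    # the parameter this commit threads through the branch above.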
2
4
def get_running_loop(): # NOTE: this function is implemented in C (see _asynciomodule.c) loop = _get_running_loop() if loop is None: raise RuntimeError('no running event loop') return loop
python3.10.4/Lib/asyncio/events.py
43
XX-Net
{ "docstring": "Return the running event loop. Raise a RuntimeError if there is none.\n\n This function is thread-specific.\n ", "language": "en", "n_whitespaces": 23, "n_words": 16, "vocab_size": 15 }
26
Python
23
8198943edd73a363c266633e1aa5b2a9e9c9f526
events.py
220,442
5
22
get_running_loop
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
48
0
55,997
10
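A small sketch of the contract described above, using only the standard-library asyncio API:

    import asyncio

    async def main():
        loop = asyncio.get_running_loop()   # OK: a loop is running in this thread
        print(loop.time())

    asyncio.run(main())
    # Calling asyncio.get_running_loop() with no running loop raises
    # RuntimeError("no running event loop"), the branch shown in the snippet above.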
13
44
def _predict_faces(self) -> None: faces_seen = 0 consecutive_no_faces = 0 batch: List[ConvertItem] = [] is_amd = get_backend() == "amd" while True: item: Union[Literal["EOF"], ConvertItem] = self._in_queue.get() if item == "EOF": logger.debug("EOF Received") break logger.trace("Got from queue: '%s'", item.inbound.filename) # type:ignore faces_count = len(item.inbound.detected_faces) # Safety measure. If a large stream of frames appear that do not have faces, # these will stack up into RAM. Keep a count of consecutive frames with no faces. # If self._batchsize number of frames appear, force the current batch through # to clear RAM. consecutive_no_faces = consecutive_no_faces + 1 if faces_count == 0 else 0 self._faces_count += faces_count if faces_count > 1: self._verify_output = True logger.verbose("Found more than one face in an image! '%s'", # type:ignore os.path.basename(item.inbound.filename)) self.load_aligned(item) faces_seen += faces_count batch.append(item) if faces_seen < self._batchsize and consecutive_no_faces < self._batchsize: logger.trace("Continuing. Current batchsize: %s, " # type:ignore "consecutive_no_faces: %s", faces_seen, consecutive_no_faces) continue if batch: logger.trace("Batching to predictor. Frames: %s, Faces: %s", # type:ignore len(batch), faces_seen) feed_batch = [feed_face for item in batch for feed_face in item.feed_faces] if faces_seen != 0: feed_faces = self._compile_feed_faces(feed_batch) batch_size = None if is_amd and feed_faces.shape[0] != self._batchsize: logger.verbose("Fallback to BS=1") # type:ignore batch_size = 1 predicted = self._predict(feed_faces, batch_size) else: predicted = np.array([]) self._queue_out_frames(batch, predicted) consecutive_no_faces = 0 faces_seen = 0 batch = [] logger.debug("Putting EOF") self._out_queue.put("EOF") logger.debug("Load queue complete")
scripts/convert.py
509
faceswap
{ "docstring": " Run Prediction on the Faceswap model in a background thread.\n\n Reads from the :attr:`self._in_queue`, prepares images for prediction\n then puts the predictions back to the :attr:`self.out_queue`\n ", "language": "en", "n_whitespaces": 48, "n_words": 26, "vocab_size": 23 }
221
Python
138
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
convert.py
101,368
51
300
_predict_faces
https://github.com/deepfakes/faceswap.git
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
919
0
20,783
16
3
20
def prepare(self, timestamp, duration, organization): reports = {} for project in organization.project_set.all(): reports[project.id] = self.__encode(self.build(timestamp, duration, project)) if not reports: # XXX: HMSET requires at least one key/value pair, so we need to # protect ourselves here against organizations that were created # but haven't set up any projects yet. return with self.cluster.map() as client: key = self.__make_key(timestamp, duration, organization) client.hmset(key, reports) client.expire(key, self.ttl)
src/sentry/tasks/reports.py
152
sentry
{ "docstring": "\n For every project belonging to the organization, serially build a report and zlib compress it\n After this completes, store it in Redis with an expiration\n ", "language": "en", "n_whitespaces": 47, "n_words": 25, "vocab_size": 24 }
64
Python
58
9731253cf103cfdced62c36753a0e957ab29d705
reports.py
85,595
10
95
prepare
https://github.com/getsentry/sentry.git
feat: Add instrumentation to Celery tasks for weekly reports (#38561) It seems that if we don't include the parent celery task, it will not trace any of the children tasks. This enables further investigation as to why the building of the report is slow. Fixes WOR-2159.
187
0
18,014
12
1
16
def test_file_not_found_error(self): response = self.get_success_response( self.organization.slug, self.project.slug, qs_params={"file": self.filepath} ) assert response.data["config"] == self.expected_configurations(self.code_mapping1) assert not response.data["sourceUrl"] # XXX: This depends on what was the last attempted code mapping assert response.data["error"] == "stack_root_mismatch" assert response.data["integrations"] == [serialized_integration(self.integration)] # XXX: This depends on what was the last attempted code mapping assert ( response.data["attemptedUrl"] == f"https://example.com/{self.repo.name}/blob/master/src/sentry/src/sentry/utils/safe.py" )
tests/sentry/api/endpoints/test_project_stacktrace_link.py
171
sentry
{ "docstring": "File matches code mapping but it cannot be found in the source repository.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
55
Python
35
2e0d2c856eb17a842c67d88363bed92c99578c20
test_project_stacktrace_link.py
88,596
12
95
test_file_not_found_error
https://github.com/getsentry/sentry.git
ref(stacktrace_link): Add more than one code mapping in the tests (#41409) Include more than one code mapping in the setup code. Cleaning up a bit how we tag the transactions. This makes the PR for WOR-2395 a little easier to read.
165
0
18,415
12
3
4
def has_pretrained_cfg_key(model_name, cfg_key): if model_name in _model_pretrained_cfgs and cfg_key in _model_pretrained_cfgs[model_name]: return True return False
timm/models/registry.py
39
pytorch-image-models
{ "docstring": " Query model default_cfgs for existence of a specific key.\n ", "language": "en", "n_whitespaces": 13, "n_words": 9, "vocab_size": 9 }
15
Python
13
abc9ba254430ef971ea3dbd12f2b4f1969da55be
registry.py
331,645
4
24
has_pretrained_cfg_key
https://github.com/huggingface/pytorch-image-models.git
Transitioning default_cfg -> pretrained_cfg. Improving handling of pretrained_cfg source (HF-Hub, files, timm config, etc). Checkpoint handling tweaks.
31
0
119,879
8
1
57
def is_ccl_available(): return _is_ccl_available # docstyle-ignore DATASETS_IMPORT_ERROR = # docstyle-ignore TOKENIZERS_IMPORT_ERROR = # docstyle-ignore SENTENCEPIECE_IMPORT_ERROR = # docstyle-ignore PROTOBUF_IMPORT_ERROR = # docstyle-ignore FAISS_IMPORT_ERROR = # docstyle-ignore PYTORCH_IMPORT_ERROR = # docstyle-ignore PYTORCH_IMPORT_ERROR_WITH_TF = # docstyle-ignore TF_IMPORT_ERROR_WITH_PYTORCH = # docstyle-ignore SKLEARN_IMPORT_ERROR = # docstyle-ignore TENSORFLOW_IMPORT_ERROR = # docstyle-ignore DETECTRON2_IMPORT_ERROR = # docstyle-ignore FLAX_IMPORT_ERROR = # docstyle-ignore FTFY_IMPORT_ERROR = # docstyle-ignore SCATTER_IMPORT_ERROR = # docstyle-ignore PYTORCH_QUANTIZATION_IMPORT_ERROR = # docstyle-ignore TENSORFLOW_PROBABILITY_IMPORT_ERROR = # docstyle-ignore TENSORFLOW_TEXT_IMPORT_ERROR = # docstyle-ignore PANDAS_IMPORT_ERROR = # docstyle-ignore PHONEMIZER_IMPORT_ERROR = # docstyle-ignore SACREMOSES_IMPORT_ERROR = # docstyle-ignore SCIPY_IMPORT_ERROR = # docstyle-ignore SPEECH_IMPORT_ERROR = # docstyle-ignore TIMM_IMPORT_ERROR = # docstyle-ignore VISION_IMPORT_ERROR = # docstyle-ignore PYTESSERACT_IMPORT_ERROR = # docstyle-ignore PYCTCDECODE_IMPORT_ERROR = # docstyle-ignore ACCELERATE_IMPORT_ERROR = # docstyle-ignore CCL_IMPORT_ERROR = BACKENDS_MAPPING = OrderedDict( [ ("datasets", (is_datasets_available, DATASETS_IMPORT_ERROR)), ("detectron2", (is_detectron2_available, DETECTRON2_IMPORT_ERROR)), ("faiss", (is_faiss_available, FAISS_IMPORT_ERROR)), ("flax", (is_flax_available, FLAX_IMPORT_ERROR)), ("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)), ("pandas", (is_pandas_available, PANDAS_IMPORT_ERROR)), ("phonemizer", (is_phonemizer_available, PHONEMIZER_IMPORT_ERROR)), ("protobuf", (is_protobuf_available, PROTOBUF_IMPORT_ERROR)), ("pyctcdecode", (is_pyctcdecode_available, PYCTCDECODE_IMPORT_ERROR)), ("pytesseract", (is_pytesseract_available, PYTESSERACT_IMPORT_ERROR)), ("sacremoses", (is_sacremoses_available, SACREMOSES_IMPORT_ERROR)), ("scatter", (is_scatter_available, SCATTER_IMPORT_ERROR)), ("pytorch_quantization", (is_pytorch_quantization_available, PYTORCH_QUANTIZATION_IMPORT_ERROR)), ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)), ("sklearn", (is_sklearn_available, SKLEARN_IMPORT_ERROR)), ("speech", (is_speech_available, SPEECH_IMPORT_ERROR)), ("tensorflow_probability", (is_tensorflow_probability_available, TENSORFLOW_PROBABILITY_IMPORT_ERROR)), ("tf", (is_tf_available, TENSORFLOW_IMPORT_ERROR)), ("tensorflow_text", (is_tensorflow_text_available, TENSORFLOW_TEXT_IMPORT_ERROR)), ("timm", (is_timm_available, TIMM_IMPORT_ERROR)), ("tokenizers", (is_tokenizers_available, TOKENIZERS_IMPORT_ERROR)), ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)), ("vision", (is_vision_available, VISION_IMPORT_ERROR)), ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)), ("accelerate", (is_accelerate_available, ACCELERATE_IMPORT_ERROR)), ("oneccl_bind_pt", (is_ccl_available, CCL_IMPORT_ERROR)), ] )
src/transformers/utils/import_utils.py
611
transformers
{ "docstring": "\n{0} requires the 🤗 Datasets library but it was not found in your environment. You can install it with:\n```\npip install datasets\n```\nIn a notebook or a colab, you can install it by executing a cell with\n```\n!pip install datasets\n```\nthen restarting your kernel.\n\nNote that if you have a local folder named `datasets` or a local python file named `datasets.py` in your current\nworking directory, python may try to import this instead of the 🤗 Datasets library. You should rename this folder or\nthat python file if that's the case.\n\n{0} requires the 🤗 Tokenizers library but it was not found in your environment. You can install it with:\n```\npip install tokenizers\n```\nIn a notebook or a colab, you can install it by executing a cell with\n```\n!pip install tokenizers\n```\n\n{0} requires the SentencePiece library but it was not found in your environment. Checkout the instructions on the\ninstallation page of its repo: https://github.com/google/sentencepiece#installation and follow the ones\nthat match your environment.\n\n{0} requires the protobuf library but it was not found in your environment. Checkout the instructions on the\ninstallation page of its repo: https://github.com/protocolbuffers/protobuf/tree/master/python#installation and follow the ones\nthat match your environment.\n\n{0} requires the faiss library but it was not found in your environment. Checkout the instructions on the\ninstallation page of its repo: https://github.com/facebookresearch/faiss/blob/master/INSTALL.md and follow the ones\nthat match your environment.\n\n{0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the\ninstallation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment.\n\n{0} requires the PyTorch library but it was not found in your environment.\nHowever, we were able to find a TensorFlow installation. TensorFlow classes begin\nwith \"TF\", but are otherwise identically named to our PyTorch classes. This\nmeans that the TF equivalent of the class you tried to import would be \"TF{0}\".\nIf you want to use TensorFlow, please use TF classes instead!\n\nIf you really do want to use PyTorch please go to\nhttps://pytorch.org/get-started/locally/ and follow the instructions that\nmatch your environment.\n\n{0} requires the TensorFlow library but it was not found in your environment.\nHowever, we were able to find a PyTorch installation. PyTorch classes do not begin\nwith \"TF\", but are otherwise identically named to our TF classes.\nIf you want to use PyTorch, please use those classes instead!\n\nIf you really do want to use TensorFlow, please follow the instructions on the\ninstallation page https://www.tensorflow.org/install that match your environment.\n\n{0} requires the scikit-learn library but it was not found in your environment. You can install it with:\n```\npip install -U scikit-learn\n```\nIn a notebook or a colab, you can install it by executing a cell with\n```\n!pip install -U scikit-learn\n```\n\n{0} requires the TensorFlow library but it was not found in your environment. Checkout the instructions on the\ninstallation page: https://www.tensorflow.org/install and follow the ones that match your environment.\n\n{0} requires the detectron2 library but it was not found in your environment. 
Checkout the instructions on the\ninstallation page: https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md and follow the ones\nthat match your environment.\n\n{0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the\ninstallation page: https://github.com/google/flax and follow the ones that match your environment.\n\n{0} requires the ftfy library but it was not found in your environment. Checkout the instructions on the\ninstallation section: https://github.com/rspeer/python-ftfy/tree/master#installing and follow the ones\nthat match your environment.\n\n{0} requires the torch-scatter library but it was not found in your environment. You can install it with pip as\nexplained here: https://github.com/rusty1s/pytorch_scatter.\n\n{0} requires the pytorch-quantization library but it was not found in your environment. You can install it with pip:\n`pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com`\n\n{0} requires the tensorflow_probability library but it was not found in your environment. You can install it with pip as\nexplained here: https://github.com/tensorflow/probability.\n\n{0} requires the tensorflow_text library but it was not found in your environment. You can install it with pip as\nexplained here: https://www.tensorflow.org/text/guide/tf_text_intro.\n\n{0} requires the pandas library but it was not found in your environment. You can install it with pip as\nexplained here: https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html.\n\n{0} requires the phonemizer library but it was not found in your environment. You can install it with pip:\n`pip install phonemizer`\n\n{0} requires the sacremoses library but it was not found in your environment. You can install it with pip:\n`pip install sacremoses`\n\n{0} requires the scipy library but it was not found in your environment. You can install it with pip:\n`pip install scipy`\n\n{0} requires the torchaudio library but it was not found in your environment. You can install it with pip:\n`pip install torchaudio`\n\n{0} requires the timm library but it was not found in your environment. You can install it with pip:\n`pip install timm`\n\n{0} requires the PIL library but it was not found in your environment. You can install it with pip:\n`pip install pillow`\n\n{0} requires the PyTesseract library but it was not found in your environment. You can install it with pip:\n`pip install pytesseract`\n\n{0} requires the pyctcdecode library but it was not found in your environment. You can install it with pip:\n`pip install pyctcdecode`\n\n{0} requires the accelerate library but it was not found in your environment. You can install it with pip:\n`pip install accelerate`\n\n{0} requires the torch ccl library but it was not found in your environment. You can install it with pip:\n`pip install oneccl_bind_pt -f https://developer.intel.com/ipex-whl-stable`\n", "language": "en", "n_whitespaces": 824, "n_words": 917, "vocab_size": 167 }
200
Python
118
2b81f72be9fa6d69734ae27cfcbfd72b04988fe4
import_utils.py
32,535
2
6
is_ccl_available
https://github.com/huggingface/transformers.git
start from 1.12, torch_ccl is renamed as oneccl_bindings_for_pytorch … (#18229) * start from 1.12, torch_ccl is renamed as oneccl_bindings_for_pytorch and should import it before use Signed-off-by: Wang, Yi A <[email protected]> * add doc for perf_train_cpu_many Signed-off-by: Wang, Yi A <[email protected]> * update doc Signed-off-by: Wang, Yi A <[email protected]>
360
0
5,947
9
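A hedged sketch of how the availability helpers feeding BACKENDS_MAPPING are typically consulted from user code, assuming the transformers package is installed:

    from transformers.utils import is_torch_available, is_tf_available

    # Cheap probes evaluated when transformers.utils is imported, so they are
    # safe to call before deciding which framework-specific classes to load.
    print("torch:", is_torch_available(), "tf:", is_tf_available())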
2
25
def test_json_get_subscribers(self) -> None: stream_name = gather_subscriptions(self.user_profile)[0][0]["name"] stream_id = get_stream(stream_name, self.user_profile.realm).id expected_subscribers = gather_subscriptions(self.user_profile, include_subscribers=True)[0][ 0 ]["subscribers"] result = self.client_get(f"/json/streams/{stream_id}/members") result_dict = self.assert_json_success(result) self.assertIn("subscribers", result_dict) self.assertIsInstance(result_dict["subscribers"], list) subscribers: List[int] = [] for subscriber in result_dict["subscribers"]: self.assertIsInstance(subscriber, int) subscribers.append(subscriber) self.assertEqual(set(subscribers), set(expected_subscribers))
zerver/tests/test_subs.py
231
zulip
{ "docstring": "\n json_get_subscribers in zerver/views/streams.py\n also returns the list of subscribers for a stream, when requested.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 14 }
40
Python
35
a142fbff85302c5e3acb2e204eca2e9c75dbc74b
test_subs.py
84,134
19
141
test_json_get_subscribers
https://github.com/zulip/zulip.git
tests: Refactor away result.json() calls with helpers. Signed-off-by: Zixuan James Li <[email protected]>
157
0
17,779
12
9
10
def _fix_compile_args(self, output_dir, macros, include_dirs): if output_dir is None: output_dir = self.output_dir elif not isinstance(output_dir, str): raise TypeError("'output_dir' must be a string or None") if macros is None: macros = self.macros elif isinstance(macros, list): macros = macros + (self.macros or []) else: raise TypeError("'macros' (if supplied) must be a list of tuples") if include_dirs is None: include_dirs = self.include_dirs elif isinstance(include_dirs, (list, tuple)): include_dirs = list(include_dirs) + (self.include_dirs or []) else: raise TypeError( "'include_dirs' (if supplied) must be a list of strings") return output_dir, macros, include_dirs
python3.10.4/Lib/distutils/ccompiler.py
199
XX-Net
{ "docstring": "Typecheck and fix-up some of the arguments to the 'compile()'\n method, and return fixed-up values. Specifically: if 'output_dir'\n is None, replaces it with 'self.output_dir'; ensures that 'macros'\n is a list, and augments it with 'self.macros'; ensures that\n 'include_dirs' is a list, and augments it with 'self.include_dirs'.\n Guarantees that the returned values are of the correct type,\n i.e. for 'output_dir' either string or None, and for 'macros' and\n 'include_dirs' either list or None.\n ", "language": "en", "n_whitespaces": 129, "n_words": 72, "vocab_size": 44 }
86
Python
48
8198943edd73a363c266633e1aa5b2a9e9c9f526
ccompiler.py
222,586
19
123
_fix_compile_args
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
261
0
56,654
13
4
16
def get_template_names(self): try: names = super().get_template_names() except ImproperlyConfigured: # If template_name isn't specified, it's not a problem -- # we just start with an empty list. names = [] # If the list is a queryset, we'll invent a template name based on the # app and model name. This name gets put at the end of the template # name list so that user-supplied names override the automatically- # generated ones. if hasattr(self.object_list, "model"): opts = self.object_list.model._meta names.append( "%s/%s%s.html" % (opts.app_label, opts.model_name, self.template_name_suffix) ) elif not names: raise ImproperlyConfigured( "%(cls)s requires either a 'template_name' attribute " "or a get_queryset() method that returns a QuerySet." % { "cls": self.__class__.__name__, } ) return names
django/views/generic/list.py
155
django
{ "docstring": "\n Return a list of template names to be used for the request. Must return\n a list. May not be called if render_to_response is overridden.\n ", "language": "en", "n_whitespaces": 46, "n_words": 24, "vocab_size": 22 }
113
Python
85
9c19aff7c7561e3a82978a272ecdaad40dda5c00
list.py
206,882
20
86
get_template_names
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
391
0
51,781
15
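A hedged sketch of the naming rule above; the app "library" and model "Book" are hypothetical:

    from django.views.generic import ListView
    from library.models import Book   # hypothetical app "library" and model "Book"

    class BookListView(ListView):
        model = Book
        # With template_name unset, get_template_names() falls back to
        # "library/book_list.html" (app_label "/" model_name + the "_list" suffix).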
1
3
def _SpinboxSelectHandler(self): self._generic_callback_handler('')
PySimpleGUI.py
25
PySimpleGUI
{ "docstring": "\n Internal callback function for when an entry is selected in a Combobox.\n\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
3
Python
3
40757180b5d0ac66d44958e4ab13329c7b03ea36
PySimpleGUI.py
212,675
2
12
_SpinboxSelectHandler
https://github.com/PySimpleGUI/PySimpleGUI.git
Fix for enable_events for Spin element. Changed how the event is generated. Need to determine manual entry of value still
17
0
53,336
8
1
19
def test_small_integration_test(self): model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small") tokenizer = AutoTokenizer.from_pretrained("google/mt5-small") input_ids = tokenizer("Hello there", return_tensors="tf").input_ids labels = tokenizer("Hi I am", return_tensors="tf").input_ids loss = model(input_ids, labels=labels).loss mtf_score = -tf.math.reduce_mean(loss).numpy() EXPECTED_SCORE = -21.210594 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
tests/models/mt5/test_modeling_tf_mt5.py
158
transformers
{ "docstring": "\n For comparision run:\n >>> import t5 # pip install t5==0.7.1\n >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary\n\n >>> path_to_mtf_small_mt5_checkpoint = '<fill_in>'\n >>> path_to_mtf_small_mt5_spm_model_path = '<fill_in>'\n >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_mt5_checkpoint, batch_size=1, tpu=None)\n >>> vocab = SentencePieceVocabulary(path_to_mtf_small_mt5_spm_model_path, extra_ids=100)\n >>> score = t5_model.score(inputs=[\"Hello there\"], targets=[\"Hi I am\"], vocabulary=vocab)\n ", "language": "en", "n_whitespaces": 108, "n_words": 43, "vocab_size": 31 }
34
Python
27
5ae087cf8ec080b121c9cdc9bafdc2b35b6e110e
test_modeling_tf_mt5.py
32,078
9
94
test_small_integration_test
https://github.com/huggingface/transformers.git
Fix T5/mT5 tests (#18029)
97
0
5,847
12
1
8
async def test_in_interface(self): iface = gr.Interface(lambda x: x, "text", "markdown") input_data = "Here's an [image](https://gradio.app/images/gradio_logo.png)" output_data = iface(input_data) assert ( output_data == )
test/test_components.py
69
gradio
{ "docstring": "\n Interface, process\n <p>Here's an <a href=\"https://gradio.app/images/gradio_logo.png\">image</a></p>\\n", "language": "en", "n_whitespaces": 20, "n_words": 6, "vocab_size": 6 }
23
Python
20
d79039beb1c3eab597de4871f7eb6522196d1a00
test_components.py
181,302
8
36
test_in_interface
https://github.com/gradio-app/gradio.git
Latex support (#2696) * initial use of dollarmath plugin * add frontend support * chnages * changes * changes * changes * changes * fix * added latex to kinematics blocks * changes * Update CHANGELOG.md Co-authored-by: Abubakar Abid <[email protected]> * added example to changelog * remove param * doc fix * fixes * latex noteboox fix * fix * changes Co-authored-by: Ali Abid <[email protected]> Co-authored-by: Abubakar Abid <[email protected]>
88
0
43,297
10
1
6
def gelu(x): return x * 0.5 * (1.0 + paddle.erf(x / math.sqrt(2.0)))
modules/image/text_to_image/disco_diffusion_cnclip_vitb16/cn_clip/clip/modeling_bert.py
47
PaddleHub
{ "docstring": " Original Implementation of the gelu activation function in Google Bert repo when initially created.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n Also see https://arxiv.org/abs/1606.08415\n ", "language": "en", "n_whitespaces": 71, "n_words": 46, "vocab_size": 39 }
12
Python
11
f4d6e64cdc132ae868699a0ba442f4ab1d304a14
modeling_bert.py
49,740
2
34
gelu
https://github.com/PaddlePaddle/PaddleHub.git
add disco_diffusion_cnclip_vitb16 module
18
0
9,899
13
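A pure-Python numeric check of the erf-based GELU above (no Paddle required); it computes x * Phi(x) with Phi the standard normal CDF:

    import math

    def gelu_scalar(x):
        return x * 0.5 * (1.0 + math.erf(x / math.sqrt(2.0)))

    print(gelu_scalar(0.0))   # 0.0
    print(gelu_scalar(1.0))   # ~0.8413, i.e. 1 * Phi(1)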
2
20
def _set_preview_feed(self): retval = {} for idx, side in enumerate(("a", "b")): logger.debug("Setting preview feed: (side: '%s')", side) preview_images = self._config.get("preview_images", 14) preview_images = min(max(preview_images, 2), 16) batchsize = min(len(self._images[side]), preview_images) retval[side] = self._load_generator(idx).minibatch_ab(self._images[side], batchsize, side, do_shuffle=True, is_preview=True) logger.debug("Set preview feed. Batchsize: %s", batchsize) return retval
plugins/train/trainer/_base.py
184
faceswap
{ "docstring": " Set the preview feed for this feeder.\n\n Creates a generator from :class:`lib.training_data.TrainingDataGenerator` specifically\n for previews for the feeder.\n\n Returns\n -------\n dict\n The side (\"a\" or \"b\") as key, :class:`~lib.training_data.TrainingDataGenerator` as\n value.\n ", "language": "en", "n_whitespaces": 96, "n_words": 31, "vocab_size": 26 }
45
Python
38
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
_base.py
100,389
14
116
_set_preview_feed
https://github.com/deepfakes/faceswap.git
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
395
0
19,874
14
4
26
def script_error_handler(path, exc, msg="", tb=False): exception = type(exc).__name__ if msg: exception = msg lineno = "" if hasattr(exc, "lineno"): lineno = str(exc.lineno) log_msg = f"in script {path}:{lineno} {exception}" if tb: etype, value, tback = sys.exc_info() tback = addonmanager.cut_traceback(tback, "invoke_addon_sync") log_msg = ( log_msg + "\n" + "".join(traceback.format_exception(etype, value, tback)) ) ctx.log.error(log_msg) ReloadInterval = 1
mitmproxy/addons/script.py
199
mitmproxy
{ "docstring": "\n Handles all the user's script errors with\n an optional traceback\n ", "language": "en", "n_whitespaces": 20, "n_words": 10, "vocab_size": 10 }
54
Python
37
b3587b52b25077f68116b9852b041d33e7fc6601
script.py
251,307
15
108
script_error_handler
https://github.com/mitmproxy/mitmproxy.git
make it black!
130
0
73,672
14
1
2
def prevent_sync_event_circular_query(func):
saleor/graphql/checkout/utils.py
13
saleor
{ "docstring": "Prevent circular dependencies in synchronous events resolvers.\n\n Synchronous events are not allowed to request fields that are resolved using other\n synchronous events, which would lead to circular calls of the webhook.\n Using this decorator prevents such circular events resolution.\n\n :raises CircularSubscriptionSyncEvent: When a field being resolved from a\n synchronous webhook's payload uses another synchronous webhook internally.\n ", "language": "en", "n_whitespaces": 74, "n_words": 56, "vocab_size": 45 }
2
Python
2
8201efcde2d7aacccf3512c544cceea6780a0598
utils.py
28,243
3
10
prevent_sync_event_circular_query
https://github.com/saleor/saleor.git
GraphQL subscription support for synchronous webhook events (#9763) * WIP add sync webhooks subscription payload handling * add tests, fix minor things * update schema * remove unneeded code * add fix for circular field resolve * fix-filter-shipping-methods-payload * added_in added to desription * add missing types * revert refactor, precommit issues * fixes after review * cosmetix fixes post-review * subscription types description fixes * remove unneeded description from PaymentBase * add validation for creating webhook with two top level fields, add tests for shippingListMethodsForCheckout * add docstring, refactor prevent_sync_event_circular_wuery wrapper * fix docstring of revent_sync_event_circular_query * fix linters
5
0
5,164
6
1
3
def iterations(self): return self._iterations
keras/optimizers/optimizer_experimental/optimizer.py
19
keras
{ "docstring": "The number of training steps this `optimizer` has run.\n\n By default, iterations would be incremented by one every time\n `apply_gradients()` is called.\n ", "language": "en", "n_whitespaces": 43, "n_words": 22, "vocab_size": 22 }
4
Python
4
84afc5193d38057e2e2badf9c889ea87d80d8fbf
optimizer.py
275,290
2
10
iterations
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
18
0
81,374
6
1
3
def test_nested_auto_heights(snap_compare): assert snap_compare("snapshot_apps/nested_auto_heights.py", press=["1", "2"]) # --- Other ---
tests/snapshot_tests/test_snapshots.py
38
textual
{ "docstring": "Test refreshing widget within a auto sized container", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
10
Python
9
32b7308ac83c20c49ca422726be149fdc5b8fc2d
test_snapshots.py
186,216
2
19
test_nested_auto_heights
https://github.com/Textualize/textual.git
fox for nested heights
15
0
45,406
10
1
4
def num_arrays_on_dev(dev): return len(get_all_arrays_on_dev(dev)) # noinspection PyShadowingNames
ivy/core/device.py
27
ivy
{ "docstring": "\n Returns the number of arrays which are currently alive on the specified device.\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 12 }
7
Python
7
d743336b1f3654cd0315f380f43eed4116997c1d
device.py
213,609
2
14
num_arrays_on_dev
https://github.com/unifyai/ivy.git
renamed dev_str arg to dev for all methods.
12
0
53,674
9
3
6
def pattern(self) -> str | None: if hasattr(self, "_attr_pattern"): return self._attr_pattern if hasattr(self, "entity_description"): return self.entity_description.pattern return None
homeassistant/components/text/__init__.py
65
core
{ "docstring": "Return the regex pattern that the value must match.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
18
Python
14
003e4224c89a6da381960dc5347750d1521d85c9
__init__.py
291,305
7
38
pattern
https://github.com/home-assistant/core.git
Add `text` platform (#79454) Co-authored-by: Franck Nijhof <[email protected]> Co-authored-by: Franck Nijhof <[email protected]>
68
0
90,415
9
6
4
def render_pep440(pieces): if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += f"{pieces['distance']}.g{pieces['short']}" if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = f"0+untagged.{pieces['distance']}.g{pieces['short']}" if pieces["dirty"]: rendered += ".dirty" return rendered
pandas/_version.py
163
pandas
{ "docstring": "Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]\n ", "language": "en", "n_whitespaces": 52, "n_words": 37, "vocab_size": 35 }
36
Python
20
e2df99823758210fb2b7c4aba39e23f3445f7cd3
_version.py
171,627
13
65
render_pep440
https://github.com/pandas-dev/pandas.git
BLD: use nonvendor versioneer (#49924) * BLD: remove vendored versioneer * run vis * move config to pyproject.toml * add versioneer to deps * run pyupgrade * fix isort and pylint * fix ci * fix env
142
0
40,694
14
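A worked illustration of the rendering rules above; the `pieces` dicts are invented examples of what the versioneer machinery produces:

    pieces = {"closest-tag": "1.5.0", "distance": 3, "short": "abc1234", "dirty": True}
    # render_pep440(pieces) -> "1.5.0+3.gabc1234.dirty"   (tag + "+" + distance.gHEX + ".dirty")
    pieces = {"closest-tag": "", "distance": 7, "short": "abc1234", "dirty": False}
    # render_pep440(pieces) -> "0+untagged.7.gabc1234"    (exception #1: no tag found)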
1
4
def clear(self): del self._toklist[:] self._tokdict.clear()
pipenv/patched/notpip/_vendor/pyparsing/results.py
36
pipenv
{ "docstring": "\n Clear all elements and results names.\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
5
Python
5
f3166e673fe8d40277b804d35d77dcdb760fc3b3
results.py
20,624
3
20
clear
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
26
0
3,461
8
1
2
def _refresh_on_access_denied(func):
homeassistant/components/ubus/device_tracker.py
13
core
{ "docstring": "If remove rebooted, it lost our session so rebuild one and try again.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
2
Python
2
8819634b613f6bfd55885283bab86c3852ae40c4
device_tracker.py
298,014
3
10
_refresh_on_access_denied
https://github.com/home-assistant/core.git
String formatting and max line length - Part 6 (#84525)
5
0
96,962
6
8
14
def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin try: if not outfile: realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO() formatter.format(tokens, realoutfile) return realoutfile.getvalue() else: formatter.format(tokens, outfile) except TypeError as err: if (isinstance(err.args[0], str) and ('unbound method format' in err.args[0] or 'missing 1 required positional argument' in err.args[0])): raise TypeError('format() argument must be a formatter instance, ' 'not a class') raise
pipenv/patched/notpip/_vendor/pygments/__init__.py
180
pipenv
{ "docstring": "\n Format a tokenlist ``tokens`` with the formatter ``formatter``.\n\n If ``outfile`` is given and a valid file object (an object\n with a ``write`` method), the result will be written to it, otherwise\n it is returned as a string.\n ", "language": "en", "n_whitespaces": 53, "n_words": 37, "vocab_size": 30 }
61
Python
54
f3166e673fe8d40277b804d35d77dcdb760fc3b3
__init__.py
20,262
15
107
format
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
204
0
3,301
15
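A hedged usage sketch of the function above, assuming the pygments package is installed; the vendored copy exposes the same top-level API:

    from pygments import lex, format
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter

    tokens = lex('print("hi")', PythonLexer())
    html = format(tokens, HtmlFormatter())   # pass an instance; passing the class hits the TypeError above
    print(html.splitlines()[0])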
14
21
def get_bin_path(arg, opt_dirs=None, required=None): opt_dirs = [] if opt_dirs is None else opt_dirs sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin'] paths = [] for d in opt_dirs: if d is not None and os.path.exists(d): paths.append(d) paths += os.environ.get('PATH', '').split(os.pathsep) bin_path = None # mangle PATH to include /sbin dirs for p in sbin_paths: if p not in paths and os.path.exists(p): paths.append(p) for d in paths: if not d: continue path = os.path.join(d, arg) if os.path.exists(path) and not os.path.isdir(path) and is_executable(path): bin_path = path break if bin_path is None: raise ValueError('Failed to find required executable "%s" in paths: %s' % (arg, os.pathsep.join(paths))) return bin_path
lib/ansible/module_utils/common/process.py
303
ansible
{ "docstring": "\n Find system executable in PATH. Raises ValueError if executable is not found.\n Optional arguments:\n - required: [Deprecated] Prior to 2.10, if executable is not found and required is true it raises an Exception.\n In 2.10 and later, an Exception is always raised. This parameter will be removed in 2.14.\n - opt_dirs: optional list of directories to search in addition to PATH\n In addition to PATH and opt_dirs, this function also looks through /sbin, /usr/sbin and /usr/local/sbin. A lot of\n modules, especially for gathering facts, depend on this behaviour.\n If found return full path, otherwise raise ValueError.\n ", "language": "en", "n_whitespaces": 148, "n_words": 96, "vocab_size": 73 }
101
Python
61
b56d73796e85f162d50b4fcd5930035183032d4a
process.py
267,630
22
187
get_bin_path
https://github.com/ansible/ansible.git
Clarify that sbin directories are always looked at in get_bin_path (#78171)
234
0
78,989
14
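A hedged usage sketch of the lookup above; the extra search directory is hypothetical:

    from ansible.module_utils.common.process import get_bin_path

    try:
        git_path = get_bin_path("git", opt_dirs=["/opt/git/bin"])   # hypothetical extra dir
    except ValueError as exc:
        # Raised when the executable is not in PATH, the sbin dirs, or opt_dirs.
        raise SystemExit(str(exc))
    print(git_path)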
10
33
def aggregate(self, *args, **kwargs): if self.query.distinct_fields: raise NotImplementedError("aggregate() + distinct(fields) not implemented.") self._validate_values_are_expressions( (*args, *kwargs.values()), method_name="aggregate" ) for arg in args: # The default_alias property raises TypeError if default_alias # can't be set automatically or AttributeError if it isn't an # attribute. try: arg.default_alias except (AttributeError, TypeError): raise TypeError("Complex aggregates require an alias") kwargs[arg.default_alias] = arg query = self.query.chain() for (alias, aggregate_expr) in kwargs.items(): query.add_annotation(aggregate_expr, alias, is_summary=True) annotation = query.annotations[alias] if not annotation.contains_aggregate: raise TypeError("%s is not an aggregate expression" % alias) for expr in annotation.get_source_expressions(): if ( expr.contains_aggregate and isinstance(expr, Ref) and expr.refs in kwargs ): name = expr.refs raise exceptions.FieldError( "Cannot compute %s('%s'): '%s' is an aggregate" % (annotation.name, name, name) ) return query.get_aggregation(self.db, kwargs)
django/db/models/query.py
306
django
{ "docstring": "\n Return a dictionary containing the calculations (aggregation)\n over the current queryset.\n\n If args is present the expression is passed as a kwarg using\n the Aggregate object's default alias.\n ", "language": "en", "n_whitespaces": 64, "n_words": 28, "vocab_size": 23 }
117
Python
88
9c19aff7c7561e3a82978a272ecdaad40dda5c00
query.py
205,775
30
191
aggregate
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
540
0
51,208
16
1
16
def to_local_object_without_private_data_child(self) -> NDimEntityPhiTensor: # relative from ..tensor import Tensor public_shape = getattr(self, "public_shape", None) public_dtype = getattr(self, "public_dtype", None) return Tensor( child=NDimEntityPhiTensor( child=FixedPrecisionTensor(value=None), entities=self.entities, min_vals=self.min_vals, # type: ignore max_vals=self.max_vals, # type: ignore ), public_shape=public_shape, public_dtype=public_dtype, ) @serializable(capnp_bytes=True)
packages/syft/src/syft/core/tensor/autodp/ndim_entity_phi.py
137
@serializable(capnp_bytes=True)
PySyft
{ "docstring": "Convert this pointer into a partial version of the NDimEntityPhiTensor but without\n any of the private data therein.", "language": "en", "n_whitespaces": 24, "n_words": 18, "vocab_size": 16 }
38
Python
31
8fdb37e3227eb40d431c32ae8f5bfb44866e4490
ndim_entity_phi.py
967
16
79
to_local_object_without_private_data_child
https://github.com/OpenMined/PySyft.git
working ndept addition
192
1
147
14
1
5
def current_umask() -> int: mask = os.umask(0) os.umask(mask) return mask
pipenv/patched/notpip/_internal/utils/unpacking.py
41
pipenv
{ "docstring": "Get the current umask which involves having to set it temporarily.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
10
Python
9
f3166e673fe8d40277b804d35d77dcdb760fc3b3
unpacking.py
20,003
5
23
current_umask
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
22
0
3,171
8
3
12
async def _recover_running_jobs(self): all_jobs = await self._job_info_client.get_all_jobs() for job_id, job_info in all_jobs.items(): if not job_info.status.is_terminal(): create_task(self._monitor_job(job_id))
dashboard/modules/job/job_manager.py
80
ray
{ "docstring": "Recovers all running jobs from the status client.\n\n For each job, we will spawn a coroutine to monitor it.\n Each will be added to self._running_jobs and reconciled.\n ", "language": "en", "n_whitespaces": 48, "n_words": 27, "vocab_size": 25 }
16
Python
16
326b5bd1acc6d3d00ab0546e4ae45da6bed501f7
job_manager.py
126,657
5
46
_recover_running_jobs
https://github.com/ray-project/ray.git
Convert job_manager to be async (#27123) Updates jobs api Updates snapshot api Updates state api Increases jobs api version to 2 Signed-off-by: Alan Guo [email protected] Why are these changes needed? follow-up for #25902 (comment)
63
0
28,217
13
6
16
def collate_full_clips(batch): max_mel_length = max([b[0].shape[1] for b in batch]) if len(batch) > 1 else batch[0][0].shape[1] max_audio_length = max([b[1].shape[0] for b in batch]) if len(batch) > 1 else batch[0][1].shape[0] mels = torch.zeros([len(batch), batch[0][0].shape[0], max_mel_length]) audios = torch.zeros([len(batch), max_audio_length]) for idx, b in enumerate(batch): mel = b[0] audio = b[1] mels[idx, :, : mel.shape[1]] = mel audios[idx, : audio.shape[0]] = audio return mels, audios
TTS/vocoder/datasets/wavegrad_dataset.py
272
TTS
{ "docstring": "This is used in tune_wavegrad.py.\n It pads sequences to the max length.", "language": "en", "n_whitespaces": 18, "n_words": 12, "vocab_size": 12 }
62
Python
38
2c9f00a808e0aa76a82af2e8b325abb71f50d1df
wavegrad_dataset.py
262,565
11
185
collate_full_clips
https://github.com/coqui-ai/TTS.git
Fix tune wavegrad (#1844) * fix imports in tune_wavegrad * load_config returns Coqpit object instead None * set action (store true) for flag "--use_cuda"; start to tune if module is running as the main program * fix var order in the result of batch collating * make style * make style with black and isort
155
0
77,276
13
1
7
def waist2rayleigh(w, wavelen, n=1): w, wavelen = map(sympify, (w, wavelen)) return w**2*n*pi/wavelen
sympy/physics/optics/gaussopt.py
55
sympy
{ "docstring": "\n Calculate the rayleigh range from the waist of a gaussian beam.\n\n See Also\n ========\n\n rayleigh2waist, BeamParameter\n\n Examples\n ========\n\n >>> from sympy.physics.optics import waist2rayleigh\n >>> from sympy import symbols\n >>> w, wavelen = symbols('w wavelen')\n >>> waist2rayleigh(w, wavelen)\n pi*w**2/wavelen\n ", "language": "en", "n_whitespaces": 75, "n_words": 38, "vocab_size": 30 }
12
Python
12
c32aa66c02befb7a12915e6ae4ae953a1a81c8f7
gaussopt.py
196,508
3
36
waist2rayleigh
https://github.com/sympy/sympy.git
Refractive_Index_Parameter_Considered
21
0
47,949
9
1
7
def __copy__(self): # Shallow copy. return self.__constructor__( self.gpu_manager, self.key, self._length_cache, self._width_cache )
modin/core/execution/ray/implementations/cudf_on_ray/partitioning/partition.py
43
modin
{ "docstring": "\n Create a copy of this object.\n\n Returns\n -------\n cuDFOnRayDataframePartition\n A copy of this object.\n ", "language": "en", "n_whitespaces": 61, "n_words": 14, "vocab_size": 10 }
12
Python
12
2bb9a1fab7b0092974853e616dfd5e7ed98f085d
partition.py
155,358
4
27
__copy__
https://github.com/modin-project/modin.git
REFACTOR-#5363: introduce partition constructor; move `add_to_apply_calls` impl in base class (#5354) Signed-off-by: Myachev <[email protected]>
51
0
36,353
8
2
16
def upgrade(): try: with op.batch_alter_table('connection') as batch_op: batch_op.alter_column("conn_id", nullable=False, existing_type=sa.String(250, **COLLATION_ARGS)) batch_op.create_unique_constraint(constraint_name="unique_conn_id", columns=["conn_id"]) except sa.exc.IntegrityError: raise Exception("Make sure there are no duplicate connections with the same conn_id or null values")
airflow/migrations/versions/8d48763f6d53_add_unique_constraint_to_conn_id.py
117
airflow
{ "docstring": "Apply Add unique constraint to ``conn_id`` and set it as non-nullable", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
30
Python
29
69f6f9e01b6df76c3c8fa266d460324163957887
8d48763f6d53_add_unique_constraint_to_conn_id.py
45,476
7
65
upgrade
https://github.com/apache/airflow.git
Autogenerate migration reference doc (#21601) * document airflow version in each alembic migration module and use this to autogen the doc * update each migration module to have the same description used in migration ref (so it can be used in autogen)
75
0
8,603
15
2
19
def forward(self, input_ids, token_type_ids=None, attention_mask=None): r if attention_mask is None: attention_mask = paddle.unsqueeze( (input_ids == self.pad_token_id ).astype(self.pooler.dense.weight.dtype) * -1e4, axis=[1, 2]) embedding_output = self.embeddings(input_ids, token_type_ids) encoded_layer = self.encoder(embedding_output, attention_mask) pooled_output = self.pooler(encoded_layer) return encoded_layer, pooled_output
paddlenlp/transformers/tinybert/modeling.py
139
PaddleNLP
{ "docstring": "\n The TinyBertModel forward method, overrides the `__call__()` special method.\n\n Args:\n input_ids (Tensor):\n Indices of input sequence tokens in the vocabulary. They are\n numerical representations of tokens that build the input sequence.\n Its data type should be `int64` and it has a shape of [batch_size, sequence_length].\n token_type_ids (Tensor, optional):\n Segment token indices to indicate different portions of the inputs.\n Selected in the range ``[0, type_vocab_size - 1]``.\n If `type_vocab_size` is 2, which means the inputs have two portions.\n Indices can either be 0 or 1:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n Its data type should be `int64` and it has a shape of [batch_size, sequence_length].\n Defaults to `None`, which means we don't add segment embeddings.\n attention_mask (Tensor, optional):\n Mask used in multi-head attention to avoid performing attention to some unwanted positions,\n usually the paddings or the subsequent positions.\n Its data type can be int, float and bool.\n When the data type is bool, the `masked` tokens have `False` values and the others have `True` values.\n When the data type is int, the `masked` tokens have `0` values and the others have `1` values.\n When the data type is float, the `masked` tokens have `-INF` values and the others have `0` values.\n It is a tensor with shape broadcasted to `[batch_size, num_attention_heads, sequence_length, sequence_length]`.\n For example, its shape can be [batch_size, sequence_length], [batch_size, sequence_length, sequence_length],\n [batch_size, num_attention_heads, sequence_length, sequence_length].\n Defaults to `None`, which means nothing needed to be prevented attention to.\n\n Returns:\n tuple: Returns tuple (`encoder_output`, `pooled_output`).\n\n With the fields:\n\n - `encoder_output` (Tensor):\n Sequence of hidden-states at the last layer of the model.\n It's data type should be float32 and its shape is [batch_size, sequence_length, hidden_size].\n\n - `pooled_output` (Tensor):\n The output of first token (`[CLS]`) in sequence.\n We \"pool\" the model by simply taking the hidden state corresponding to the first token.\n Its data type should be float32 and its shape is [batch_size, hidden_size].\n\n Example:\n .. code-block::\n\n import paddle\n from paddlenlp.transformers import TinyBertModel, TinyBertTokenizer\n\n tokenizer = TinyBertTokenizer.from_pretrained('tinybert-4l-312d')\n model = TinyBertModel.from_pretrained('tinybert-4l-312d')\n\n inputs = tokenizer(\"Welcome to use PaddlePaddle and PaddleNLP! \")\n inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}\n output = model(**inputs)\n ", "language": "en", "n_whitespaces": 978, "n_words": 358, "vocab_size": 185 }
35
Python
30
b0c35d5e1ff02a634fa26392b60d3885c2c78677
modeling.py
322,100
68
92
forward
https://github.com/PaddlePaddle/PaddleNLP.git
Fix the attention mask for fp16 (#1585)
133
0
118,057
17
1
4
def available(self) -> bool: return self.netdata.available
homeassistant/components/netdata/sensor.py
25
core
{ "docstring": "Could the resource be accessed during the last update call.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
6
Python
6
420733a064286cfe6fc5cf11483835d15ff83462
sensor.py
305,670
3
14
available
https://github.com/home-assistant/core.git
Improve entity type hints [n] (#77824)
20
0
104,454
7
1
6
def vlatex(expr, **settings): r latex_printer = VectorLatexPrinter(settings) return latex_printer.doprint(expr)
sympy/physics/vector/printing.py
38
sympy
{ "docstring": "Function for printing latex representation of sympy.physics.vector\n objects.\n\n For latex representation of Vectors, Dyadics, and dynamicsymbols. Takes the\n same options as SymPy's :func:`~.latex`; see that function for more\n information;\n\n Parameters\n ==========\n\n expr : valid SymPy object\n SymPy expression to represent in LaTeX form\n settings : args\n Same as latex()\n\n Examples\n ========\n\n >>> from sympy.physics.vector import vlatex, ReferenceFrame, dynamicsymbols\n >>> N = ReferenceFrame('N')\n >>> q1, q2 = dynamicsymbols('q1 q2')\n >>> q1d, q2d = dynamicsymbols('q1 q2', 1)\n >>> q1dd, q2dd = dynamicsymbols('q1 q2', 2)\n >>> vlatex(N.x + N.y)\n '\\\\mathbf{\\\\hat{n}_x} + \\\\mathbf{\\\\hat{n}_y}'\n >>> vlatex(q1 + q2)\n 'q_{1} + q_{2}'\n >>> vlatex(q1d)\n '\\\\dot{q}_{1}'\n >>> vlatex(q1 * q2d)\n 'q_{1} \\\\dot{q}_{2}'\n >>> vlatex(q1dd * q1 / q1d)\n '\\\\frac{q_{1} \\\\ddot{q}_{1}}{\\\\dot{q}_{1}}'\n\n ", "language": "en", "n_whitespaces": 205, "n_words": 113, "vocab_size": 84 }
9
Python
9
9a3ffc6781bd44c47cf49e128ef154389c32876a
printing.py
197,452
38
23
vlatex
https://github.com/sympy/sympy.git
Some pep8 cleanup of sympy.physics.vector.
17
0
48,558
8
4
12
def test_pick_two_individuals_eligible_for_crossover_bad(): ind1 = creator.Individual.from_string( 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)', tpot_obj._pset ) ind2 = creator.Individual.from_string( 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)', tpot_obj._pset ) ind3 = creator.Individual.from_string( 'GaussianNB(input_matrix)', tpot_obj._pset ) # Ind1 and ind2 are not a pair because they are the same, ind3 shares no primitive pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1, ind2, ind3]) assert pick1 is None and pick2 is None # You can not do crossover with a population of only 1. pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1]) assert pick1 is None and pick2 is None # You can not do crossover with a population of 0. pick1, pick2 = pick_two_individuals_eligible_for_crossover([]) assert pick1 is None and pick2 is None
tests/tpot_tests.py
171
tpot
{ "docstring": "Assert that pick_two_individuals_eligible_for_crossover() returns the right output when no pair is eligible", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
102
Python
48
388616b6247ca4ea8de4e2f340d6206aee523541
tpot_tests.py
181,690
19
104
test_pick_two_individuals_eligible_for_crossover_bad
https://github.com/EpistasisLab/tpot.git
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
192
0
43,477
9
9
27
def _vindex(x, *indexes): indexes = replace_ellipsis(x.ndim, indexes) nonfancy_indexes = [] reduced_indexes = [] for ind in indexes: if isinstance(ind, Number): nonfancy_indexes.append(ind) elif isinstance(ind, slice): nonfancy_indexes.append(ind) reduced_indexes.append(slice(None)) else: nonfancy_indexes.append(slice(None)) reduced_indexes.append(ind) nonfancy_indexes = tuple(nonfancy_indexes) reduced_indexes = tuple(reduced_indexes) x = x[nonfancy_indexes] array_indexes = {} for i, (ind, size) in enumerate(zip(reduced_indexes, x.shape)): if not isinstance(ind, slice): ind = np.array(ind, copy=True) if ind.dtype.kind == "b": raise IndexError("vindex does not support indexing with boolean arrays") if ((ind >= size) | (ind < -size)).any(): raise IndexError( "vindex key has entries out of bounds for " "indexing along axis %s of size %s: %r" % (i, size, ind) ) ind %= size array_indexes[i] = ind if array_indexes: x = _vindex_array(x, array_indexes) return x
dask/array/core.py
355
dask
{ "docstring": "Point wise indexing with broadcasting.\n\n >>> x = np.arange(56).reshape((7, 8))\n >>> x\n array([[ 0, 1, 2, 3, 4, 5, 6, 7],\n [ 8, 9, 10, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 52, 53, 54, 55]])\n\n >>> d = from_array(x, chunks=(3, 4))\n >>> result = _vindex(d, [0, 1, 6, 0], [0, 1, 0, 7])\n >>> result.compute()\n array([ 0, 9, 48, 7])\n ", "language": "en", "n_whitespaces": 189, "n_words": 95, "vocab_size": 80 }
115
Python
82
b016998fa931f644df4d266a3ed5e7604c20d2a9
core.py
156,931
32
221
_vindex
https://github.com/dask/dask.git
Removed unused loop control variables (`B007`) (#9458) Co-authored-by: James Bourbeau <[email protected]>
379
0
36,811
16
1
13
def test_warn_once(): with warnings.catch_warnings(record=True) as record: # Ignore Deprecation warnings. warnings.filterwarnings("ignore", category=DeprecationWarning) assert not load_checkpoint() assert not load_checkpoint() assert not save_checkpoint(x=2) assert not report(x=2) assert not report(x=3) assert not get_dataset_shard() # Should only warn once. assert len(record) == 4
python/ray/train/tests/test_session.py
130
ray
{ "docstring": "Checks if session misuse warning is only shown once per function.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
39
Python
26
0e8eb8aedb3e158da8c3e7378e818ce87ca7813e
test_session.py
128,341
10
73
test_warn_once
https://github.com/ray-project/ray.git
[AIR] More Train and Tune session deprecations (#28856) Signed-off-by: Amog Kamsetty [email protected] Finish marking train. and tune. session APIs as deprecated
107
0
28,675
11
1
6
def _tie_weights(self): # To tie those two weights if they get disconnected (on TPU or when the bias is resized) self.bias = self.decoder.bias @add_start_docstrings( , XLM_ROBERTA_XL_START_DOCSTRING, )
src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py
38
@add_start_docstrings( """ XLM-RoBERTa-xlarge Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, XLM_ROBERTA_XL_START_DOCSTRING, )
transformers
{ "docstring": "\n XLM-RoBERTa-xlarge Model transformer with a sequence classification/regression head on top (a linear layer on top\n of the pooled output) e.g. for GLUE tasks.\n ", "language": "en", "n_whitespaces": 33, "n_words": 23, "vocab_size": 21 }
27
Python
27
e09473a817c5e5871e11cc81004355ef30250502
modeling_xlm_roberta_xl.py
34,690
2
14
_tie_weights
https://github.com/huggingface/transformers.git
Add support for XLM-R XL and XXL models by modeling_xlm_roberta_xl.py (#13727) * add xlm roberta xl * add convert xlm xl fairseq checkpoint to pytorch * fix init and documents for xlm-roberta-xl * fix indention * add test for XLM-R xl,xxl * fix model hub name * fix some stuff * up * correct init * fix more * fix as suggestions * add torch_device * fix default values of doc strings * fix leftovers * merge to master * up * correct hub names * fix docs * fix model * up * finalize * last fix * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * add copied from * make style Co-authored-by: Patrick von Platen <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]>
44
1
6,311
8
2
27
def _convert_mesh_to_triangles(self, coordinates): if isinstance(coordinates, np.ma.MaskedArray): p = coordinates.data else: p = coordinates p_a = p[:-1, :-1] p_b = p[:-1, 1:] p_c = p[1:, 1:] p_d = p[1:, :-1] p_center = (p_a + p_b + p_c + p_d) / 4.0 triangles = np.concatenate([ p_a, p_b, p_center, p_b, p_c, p_center, p_c, p_d, p_center, p_d, p_a, p_center, ], axis=2).reshape((-1, 3, 2)) c = self.get_facecolor().reshape((*coordinates.shape[:2], 4)) c_a = c[:-1, :-1] c_b = c[:-1, 1:] c_c = c[1:, 1:] c_d = c[1:, :-1] c_center = (c_a + c_b + c_c + c_d) / 4.0 colors = np.concatenate([ c_a, c_b, c_center, c_b, c_c, c_center, c_c, c_d, c_center, c_d, c_a, c_center, ], axis=2).reshape((-1, 3, 4)) return triangles, colors
lib/matplotlib/collections.py
390
matplotlib
{ "docstring": "\n Convert a given mesh into a sequence of triangles, each point\n with its own color. The result can be used to construct a call to\n `~.RendererBase.draw_gouraud_triangles`.\n ", "language": "en", "n_whitespaces": 56, "n_words": 26, "vocab_size": 23 }
112
Python
56
4a5d09cba5f4a20e14553cebd8f70c1f34d20d35
collections.py
109,611
29
273
_convert_mesh_to_triangles
https://github.com/matplotlib/matplotlib.git
Deprecate draw_gouraud_triangle (#23824) * Deprecate draw_gouraud_triangle * DOC: minor rewording Co-authored-by: Elliott Sales de Andrade <[email protected]> Co-authored-by: Thomas A Caswell <[email protected]> Co-authored-by: Elliott Sales de Andrade <[email protected]>
355
0
23,670
12
2
12
def get_binance_available_quotes_for_each_coin() -> dict: trading_pairs = _get_trading_pairs() results = defaultdict(list) for pair in trading_pairs: results[pair["baseAsset"]].append(pair["quoteAsset"]) return results @log_start_end(log=logger)
gamestonk_terminal/cryptocurrency/due_diligence/binance_model.py
82
@log_start_end(log=logger)
OpenBBTerminal
{ "docstring": "Helper methods that for every coin available on Binance add all quote assets. [Source: Binance]\n\n Returns\n -------\n dict:\n All quote assets for given coin\n {'ETH' : ['BTC', 'USDT' ...], 'UNI' : ['ETH', 'BTC','BUSD', ...]\n\n ", "language": "en", "n_whitespaces": 60, "n_words": 34, "vocab_size": 30 }
18
Python
16
e1b6022b9cf156ffc0697d0d25a5ed2772ea8d68
binance_model.py
282,485
15
40
get_binance_available_quotes_for_each_coin
https://github.com/OpenBB-finance/OpenBBTerminal.git
Global plot styles (#1228) * Add default stylesheets * Add terminal style helper class and global style initialization in cfg * Style comments and docstrings * Load rich terminal theme from config file * Add application chart styles to candle charts * Add todos * Remove explicit color setting for some ta charts * Add user styles folder to gitignore * Update default stylesheets * Add matplotlib font manager support * Add matplotlib font manager support * Update docstrings and default style * Update stocks candle chart formatting (return fig to style title) * Style common ta overlap view * Make up and down market colors a part of the style helper * Update stylesheets * Style common ta volume view * Style common ta momentum view * Style common ta trend indicators view * Style common ta volatility view * Style common ta volume view * Style common ta custom indicators view * Fix styling bugs and remove the obvious time x lablel * Style charts in the covid menu * Set legend position to upper left in the mpl stylesheet * Add mpl_rcparams configs for parameters not covered by stylesheets * Remove font configuration files * Update style class utility functions * Implement passing external axes and style utility usage in ema & stoch * Add theme watermark and output helpers * Rename style to theme * Update helper usage in ta/ma and ta/stoch * Update style to theme in sample menus * Style forex (#1305) * Make tight layout optional 'cause mplfinance doesn't support it * Apply global style to the forex menu * Update code layout in oanda view and black * Style common TA (#1315) * Make tight layout optional 'cause mplfinance doesn't support it * Apply global style to the forex menu * Add linewidth to theme for use in mpf's addplots * Add vwap to the stocks notebook api * Update common/ta overlap to follow charting style * Apply style on TerminalStyle init * Enable infrastructure for excluding non-trading days from plots * Update notebook api to include there and resolve bandit warning * Update ta/common/overlap to exclude non-trading days * Enable external ax, style and non-trading days in common/ta/momentum * Enable external ax, style and non-trading days in common/ta/trend * Update vwap to the argument naming convention * Enable external ax, style and non-trading days in common/ta/volatility * Enable external ax, style and non-trading days in common/ta/volume * Enable external ax, style and non-trading days in common/ta/custom * Fix controller tests * Forgot to disable rewriting of the cassettes ... 
* Fix controller errors that came up because a merge conflict * Fix price label position on fib * Fix line having wrong x values in fib Co-authored-by: Colin Delahunty <[email protected]> * Style economy (#1308) * Began converting * Added alphavan_view * Added CNN View * Updated nasdaq view, fixed glitch * Added fred * Refactored URL * Theo's requested changes * Updated docstrings * Updated tests * Fixed pylint * Fixed tests * Theo changes * Econ Fix * Refactor chart style for Crypto context (#1306) * Remove mock for gff * Mock visualize_output helper function * Refactor * Fix plot helper * Update legend loc * Refactor mplfinance candle plot * Fix errors in the helper function * Fix binbook having the wrong call_ function name * Remove hardcoded style params * Resolve kwargs future warning from pandas * Remove warnings import Co-authored-by: Theodore Aptekarev <[email protected]> * funds + custom (#1311) * funds + custom * cleanup cleanup everybody everywhere * Fix external axes conditional and a typo Co-authored-by: Theodore Aptekarev <[email protected]> * Add external axes mode to covid charts (#1328) * Add portfolio menu plots (#1318) * Portfolio view plots (commenting out report stuff) * PA Menu broken. Commenting out and fix tests * portfolio optimization * comment out commented api line * Add notes on disabling the pa submenu Co-authored-by: Theodore Aptekarev <[email protected]> * Plot updates in common BA (#1335) * Add external axes support to common/ba/finbrain * Add external axes support to common/ba/twitter * Add external axes support to common/ba/google * Add external axes support to common/ba/sentimentinvestor * Add sentimentinvestor to the notebooks API * Fix tests * Etf refactor (#1323) * Refactored no ETF * Fixed gtff import * Fixed tests * Fix pie chart style * Refactored etf/candle * Added pylint fix * Fixed tests * Update candle chart layout * Update etf controller test * Remove strange binary file Co-authored-by: Theodore Aptekarev <[email protected]> * Expose ETF candle function in the notebooks API * Common BA and Common QA charts update (#1342) * Add external axes support to common/ba/finbrain * Add external axes support to common/ba/twitter * Add external axes support to common/ba/google * Add external axes support to common/ba/sentimentinvestor * Add sentimentinvestor to the notebooks API * Fix tests * Update stylesheet files * Refactor charts for common/qa * Update the forgotten line plot * Update tests * Add missing arg to a docstring * Remove scientific notation * Black imports Co-authored-by: Minh Hoang <[email protected]> * Options refactor (#1324) * Fixed alphaquery_view * finished options * Fixed pylint * Fixed tests * Fixed tests * Fixed tests * update yfinance * Tradier + Chartexchange * change mocks from gtff to theme.visualize output * tests Co-authored-by: Theodore Aptekarev <[email protected]> Co-authored-by: james <[email protected]> * Refactor Stocks menu (#1325) * Fix backtesting menu * Refactor comparison analysis * Refactor Dark pool shorts * Refactor rest of menu * Fix test * Fix tests failing * Fix tests fail * Fix test failing * Remove record mode=none to record new output * Rewrite test output * Rewrite test outputs * Adding more rewritten test output * Mock plt.show * Mock missing plt.show * Missing @pytest.mark.vcr * Updating tests : common/behavioural_analysis/finbrain * Improve notebooks API coverage for CA and DPS * Silence annoying flake8 warning Co-authored-by: Chavithra PARANA <[email protected]> Co-authored-by: Theodore Aptekarev 
<[email protected]> * Charts update for common/pred (#1344) * Add external axes support to common/ba/finbrain * Add external axes support to common/ba/twitter * Add external axes support to common/ba/google * Add external axes support to common/ba/sentimentinvestor * Add sentimentinvestor to the notebooks API * Fix tests * Update stylesheet files * Refactor charts for common/qa * Update the forgotten line plot * Update tests * Add missing arg to a docstring * Style pred helper and controllers * Update ETS plot * Update plots in KNN and pred helper * Update plot and pretty table for arima * Update plot for common/pred/regression * Refactor mc_view * Fix linting * Fix mypy * Move plot title to the axis level to make more vertical space Co-authored-by: Minh Hoang <[email protected]> Co-authored-by: jmaslek <[email protected]> * linter * Update common/ba test data * Change etf candle to match stock candle * try updating sia test Co-authored-by: Colin Delahunty <[email protected]> Co-authored-by: jmaslek <[email protected]> Co-authored-by: minhhoang1023 <[email protected]> Co-authored-by: Minh Hoang <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]>
39
1
84,165
12
9
19
def get_actual_start_end_datetime_of_shift(employee, for_datetime, consider_default_shift=False): actual_shift_start = actual_shift_end = shift_details = None shift_timings_as_per_timestamp = get_employee_shift_timings( employee, for_datetime, consider_default_shift ) timestamp_list = [] for shift in shift_timings_as_per_timestamp: if shift: timestamp_list.extend([shift.actual_start, shift.actual_end]) else: timestamp_list.extend([None, None]) timestamp_index = None for index, timestamp in enumerate(timestamp_list): if timestamp and for_datetime <= timestamp: timestamp_index = index break if timestamp_index and timestamp_index % 2 == 1: shift_details = shift_timings_as_per_timestamp[int((timestamp_index - 1) / 2)] actual_shift_start = shift_details.actual_start actual_shift_end = shift_details.actual_end elif timestamp_index: shift_details = shift_timings_as_per_timestamp[int(timestamp_index / 2)] return actual_shift_start, actual_shift_end, shift_details
erpnext/hr/doctype/shift_assignment/shift_assignment.py
225
erpnext
{ "docstring": "Takes a datetime and returns the 'actual' start datetime and end datetime of the shift in which the timestamp belongs.\n\tHere 'actual' means - taking in to account the \"begin_check_in_before_shift_start_time\" and \"allow_check_out_after_shift_end_time\".\n\tNone is returned if the timestamp is outside any actual shift timings.\n\tShift Details is also returned(current/upcoming i.e. if timestamp not in any actual shift then details of next shift returned)\n\t", "language": "en", "n_whitespaces": 59, "n_words": 63, "vocab_size": 41 }
82
Python
54
494bd9ef78313436f0424b918f200dab8fc7c20b
shift_assignment.py
66,198
23
145
get_actual_start_end_datetime_of_shift
https://github.com/frappe/erpnext.git
style: format code with black
59
0
14,136
14
3
5
def synchronize_labels(self, axis=None): if axis is None: self._deferred_index = True self._deferred_column = True elif axis == 0: self._deferred_index = True else: self._deferred_column = True
modin/core/dataframe/pandas/dataframe/dataframe.py
70
modin
{ "docstring": "\n Set the deferred axes variables for the ``PandasDataframe``.\n\n Parameters\n ----------\n axis : int, default: None\n The deferred axis.\n 0 for the index, 1 for the columns.\n ", "language": "en", "n_whitespaces": 84, "n_words": 26, "vocab_size": 20 }
24
Python
15
3c740dbfcdd69ddc3ab45a42be996e5c61104342
dataframe.py
152,959
8
42
synchronize_labels
https://github.com/modin-project/modin.git
FEAT-#3111: Ensure relabeling Modin Frame does not lose partition shape (#3662) Co-authored-by: Devin Petersohn <[email protected]> Signed-off-by: Naren Krishna <[email protected]>
96
0
35,205
10
2
2
def testResourceDeadlock(self):
python/ray/tune/tests/test_trial_runner_pg.py
13
ray
{ "docstring": "Tests that resource deadlock is avoided for heterogeneous PGFs.\n\n We start 4 trials in a cluster with 2 CPUs. The first two trials\n require 1 CPU each, the third trial 2 CPUs, the fourth trial 1 CPU.\n\n The second trial needs a bit more time to finish. This means that the\n resources from the first trial will be freed, and the PG of the\n _fourth_ trial becomes ready (not that of the third trial, because that\n requires 2 CPUs - however, one is still occupied by trial 2).\n\n After the first two trials finished, the FIFOScheduler tries to start\n the third trial. However, it can't be started because its placement\n group is not ready. Instead, the placement group of the fourth\n trial is ready. Thus, we opt to run the fourth trial instead.\n ", "language": "en", "n_whitespaces": 210, "n_words": 133, "vocab_size": 84 }
2
Python
2
976ece4bc43abdb628cf4cbffc8546abab723a6d
test_trial_runner_pg.py
129,037
24
190
testResourceDeadlock
https://github.com/ray-project/ray.git
[tune] Add test for heterogeneous resource request deadlocks (#21397) This adds a test for potential resource deadlocks in experiments with heterogeneous PGFs. If the PGF of a later trial becomes ready before that of a previous trial, we could run into a deadlock. This is currently avoided, but untested, flagging the code path for removal in #21387.
9
0
28,880
6
1
34
async def test_vocolinc_flowerbud_setup(hass): accessories = await setup_accessories_from_file(hass, "vocolinc_flowerbud.json") await setup_test_accessories(hass, accessories) await assert_devices_and_entities_created( hass, DeviceTestInfo( unique_id=HUB_TEST_ACCESSORY_ID, name="VOCOlinc-Flowerbud-0d324b", model="Flowerbud", manufacturer="VOCOlinc", sw_version="3.121.2", hw_version="0.1", serial_number="AM01121849000327", devices=[], entities=[ EntityTestInfo( entity_id="humidifier.vocolinc_flowerbud_0d324b", friendly_name="VOCOlinc-Flowerbud-0d324b", unique_id="00:00:00:00:00:00_1_30", supported_features=HumidifierEntityFeature.MODES, capabilities={ "available_modes": ["normal", "auto"], "max_humidity": 100.0, "min_humidity": 0.0, }, state="off", ), EntityTestInfo( entity_id="light.vocolinc_flowerbud_0d324b_mood_light", friendly_name="VOCOlinc-Flowerbud-0d324b Mood Light", unique_id="00:00:00:00:00:00_1_9", supported_features=0, capabilities={"supported_color_modes": ["hs"]}, state="on", ), EntityTestInfo( entity_id="number.vocolinc_flowerbud_0d324b_spray_quantity", friendly_name="VOCOlinc-Flowerbud-0d324b Spray Quantity", unique_id="00:00:00:00:00:00_1_30_38", capabilities={ "max": 5, "min": 1, "mode": NumberMode.AUTO, "step": 1, }, state="5", entity_category=EntityCategory.CONFIG, ), EntityTestInfo( entity_id="sensor.vocolinc_flowerbud_0d324b_current_humidity", friendly_name="VOCOlinc-Flowerbud-0d324b Current Humidity", unique_id="00:00:00:00:00:00_1_30_33", capabilities={"state_class": SensorStateClass.MEASUREMENT}, unit_of_measurement=PERCENTAGE, state="45.0", ), ], ), )
tests/components/homekit_controller/specific_devices/test_vocolinc_flowerbud.py
389
core
{ "docstring": "Test that a Vocolinc Flowerbud can be correctly setup in HA.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
84
Python
70
f23b1750e85f07091eb896a0b12b8f95e5646338
test_vocolinc_flowerbud.py
288,885
59
238
test_vocolinc_flowerbud_setup
https://github.com/home-assistant/core.git
Migrate HomeKit Controller to use stable identifiers (#80064)
1,005
0
88,034
19
3
12
def closure(self, rel, depth=-1): from nltk.util import acyclic_breadth_first for synset in acyclic_breadth_first(self, rel, depth): if synset != self: yield synset from nltk.util import acyclic_depth_first as acyclic_tree from nltk.util import unweighted_minimum_spanning_tree as mst # Also add this shortcut? # from nltk.util import unweighted_minimum_spanning_digraph as umsd
nltk/corpus/reader/wordnet.py
89
nltk
{ "docstring": "\n Return the transitive closure of source under the rel\n relationship, breadth-first, discarding cycles:\n\n >>> from nltk.corpus import wordnet as wn\n >>> computer = wn.synset('computer.n.01')\n >>> topic = lambda s:s.topic_domains()\n >>> print(list(computer.closure(topic)))\n [Synset('computer_science.n.01')]\n\n UserWarning: Discarded redundant search for Synset('computer.n.01') at depth 2\n\n\n Include redundant paths (but only once), avoiding duplicate searches\n (from 'animal.n.01' to 'entity.n.01'):\n\n >>> dog = wn.synset('dog.n.01')\n >>> hyp = lambda s:s.hypernyms()\n >>> print(list(dog.closure(hyp)))\n [Synset('canine.n.02'), Synset('domestic_animal.n.01'), Synset('carnivore.n.01'),\\\n Synset('animal.n.01'), Synset('placental.n.01'), Synset('organism.n.01'),\\\n Synset('mammal.n.01'), Synset('living_thing.n.01'), Synset('vertebrate.n.01'),\\\n Synset('whole.n.02'), Synset('chordate.n.01'), Synset('object.n.01'),\\\n Synset('physical_entity.n.01'), Synset('entity.n.01')]\n\n UserWarning: Discarded redundant search for Synset('animal.n.01') at depth 7\n ", "language": "en", "n_whitespaces": 201, "n_words": 88, "vocab_size": 69 }
44
Python
29
692adaff901dd9daf29400fdf3385130aefbfb2a
wordnet.py
42,481
5
38
closure
https://github.com/nltk/nltk.git
Fix some tests in Wordnet-related DocStrings
106
0
7,566
10
2
7
def is_on(self) -> bool: return ( self.coordinator.data[self.entity_description.key] == "TooLow" or self.coordinator.data[self.entity_description.key] == "TooHigh" )
homeassistant/components/flipr/binary_sensor.py
67
core
{ "docstring": "Return true if the binary sensor is on in case of a Problem is detected.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 14 }
14
Python
12
c7dfd6b15a3fc9fa81d260b3dfa8a3d836f9afa8
binary_sensor.py
290,673
6
40
is_on
https://github.com/home-assistant/core.git
Add flipr battery level sensor (#81389) * Addition of battery level sensor. Correction of pylint errors * Review improvement for typing * Review improvement for typing * Correction following review
57
0
89,787
11
1
3
def test_archive_too_large_for_mem_cache(self, cache_set):
tests/sentry/lang/javascript/test_processor.py
15
sentry
{ "docstring": "cache.set is never called if the archive is too large", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
3
Python
3
8cdaa4e86e8296cdbc145f2a53d3eb38cb7a1c2b
test_processor.py
90,778
8
74
test_archive_too_large_for_mem_cache
https://github.com/getsentry/sentry.git
ref: close files explicitly in tests.sentry.lang.javascript.test_processor (#35262)
10
0
18,689
6
1
2
def BurstTaskRunner(): job_queue = []
src/sentry/testutils/helpers/task_runner.py
19
sentry
{ "docstring": "\n A fixture for queueing up Celery tasks and working them off in bursts.\n\n The main interesting property is that one can run tasks at a later point in\n the future, testing \"concurrency\" without actually spawning any kind of\n worker.\n ", "language": "en", "n_whitespaces": 55, "n_words": 39, "vocab_size": 37 }
5
Python
5
ce3e457ef18fe0046d6aca0b545eac55eae8f17c
task_runner.py
87,360
7
28
BurstTaskRunner
https://github.com/getsentry/sentry.git
feat(perf-issues): Move queue info for post_process into headers (ISP… (#40239) Re-do of https://github.com/getsentry/sentry/pull/39946 as merge conflict didn't mesh right. Sends dedicated issue category data to post_process_group call so we can route to the appropriate celery queue Will need to include changes from https://github.com/getsentry/sentry/pull/40283 to be merged first and an ensuing PR to remove the old queue.
11
0
18,288
7
8
52
def update_proxy_model_permissions(apps, schema_editor, reverse=False): style = color_style() Permission = apps.get_model("auth", "Permission") ContentType = apps.get_model("contenttypes", "ContentType") alias = schema_editor.connection.alias for Model in apps.get_models(): opts = Model._meta if not opts.proxy: continue proxy_default_permissions_codenames = [ "%s_%s" % (action, opts.model_name) for action in opts.default_permissions ] permissions_query = Q(codename__in=proxy_default_permissions_codenames) for codename, name in opts.permissions: permissions_query = permissions_query | Q(codename=codename, name=name) content_type_manager = ContentType.objects.db_manager(alias) concrete_content_type = content_type_manager.get_for_model( Model, for_concrete_model=True ) proxy_content_type = content_type_manager.get_for_model( Model, for_concrete_model=False ) old_content_type = proxy_content_type if reverse else concrete_content_type new_content_type = concrete_content_type if reverse else proxy_content_type try: with transaction.atomic(using=alias): Permission.objects.using(alias).filter( permissions_query, content_type=old_content_type, ).update(content_type=new_content_type) except IntegrityError: old = "{}_{}".format(old_content_type.app_label, old_content_type.model) new = "{}_{}".format(new_content_type.app_label, new_content_type.model) sys.stdout.write( style.WARNING(WARNING.format(old=old, new=new, query=permissions_query)) )
django/contrib/auth/migrations/0011_update_proxy_permissions.py
413
django
{ "docstring": "\n Update the content_type of proxy model permissions to use the ContentType\n of the proxy model.\n ", "language": "en", "n_whitespaces": 25, "n_words": 15, "vocab_size": 11 }
106
Python
74
9c19aff7c7561e3a82978a272ecdaad40dda5c00
0011_update_proxy_permissions.py
203,669
36
259
update_proxy_model_permissions
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
422
0
50,502
18
7
20
def get_loss(self, session_id=None): logger.debug("Getting loss: (session_id: %s)", session_id) retval = {} for idx in [session_id] if session_id else self.session_ids: self._check_cache(idx) data = self._cache.get_data(idx, "loss") if not data: continue data = data[idx] retval[idx] = {title: data["loss"][:, idx] for idx, title in enumerate(data["labels"])} logger.debug({key: {k: v.shape for k, v in val.items()} for key, val in retval.items()}) return retval
lib/gui/analysis/event_reader.py
210
faceswap
{ "docstring": " Read the loss from the TensorBoard event logs\n\n Parameters\n ----------\n session_id: int, optional\n The Session ID to return the loss for. Set to ``None`` to return all session\n losses. Default ``None``\n\n Returns\n -------\n dict\n The session id(s) as key, with a further dictionary as value containing the loss name\n and list of loss values for each step\n ", "language": "en", "n_whitespaces": 151, "n_words": 57, "vocab_size": 44 }
56
Python
44
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
event_reader.py
100,308
13
132
get_loss
https://github.com/deepfakes/faceswap.git
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
189
0
19,805
14