Dataset columns (in the cell order used by the preview rows below), with dtype and the observed value or string-length range:

| Column | Dtype | Range |
|---|---|---|
| complexity | int64 | 1 to 56 |
| n_identifiers | int64 | 1 to 114 |
| code | string | lengths 19 to 12.7k |
| path | string | lengths 8 to 134 |
| n_ast_nodes | int64 | 12 to 2.35k |
| ast_errors | string | lengths 0 to 4.01k |
| repo | string | lengths 3 to 28 |
| documentation | dict | nested: docstring, language, n_whitespaces, n_words, vocab_size |
| n_words | int64 | 2 to 866 |
| language | string | 1 class (Python) |
| vocab_size | int64 | 2 to 323 |
| commit_id | string | length 40 |
| file_name | string | lengths 5 to 79 |
| id | int64 | 243 to 338k |
| nloc | int64 | 1 to 228 |
| token_counts | int64 | 5 to 1.4k |
| fun_name | string | lengths 1 to 77 |
| url | string | lengths 31 to 60 |
| commit_message | string | lengths 3 to 15.3k |
| n_whitespaces | int64 | 1 to 3.23k |
| n_ast_errors | int64 | 0 to 20 |
| d_id | int64 | 74 to 121k |
| ast_levels | int64 | 4 to 29 |

Each preview row below lists one record's cells in this column order, separated by `|`: the Python source (with its docstring removed) in `code`, the stripped docstring and its statistics in `documentation`, and the originating repository and commit metadata in the remaining fields.
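A record like the ones below could be pulled out and inspected with the Hugging Face `datasets` library. This is a minimal sketch only; `org/code-dataset` is a placeholder identifier, not the dataset's real name.

```python
from datasets import load_dataset

# Placeholder dataset id; substitute the actual Hub identifier.
ds = load_dataset("org/code-dataset", split="train")

sample = ds[0]
print(sample["fun_name"], "from", sample["repo"])   # function name and source repository
print(sample["code"])                               # function body with the docstring stripped out
print(sample["documentation"]["docstring"])         # the docstring kept in the separate field
```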
2 | 9 | def writelines(self, lines):
self._checkClosed()
for line in lines:
self.write(line)
io.IOBase.register(IOBase)
| python3.10.4/Lib/_pyio.py | 54 | XX-Net | {
"docstring": "Write a list of lines to the stream.\n\n Line separators are not added, so it is usual for each of the lines\n provided to have a line separator at the end.\n ",
"language": "en",
"n_whitespaces": 52,
"n_words": 31,
"vocab_size": 25
} | 10 | Python | 10 | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | _pyio.py | 219,909 | 4 | 24 | writelines | https://github.com/XX-net/XX-Net.git | add python 3.10.4 for windows | 41 | 0 | 55,895 | 9 |
|
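A quick, hedged usage sketch of the `writelines` behaviour described in the docstring above: no line separators are added, so each element should already end with one.

```python
import io

buf = io.StringIO()
# writelines() does not append newlines; each element carries its own.
buf.writelines(["first line\n", "second line\n"])
print(buf.getvalue())
```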
1 | 5 | def _on_slider_update(self, value) -> None:
self.scale_var.set(f"{value}%")
| lib/training/preview_tk.py | 37 | faceswap | {
"docstring": " Callback for when the scale slider is adjusted. Adjusts the combo box display to the\n current slider value.\n\n Parameters\n ----------\n value: int\n The value that the slider has been set to\n ",
"language": "en",
"n_whitespaces": 79,
"n_words": 31,
"vocab_size": 25
} | 6 | Python | 6 | 7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5 | preview_tk.py | 101,558 | 10 | 19 | _on_slider_update | https://github.com/deepfakes/faceswap.git | Training - Use custom preview pop-out | 20 | 0 | 20,968 | 9 |
|
1 | 10 | def unrank_binary(self, rank, superset):
bits = bin(rank)[2:].rjust(len(superset), '0')
return Subset.subset_from_bitlist(superset, bits)
| sympy/combinatorics/subsets.py | 63 | sympy | {
"docstring": "\n Gets the binary ordered subset of the specified rank.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Subset\n >>> Subset.unrank_binary(4, ['a', 'b', 'c', 'd']).subset\n ['b']\n\n See Also\n ========\n\n iterate_binary, rank_binary\n ",
"language": "en",
"n_whitespaces": 99,
"n_words": 28,
"vocab_size": 25
} | 11 | Python | 11 | 498015021131af4dbb07eb110e5badaba8250c7b | subsets.py | 196,204 | 3 | 39 | unrank_binary | https://github.com/sympy/sympy.git | Updated import locations | 32 | 0 | 47,704 | 11 |
|
16 | 5 | def lcm_list(seq, *gens, **args):
seq = sympify(seq)
| sympy/polys/polytools.py | 31 | sympy | {
"docstring": "\n Compute LCM of a list of polynomials.\n\n Examples\n ========\n\n >>> from sympy import lcm_list\n >>> from sympy.abc import x\n\n >>> lcm_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])\n x**5 - x**4 - 2*x**3 - x**2 + x + 2\n\n ",
"language": "en",
"n_whitespaces": 67,
"n_words": 42,
"vocab_size": 26
} | 7 | Python | 7 | 29e153dd0a70a3fe97c2a9a5f752334e937023c5 | polytools.py | 197,244 | 35 | 254 | lcm_list | https://github.com/sympy/sympy.git | update some type hints | 13 | 0 | 48,406 | 8 |
|
2 | 8 | def _calculate_mean_and_var(self, x, axes, keep_dims):
if self.synchronized:
return self._sync_calculate_mean_and_var(x, axes, keep_dims)
else:
return super()._calculate_mean_and_var(x, axes, keep_dims)
| keras/layers/normalization/batch_normalization.py | 65 | keras | {
"docstring": "Override mean and var calculation when used with `synchronized`.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | 16 | Python | 12 | 8c401c032b3021f89609eac79bd1c881b9bbc84f | batch_normalization.py | 280,181 | 5 | 43 | _calculate_mean_and_var | https://github.com/keras-team/keras.git | Merge `SyncBatchNormalization` into `BatchNormalization` with parameter `use_sync`
PiperOrigin-RevId: 482921013 | 59 | 0 | 83,281 | 12 |
|
3 | 14 | def format_block(self) -> str:
if self.summary:
block = self.summary
else:
block = '\n'.join(m.format() for m in self.messages)
message = block.strip()
# Hack to remove ANSI color reset code from SubprocessError messages.
message = message.replace(display.clear, '')
return message
| test/lib/ansible_test/_internal/test.py | 103 | ansible | {
"docstring": "Format the test summary or messages as a block of text and return the result.",
"language": "en",
"n_whitespaces": 14,
"n_words": 15,
"vocab_size": 14
} | 37 | Python | 31 | 3eb0485dd92c88cc92152d3656d94492db44b183 | test.py | 268,054 | 9 | 59 | format_block | https://github.com/ansible/ansible.git | ansible-test - Use more native type hints. (#78435)
* ansible-test - Use more native type hints.
Simple search and replace to switch from comments to native type hints for return types of functions with no arguments.
* ansible-test - Use more native type hints.
Conversion of simple single-line function annotation type comments to native type hints.
* ansible-test - Use more native type hints.
Conversion of single-line function annotation type comments with default values to native type hints.
* ansible-test - Use more native type hints.
Manual conversion of type annotation comments for functions which have pylint directives. | 108 | 0 | 79,328 | 13 |
|
1 | 2 | def row(self):
return self["row"]
| packages/python/plotly/plotly/graph_objs/funnelarea/_domain.py | 22 | plotly.py | {
"docstring": "\n If there is a layout grid, use the domain for this row in the\n grid for this funnelarea trace .\n\n The 'row' property is a integer and may be specified as:\n - An int (or float that will be cast to an int)\n in the interval [0, 9223372036854775807]\n\n Returns\n -------\n int\n ",
"language": "en",
"n_whitespaces": 121,
"n_words": 51,
"vocab_size": 42
} | 4 | Python | 4 | 43e3a4011080911901176aab919c0ecf5046ddd3 | _domain.py | 229,865 | 2 | 11 | row | https://github.com/plotly/plotly.py.git | switch to black .22 | 18 | 0 | 61,538 | 7 |
|
1 | 7 | def active(self) -> Optional[Scope]:
ctx = current_context()
return ctx.scope
| synapse/logging/scopecontextmanager.py | 35 | synapse | {
"docstring": "\n Returns the currently active Scope which can be used to access the\n currently active Scope.span.\n If there is a non-null Scope, its wrapped Span\n becomes an implicit parent of any newly-created Span at\n Tracer.start_active_span() time.\n\n Return:\n The Scope that is active, or None if not available.\n ",
"language": "en",
"n_whitespaces": 107,
"n_words": 46,
"vocab_size": 40
} | 9 | Python | 9 | 6ad012ef89c966cbb3616c1be63d964db48d49ca | scopecontextmanager.py | 248,680 | 13 | 20 | active | https://github.com/matrix-org/synapse.git | More type hints for `synapse.logging` (#13103)
Completes type hints for synapse.logging.scopecontextmanager and (partially)
for synapse.logging.opentracing. | 30 | 0 | 72,415 | 8 |
|
1 | 9 | def to_sql(cls, qc, **kwargs):
# we first insert an empty DF in order to create the full table in the database
# This also helps to validate the input against pandas
# we would like to_sql() to complete only when all rows have been inserted into the database
# since the mapping operation is non-blocking, each partition will return an empty DF
# so at the end, the blocking operation will be this empty DF to_pandas
empty_df = qc.getitem_row_array([0]).to_pandas().head(0)
empty_df.to_sql(**kwargs)
# so each partition will append its respective DF
kwargs["if_exists"] = "append"
columns = qc.columns
| modin/core/execution/ray/implementations/pandas_on_ray/io/io.py | 89 | modin | {
"docstring": "\n Write records stored in the `qc` to a SQL database.\n\n Parameters\n ----------\n qc : BaseQueryCompiler\n The query compiler of the Modin dataframe that we want to run ``to_sql`` on.\n **kwargs : dict\n Parameters for ``pandas.to_sql(**kwargs)``.\n ",
"language": "en",
"n_whitespaces": 100,
"n_words": 35,
"vocab_size": 31
} | 95 | Python | 65 | 0faf4675140415e17d4112f9d0d37cfe87770b9e | io.py | 152,977 | 8 | 77 | to_sql | https://github.com/modin-project/modin.git | REFACTOR-#3871: move related to pandas functionality into 'PandasOnRayIO' class (#3872)
Signed-off-by: Anatoly Myachev <[email protected]> | 172 | 0 | 35,219 | 13 |
|
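The comments in the `to_sql` snippet above describe a create-then-append pattern: write an empty frame first so the table exists and the schema is validated, then append the real rows. A minimal single-process pandas sketch of that idea, assuming a local SQLite database (the Modin version distributes the append step across partitions):

```python
import sqlite3

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})

with sqlite3.connect("example.db") as con:
    # 1) Create the table (and validate the schema) using an empty slice of the frame.
    df.head(0).to_sql("my_table", con, if_exists="replace", index=False)
    # 2) Append the actual rows; in Modin each partition would run this step independently.
    df.to_sql("my_table", con, if_exists="append", index=False)
```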
11 | 21 | def align_xlabels(self, axs=None):
if axs is None:
axs = self.axes
axs = [ax for ax in np.ravel(axs) if ax.get_subplotspec() is not None]
for ax in axs:
_log.debug(' Working on: %s', ax.get_xlabel())
rowspan = ax.get_subplotspec().rowspan
pos = ax.xaxis.get_label_position() # top or bottom
# Search through other axes for label positions that are same as
# this one and that share the appropriate row number.
# Add to a grouper associated with each axes of siblings.
# This list is inspected in `axis.draw` by
# `axis._update_label_position`.
for axc in axs:
if axc.xaxis.get_label_position() == pos:
rowspanc = axc.get_subplotspec().rowspan
if (pos == 'top' and rowspan.start == rowspanc.start or
pos == 'bottom' and rowspan.stop == rowspanc.stop):
# grouper for groups of xlabels to align
self._align_label_groups['x'].join(ax, axc)
| lib/matplotlib/figure.py | 240 | matplotlib | {
"docstring": "\n Align the xlabels of subplots in the same subplot column if label\n alignment is being done automatically (i.e. the label position is\n not manually set).\n\n Alignment persists for draw events after this is called.\n\n If a label is on the bottom, it is aligned with labels on Axes that\n also have their label on the bottom and that have the same\n bottom-most subplot row. If the label is on the top,\n it is aligned with labels on Axes with the same top-most row.\n\n Parameters\n ----------\n axs : list of `~matplotlib.axes.Axes`\n Optional list of (or ndarray) `~matplotlib.axes.Axes`\n to align the xlabels.\n Default is to align all Axes on the figure.\n\n See Also\n --------\n matplotlib.figure.Figure.align_ylabels\n matplotlib.figure.Figure.align_labels\n\n Notes\n -----\n This assumes that ``axs`` are from the same `.GridSpec`, so that\n their `.SubplotSpec` positions correspond to figure positions.\n\n Examples\n --------\n Example with rotated xtick labels::\n\n fig, axs = plt.subplots(1, 2)\n for tick in axs[0].get_xticklabels():\n tick.set_rotation(55)\n axs[0].set_xlabel('XLabel 0')\n axs[1].set_xlabel('XLabel 1')\n fig.align_xlabels()\n ",
"language": "en",
"n_whitespaces": 422,
"n_words": 156,
"vocab_size": 99
} | 121 | Python | 82 | c73f4c455514cf5422d27bf38c93250de8316b21 | figure.py | 109,453 | 14 | 143 | align_xlabels | https://github.com/matplotlib/matplotlib.git | Merge SubplotBase into AxesBase. | 386 | 0 | 23,596 | 17 |
|
3 | 17 | def _pad_spatial_dims(x, x_shape, padding, is_conv1d):
# Add empty padding for batch and feature dimensions.
no_pad = ((0, 0),)
padding = tuple(padding)
if is_conv1d:
padding = no_pad + padding + no_pad
# Add empty padding for dummy dimension, too.
padding = no_pad + padding + no_pad + no_pad
else:
padding = no_pad + padding + no_pad
x = tf.pad(x, padding)
assert len(x.shape) == len(padding)
x_shape = tuple(p0 + xs + p1 for xs, (p0, p1) in zip(x_shape, padding))
jax2tf._assert_matching_abstract_shape(x, x_shape)
return x, x_shape
| jax/experimental/jax2tf/impl_no_xla.py | 178 | jax | {
"docstring": "Pads `x` using `padding`, which specifies padding for the spatial dimensions.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | 82 | Python | 47 | b22121c0c1579dd5108825becac42d5db1b29276 | impl_no_xla.py | 121,203 | 13 | 115 | _pad_spatial_dims | https://github.com/google/jax.git | [jax2tf] Fixes for handling of convolutions with shape_polymorphism and enable_xla=False
Issue: #11402
Due to a typo we were running no tests for convolutions with shape
polymorphism and enable_xla=False.
Added a few more tests from #11402 (Thanks @sdenton4).
The main issue was that in presence of shape polymorphism we cannot
just use `x.shape` for a TF value `x` because it will contain `None`
in the place of unknown dimensions. We must use instead the JAX
abstract values.
This does not fix all issues reported in #11402, there is still the
computation of padding or padding="SAME". Commented out the
corresponding test. | 105 | 0 | 27,040 | 11 |
|
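The `_pad_spatial_dims` helper above builds a full padding spec by wrapping the spatial padding with empty `(0, 0)` entries for the batch and feature dimensions (plus a dummy dimension in the 1-D case). A small framework-independent NumPy sketch of that bookkeeping for the 2-D branch:

```python
import numpy as np

x = np.ones((2, 5, 5, 3))            # NHWC: batch, height, width, channels
spatial_padding = ((1, 1), (2, 2))   # (before, after) per spatial dimension

no_pad = ((0, 0),)
# Batch and channel axes get no padding; only the spatial axes are padded.
full_padding = no_pad + tuple(spatial_padding) + no_pad

padded = np.pad(x, full_padding)
print(padded.shape)                  # (2, 7, 9, 3)
```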
31 | 9 | def _intervals(self, sym):
from sympy.solvers.inequalities import _solve_inequality
assert isinstance(self, Piecewise)
| sympy/functions/elementary/piecewise.py | 36 | sympy | {
"docstring": "Return a list of unique tuples, (a, b, e, i), where a and b\n are the lower and upper bounds in which the expression e of\n argument i in self is defined and $a < b$ (when involving\n numbers) or $a \\le b$ when involving symbols.\n\n If there are any relationals not involving sym, or any\n relational cannot be solved for sym, NotImplementedError is\n raised. The calling routine should have removed such\n relationals before calling this routine.\n\n The evaluated conditions will be returned as ranges.\n Discontinuous ranges will be returned separately with\n identical expressions. The first condition that evaluates to\n True will be returned as the last tuple with a, b = -oo, oo.\n ",
"language": "en",
"n_whitespaces": 198,
"n_words": 114,
"vocab_size": 84
} | 10 | Python | 10 | 498015021131af4dbb07eb110e5badaba8250c7b | piecewise.py | 196,246 | 82 | 577 | _intervals | https://github.com/sympy/sympy.git | Updated import locations | 31 | 0 | 47,746 | 7 |
|
4 | 10 | def subword_index(self, word, w):
low = -1
high = -1
for i in range(len(word)-len(w)+1):
if word.subword(i, i+len(w)) == w:
low = i
high = i+len(w)
break
if low == high == -1:
return -1, -1
return low, high
| sympy/combinatorics/pc_groups.py | 133 | sympy | {
"docstring": "\n Returns the start and ending index of a given\n subword in a word.\n\n Parameters\n ==========\n\n word : FreeGroupElement\n word defined on free group elements for a\n polycyclic group.\n w : FreeGroupElement\n subword of a given word, whose starting and\n ending index to be computed.\n\n Returns\n =======\n\n (i, j)\n A tuple containing starting and ending index of ``w``\n in the given word.\n\n Examples\n ========\n\n >>> from sympy.combinatorics.named_groups import SymmetricGroup\n >>> from sympy.combinatorics import free_group\n >>> G = SymmetricGroup(4)\n >>> PcGroup = G.polycyclic_group()\n >>> collector = PcGroup.collector\n >>> F, x1, x2 = free_group(\"x1, x2\")\n >>> word = x2**2*x1**7\n >>> w = x2**2*x1\n >>> collector.subword_index(word, w)\n (0, 3)\n >>> w = x1**7\n >>> collector.subword_index(word, w)\n (2, 9)\n\n ",
"language": "en",
"n_whitespaces": 356,
"n_words": 114,
"vocab_size": 69
} | 38 | Python | 23 | 498015021131af4dbb07eb110e5badaba8250c7b | pc_groups.py | 196,113 | 11 | 83 | subword_index | https://github.com/sympy/sympy.git | Updated import locations | 147 | 0 | 47,613 | 13 |
|
4 | 11 | def _add_unique_metric_name(self, metric_name, metric_fn, output_index):
# For multi-output models, prepend the output names to the metric name.
if len(self.output_names) > 1:
# If we're loading from an already-serialized model, we've already
# prepended the output name, and we don't want to do it again.
#
# Alternatively, we may be receiving a stateless metric (e.g. the string
# "accuracy") rather than a `Metric` object, in which case we want to
# prepend the output name even if we are loading a serialized model.
if not getattr(metric_fn, "_from_serialized", False):
metric_name = "%s_%s" % (
self.output_names[output_index],
metric_name,
)
j = 1
base_metric_name = metric_name
while metric_name in self.metrics_names:
metric_name = "%s_%d" % (base_metric_name, j)
j += 1
return metric_name
| keras/engine/training_v1.py | 127 | keras | {
"docstring": "Makes the metric name unique.\n\n If there are multiple outputs for which the metrics are calculated, the\n metric names have to be made unique by appending an integer.\n\n Args:\n metric_name: Metric name that corresponds to the metric specified by the\n user. For example: 'acc'.\n metric_fn: The Metric object.\n output_index: The index of the model output for which the metric name is\n being added.\n\n Returns:\n string, name of the model's unique metric name\n ",
"language": "en",
"n_whitespaces": 171,
"n_words": 72,
"vocab_size": 48
} | 117 | Python | 80 | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | training_v1.py | 271,980 | 13 | 75 | _add_unique_metric_name | https://github.com/keras-team/keras.git | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | 333 | 0 | 80,920 | 14 |
|
1 | 2 | def close(self) -> None:
return
| plugins/convert/writer/opencv.py | 18 | faceswap | {
"docstring": " Does nothing as OpenCV writer does not need a close method ",
"language": "en",
"n_whitespaces": 12,
"n_words": 11,
"vocab_size": 11
} | 5 | Python | 5 | 049314429f71a21e6595e9d27e9e36f6a3479c42 | opencv.py | 101,067 | 3 | 9 | close | https://github.com/deepfakes/faceswap.git | Convert: Add option to output mask separately for draw-transparent | 19 | 0 | 20,504 | 6 |
|
1 | 14 | def get_tables(self) -> Response:
return self.native_query(
str(text(f).bindparams(
bindparam('database', value=self.database, type_=String)
).compile(compile_kwargs={"literal_binds": True}))
)
| mindsdb/integrations/handlers/teradata_handler/teradata_handler.py | 86 | mindsdb | {
"docstring": "\n List all tables in Teradata in the current database\n \n SELECT DataBaseName,\n TableName,\n TableKind\n FROM DBC.TablesV\n WHERE DatabaseName = :database\n AND (TableKind = 'T'\n OR TableKind = 'O'\n OR TableKind = 'Q')\n ",
"language": "en",
"n_whitespaces": 168,
"n_words": 31,
"vocab_size": 24
} | 13 | Python | 13 | 47c5e0ac2d89807f8ff7239d423a3d346bd39a1e | teradata_handler.py | 116,755 | 18 | 51 | get_tables | https://github.com/mindsdb/mindsdb.git | feat: add teradata integration | 71 | 0 | 25,827 | 17 |
|
5 | 28 | def get_project_name(doctype, txt, searchfield, start, page_len, filters):
cond = ""
if filters and filters.get("customer"):
cond = % (
frappe.db.escape(filters.get("customer"))
)
fields = get_fields("Project", ["name", "project_name"])
searchfields = frappe.get_meta("Project").get_search_fields()
searchfields = " or ".join([field + " like %(txt)s" for field in searchfields])
return frappe.db.sql(
.format(
fields=", ".join(["`tabProject`.{0}".format(f) for f in fields]),
cond=cond,
scond=searchfields,
match_cond=get_match_cond(doctype),
start=start,
page_len=page_len,
),
{"txt": "%{0}%".format(txt), "_txt": txt.replace("%", "")},
)
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs | erpnext/controllers/queries.py | 296 | @frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs | erpnext | {
"docstring": "(`tabProject`.customer = %s or\n\t\t\tifnull(`tabProject`.customer,\"\")=\"\") andselect {fields} from `tabProject`\n\t\twhere\n\t\t\t`tabProject`.status not in (\"Completed\", \"Cancelled\")\n\t\t\tand {cond} {scond} {match_cond}\n\t\torder by\n\t\t\tif(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),\n\t\t\tidx desc,\n\t\t\t`tabProject`.name asc\n\t\tlimit {start}, {page_len}",
"language": "en",
"n_whitespaces": 23,
"n_words": 33,
"vocab_size": 32
} | 64 | Python | 54 | 494bd9ef78313436f0424b918f200dab8fc7c20b | queries.py | 65,644 | 29 | 166 | get_project_name | https://github.com/frappe/erpnext.git | style: format code with black | 43 | 1 | 13,966 | 16 |
1 | 16 | async def test_visit_collection_with_private_pydantic(self):
input = PrivatePydantic(x=1)
input._y = 2
input._z = 4
result = await visit_collection(
input, visit_fn=visit_even_numbers, return_data=False
)
assert result is None
assert EVEN == {2, 4}
result = await visit_collection(
input, visit_fn=negative_even_numbers, return_data=True
)
assert result == input
assert result.__private_attributes__ == input.__private_attributes__
breakpoint()
assert result._y == -2
assert result._z == -4
| tests/utilities/test_collections.py | 150 | prefect | {
"docstring": "Check that we successfully capture private pydantic fields",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | 54 | Python | 33 | c33f87fc7e0b6fb4714a88b492e7545f4dbd821f | test_collections.py | 56,293 | 17 | 95 | test_visit_collection_with_private_pydantic | https://github.com/PrefectHQ/prefect.git | get private attrs working | 181 | 0 | 11,499 | 10 |
|
12 | 64 | def load_data(self, ds, basedir, variable_manager=None, loader=None):
# import here to avoid a dependency loop
from ansible.playbook import Playbook
from ansible.playbook.play import Play
# first, we use the original parent method to correctly load the object
# via the load_data/preprocess_data system we normally use for other
# playbook objects
new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)
all_vars = self.vars.copy()
if variable_manager:
all_vars.update(variable_manager.get_vars())
templar = Templar(loader=loader, variables=all_vars)
# then we use the object to load a Playbook
pb = Playbook(loader=loader)
file_name = templar.template(new_obj.import_playbook)
# check for FQCN
resource = _get_collection_playbook_path(file_name)
if resource is not None:
playbook = resource[1]
playbook_collection = resource[2]
else:
# not FQCN try path
playbook = file_name
if not os.path.isabs(playbook):
playbook = os.path.join(basedir, playbook)
# might still be collection playbook
playbook_collection = _get_collection_name_from_path(playbook)
if playbook_collection:
# it is a collection playbook, setup default collections
AnsibleCollectionConfig.default_collection = playbook_collection
else:
# it is NOT a collection playbook, setup adjecent paths
AnsibleCollectionConfig.playbook_paths.append(os.path.dirname(os.path.abspath(to_bytes(playbook, errors='surrogate_or_strict'))))
pb._load_playbook_data(file_name=playbook, variable_manager=variable_manager, vars=self.vars.copy())
# finally, update each loaded playbook entry with any variables specified
# on the included playbook and/or any tags which may have been set
for entry in pb._entries:
# conditional includes on a playbook need a marker to skip gathering
if new_obj.when and isinstance(entry, Play):
entry._included_conditional = new_obj.when[:]
temp_vars = entry.vars.copy()
temp_vars.update(new_obj.vars)
param_tags = temp_vars.pop('tags', None)
if param_tags is not None:
entry.tags.extend(param_tags.split(','))
entry.vars = temp_vars
entry.tags = list(set(entry.tags).union(new_obj.tags))
if entry._included_path is None:
entry._included_path = os.path.dirname(playbook)
# Check to see if we need to forward the conditionals on to the included
# plays. If so, we can take a shortcut here and simply prepend them to
# those attached to each block (if any)
if new_obj.when:
for task_block in (entry.pre_tasks + entry.roles + entry.tasks + entry.post_tasks):
task_block._when = new_obj.when[:] + task_block.when[:]
return pb
| lib/ansible/playbook/playbook_include.py | 620 | ansible | {
"docstring": "\n Overrides the base load_data(), as we're actually going to return a new\n Playbook() object rather than a PlaybookInclude object\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 19,
"vocab_size": 17
} | 285 | Python | 164 | 43153c58310d02223f2cb0964f4255ba1ac4ed53 | playbook_include.py | 267,584 | 40 | 384 | load_data | https://github.com/ansible/ansible.git | `FieldAttribute`s as descriptors (#73908) | 829 | 0 | 78,964 | 18 |
|
1 | 2 | def arraydtick(self):
return self["arraydtick"]
| packages/python/plotly/plotly/graph_objs/carpet/_aaxis.py | 22 | plotly.py | {
"docstring": "\n The stride between grid lines along the axis\n\n The 'arraydtick' property is a integer and may be specified as:\n - An int (or float that will be cast to an int)\n in the interval [1, 9223372036854775807]\n\n Returns\n -------\n int\n ",
"language": "en",
"n_whitespaces": 102,
"n_words": 39,
"vocab_size": 35
} | 4 | Python | 4 | 43e3a4011080911901176aab919c0ecf5046ddd3 | _aaxis.py | 229,084 | 2 | 11 | arraydtick | https://github.com/plotly/plotly.py.git | switch to black .22 | 18 | 0 | 60,757 | 7 |
|
6 | 17 | def to_dict(self) -> Dict[str, any]:
input_features = [feat for feat in self.input_features.to_list() if feat["active"]]
output_features = [feat for feat in self.output_features.to_list() if feat["active"]]
config_dict = {
"model_type": self.model_type,
"input_features": input_features,
"output_features": output_features,
"trainer": self.trainer.to_dict(),
"preprocessing": self.preprocessing.to_dict(),
"hyperopt": self.hyperopt,
"defaults": self.defaults.to_dict(),
}
if self.combiner is not None:
config_dict["combiner"] = self.combiner.to_dict()
return convert_submodules(config_dict)
| ludwig/schema/model_config.py | 219 | ludwig | {
"docstring": "This method converts the current config object into an equivalent dictionary representation for the\n parts of the codebase that use the dictionary representation of the config.\n\n Returns:\n Config Dictionary\n ",
"language": "en",
"n_whitespaces": 61,
"n_words": 29,
"vocab_size": 22
} | 51 | Python | 41 | e2dbab9adf85a018bc6279c9538d995a2227f619 | model_config.py | 8,745 | 21 | 132 | to_dict | https://github.com/ludwig-ai/ludwig.git | fix: Restrict allowed top-level config keys (#2826)
* fix
* add ludwig_version
* prints
* remove extra | 188 | 0 | 1,495 | 11 |
|
2 | 18 | def local_rank() -> int:
| python/ray/train/train_loop_utils.py | 57 | """Get the local rank of thisrank of the worker on its..block:: | ray | {
"docstring": "Get the local rank of this worker (rank of the worker on its node).\n\n .. code-block:: python\n\n import time\n from ray import train\n",
"language": "en",
"n_whitespaces": 39,
"n_words": 23,
"vocab_size": 19
} | 4 | Python | 4 | 0e8eb8aedb3e158da8c3e7378e818ce87ca7813e | train_loop_utils.py | 128,345 | 28 | 47 | local_rank | https://github.com/ray-project/ray.git | [AIR] More Train and Tune session deprecations (#28856)
Signed-off-by: Amog Kamsetty [email protected]
Finish marking train. and tune. session APIs as deprecated | 7 | 4 | 28,679 | 10 |
13 | 46 | def _build_test_case(self, task_data, host_data):
name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
duration = host_data.finish - task_data.start
if self._task_relative_path and task_data.path:
junit_classname = os.path.relpath(task_data.path, self._task_relative_path)
else:
junit_classname = task_data.path
if self._replace_out_of_tree_path is not None and junit_classname.startswith('../'):
junit_classname = self._replace_out_of_tree_path + os.path.basename(junit_classname)
if self._task_class == 'true':
junit_classname = re.sub(r'\.yml:[0-9]+$', '', junit_classname)
if host_data.status == 'included':
return TestCase(name=name, classname=junit_classname, time=duration, system_out=str(host_data.result))
res = host_data.result._result
rc = res.get('rc', 0)
dump = self._dump_results(res, indent=0)
dump = self._cleanse_string(dump)
if host_data.status == 'ok':
return TestCase(name=name, classname=junit_classname, time=duration, system_out=dump)
test_case = TestCase(name=name, classname=junit_classname, time=duration)
if host_data.status == 'failed':
if 'exception' in res:
message = res['exception'].strip().split('\n')[-1]
output = res['exception']
test_case.errors.append(TestError(message=message, output=output))
elif 'msg' in res:
message = res['msg']
test_case.failures.append(TestFailure(message=message, output=dump))
else:
test_case.failures.append(TestFailure(message='rc=%s' % rc, output=dump))
elif host_data.status == 'skipped':
if 'skip_reason' in res:
message = res['skip_reason']
else:
message = 'skipped'
test_case.skipped = message
return test_case
| lib/ansible/plugins/callback/junit.py | 589 | ansible | {
"docstring": " build a TestCase from the given TaskData and HostData ",
"language": "en",
"n_whitespaces": 10,
"n_words": 9,
"vocab_size": 9
} | 138 | Python | 81 | fbb5d56bd274c44b193cb95f0230b9352f62aab2 | junit.py | 266,525 | 37 | 360 | _build_test_case | https://github.com/ansible/ansible.git | ansible-test - Use relative paths in junit output. (#76871)
* ansible-test - Use relative paths in junit output.
Also fix a traceback in the junit callback during automatic fact gathering.
* ansible-test - Handle out-of-tree JUnit paths. | 509 | 0 | 78,457 | 17 |
|
23 | 114 | def model_fn(features, labels, mode, params, config):
del config
hparams = params
length = features.length
spec = features.spec
is_training = mode == tf_estimator.ModeKeys.TRAIN
if is_training:
onset_labels = labels.onsets
offset_labels = labels.offsets
velocity_labels = labels.velocities
frame_labels = labels.labels
frame_label_weights = labels.label_weights
if hparams.stop_activation_gradient and not hparams.activation_loss:
raise ValueError(
'If stop_activation_gradient is true, activation_loss must be true.')
losses = {}
with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
with tf.variable_scope('onsets'):
onset_outputs = acoustic_model(
spec,
hparams,
lstm_units=hparams.onset_lstm_units,
lengths=length)
onset_probs = slim.fully_connected(
onset_outputs,
constants.MIDI_PITCHES,
activation_fn=tf.sigmoid,
scope='onset_probs')
# onset_probs_flat is used during inference.
onset_probs_flat = flatten_maybe_padded_sequences(onset_probs, length)
if is_training:
onset_labels_flat = flatten_maybe_padded_sequences(onset_labels, length)
onset_losses = tf_utils.log_loss(onset_labels_flat, onset_probs_flat)
tf.losses.add_loss(tf.reduce_mean(onset_losses))
losses['onset'] = onset_losses
with tf.variable_scope('offsets'):
offset_outputs = acoustic_model(
spec,
hparams,
lstm_units=hparams.offset_lstm_units,
lengths=length)
offset_probs = slim.fully_connected(
offset_outputs,
constants.MIDI_PITCHES,
activation_fn=tf.sigmoid,
scope='offset_probs')
# offset_probs_flat is used during inference.
offset_probs_flat = flatten_maybe_padded_sequences(offset_probs, length)
if is_training:
offset_labels_flat = flatten_maybe_padded_sequences(
offset_labels, length)
offset_losses = tf_utils.log_loss(offset_labels_flat, offset_probs_flat)
tf.losses.add_loss(tf.reduce_mean(offset_losses))
losses['offset'] = offset_losses
with tf.variable_scope('velocity'):
velocity_outputs = acoustic_model(
spec,
hparams,
lstm_units=hparams.velocity_lstm_units,
lengths=length)
velocity_values = slim.fully_connected(
velocity_outputs,
constants.MIDI_PITCHES,
activation_fn=None,
scope='onset_velocities')
velocity_values_flat = flatten_maybe_padded_sequences(
velocity_values, length)
if is_training:
velocity_labels_flat = flatten_maybe_padded_sequences(
velocity_labels, length)
velocity_loss = tf.reduce_sum(
onset_labels_flat *
tf.square(velocity_labels_flat - velocity_values_flat),
axis=1)
tf.losses.add_loss(tf.reduce_mean(velocity_loss))
losses['velocity'] = velocity_loss
with tf.variable_scope('frame'):
if not hparams.share_conv_features:
# TODO(eriche): this is broken when hparams.frame_lstm_units > 0
activation_outputs = acoustic_model(
spec,
hparams,
lstm_units=hparams.frame_lstm_units,
lengths=length)
activation_probs = slim.fully_connected(
activation_outputs,
constants.MIDI_PITCHES,
activation_fn=tf.sigmoid,
scope='activation_probs')
else:
activation_probs = slim.fully_connected(
onset_outputs,
constants.MIDI_PITCHES,
activation_fn=tf.sigmoid,
scope='activation_probs')
probs = []
if hparams.stop_onset_gradient:
probs.append(tf.stop_gradient(onset_probs))
else:
probs.append(onset_probs)
if hparams.stop_activation_gradient:
probs.append(tf.stop_gradient(activation_probs))
else:
probs.append(activation_probs)
if hparams.stop_offset_gradient:
probs.append(tf.stop_gradient(offset_probs))
else:
probs.append(offset_probs)
combined_probs = tf.concat(probs, 2)
if hparams.combined_lstm_units > 0:
outputs = lstm_layer(
combined_probs,
hparams.combined_lstm_units,
lengths=length if hparams.use_lengths else None,
stack_size=hparams.combined_rnn_stack_size,
use_cudnn=hparams.use_cudnn,
bidirectional=hparams.bidirectional)
else:
outputs = combined_probs
frame_probs = slim.fully_connected(
outputs,
constants.MIDI_PITCHES,
activation_fn=tf.sigmoid,
scope='frame_probs')
frame_probs_flat = flatten_maybe_padded_sequences(frame_probs, length)
if is_training:
frame_labels_flat = flatten_maybe_padded_sequences(frame_labels, length)
frame_label_weights_flat = flatten_maybe_padded_sequences(
frame_label_weights, length)
if hparams.weight_frame_and_activation_loss:
frame_loss_weights = frame_label_weights_flat
else:
frame_loss_weights = None
frame_losses = tf_utils.log_loss(
frame_labels_flat, frame_probs_flat, weights=frame_loss_weights)
tf.losses.add_loss(tf.reduce_mean(frame_losses))
losses['frame'] = frame_losses
if hparams.activation_loss:
if hparams.weight_frame_and_activation_loss:
activation_loss_weights = frame_label_weights
else:
activation_loss_weights = None
activation_losses = tf_utils.log_loss(
frame_labels_flat,
flatten_maybe_padded_sequences(activation_probs, length),
weights=activation_loss_weights)
tf.losses.add_loss(tf.reduce_mean(activation_losses))
losses['activation'] = activation_losses
frame_predictions = frame_probs_flat > hparams.predict_frame_threshold
onset_predictions = onset_probs_flat > hparams.predict_onset_threshold
offset_predictions = offset_probs_flat > hparams.predict_offset_threshold
frame_predictions = tf.expand_dims(frame_predictions, axis=0)
onset_predictions = tf.expand_dims(onset_predictions, axis=0)
offset_predictions = tf.expand_dims(offset_predictions, axis=0)
velocity_values = tf.expand_dims(velocity_values_flat, axis=0)
metrics_values = metrics.define_metrics(
frame_probs=frame_probs,
onset_probs=onset_probs,
frame_predictions=frame_predictions,
onset_predictions=onset_predictions,
offset_predictions=offset_predictions,
velocity_values=velocity_values,
length=features.length,
sequence_label=labels.note_sequence,
frame_labels=labels.labels,
sequence_id=features.sequence_id,
hparams=hparams)
for label, loss_collection in losses.items():
loss_label = 'losses/' + label
metrics_values[loss_label] = loss_collection
def predict_sequence():
| magenta/models/onsets_frames_transcription/model.py | 1,442 | magenta | {
"docstring": "Builds the acoustic model.Convert frame predictions into a sequence (TF).",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | 385 | Python | 217 | f73ff0c91f0159a925fb6547612199bb7c915248 | model.py | 173,538 | 228 | 1,401 | model_fn | https://github.com/magenta/magenta.git | Explicitly import estimator from tensorflow as a separate import instead of accessing it via tf.estimator and depend on the tensorflow estimator target.
PiperOrigin-RevId: 436568278 | 1,488 | 0 | 40,851 | 18 |
|
1 | 12 | async def test_reload_entry_with_new_config(hass, tmp_path):
config_old = [{"name": "test_old1", "command_topic": "test-topic_old"}]
config_yaml_new = {
"mqtt": {
"light": [{"name": "test_new_modern", "command_topic": "test-topic_new"}]
},
# Test deprecated YAML configuration under the platform key
# Scheduled to be removed in HA core 2022.12
"light": [
{
"platform": "mqtt",
"name": "test_new_legacy",
"command_topic": "test-topic_new",
}
],
}
await help_test_setup_manual_entity_from_yaml(hass, "light", config_old)
assert hass.states.get("light.test_old1") is not None
await help_test_entry_reload_with_new_config(hass, tmp_path, config_yaml_new)
assert hass.states.get("light.test_old1") is None
assert hass.states.get("light.test_new_modern") is not None
assert hass.states.get("light.test_new_legacy") is not None
@patch("homeassistant.components.mqtt.PLATFORMS", [Platform.LIGHT]) | tests/components/mqtt/test_init.py | 254 | @patch("homeassistant.components.mqtt.PLATFORMS", [Platform.LIGHT]) | core | {
"docstring": "Test reloading the config entry with a new yaml config.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | 80 | Python | 58 | b3a48389789549b3cb1aabd042310137baccc9b9 | test_init.py | 287,034 | 20 | 127 | test_reload_entry_with_new_config | https://github.com/home-assistant/core.git | Refactor MQTT tests to use modern platform schema part 1 (#77387)
* Tests alarm_control_panel
* Tests binary_sensor
* Tests button
* Tests camera
* Tests Climate + corrections default config
* Tests cover
* Tests device_tracker
* Tests fan
* Tests humidifier
* Fix test_supported_features test fan
* Tests init
* Tests legacy vacuum
* Derive DEFAULT_CONFIG_LEGACY from DEFAULT_CONFIG
* Commit suggestion comment changes | 229 | 1 | 86,227 | 14 |
2 | 7 | def suspend(self):
if POSIX:
self._send_signal(signal.SIGSTOP)
else: # pragma: no cover
self._proc.suspend()
| psutil/__init__.py | 48 | psutil | {
"docstring": "Suspend process execution with SIGSTOP pre-emptively checking\n whether PID has been reused.\n On Windows this has the effect of suspending all process threads.\n ",
"language": "en",
"n_whitespaces": 44,
"n_words": 23,
"vocab_size": 21
} | 11 | Python | 11 | 471b19d2aa799cd73bded23379e864dd35bec2b6 | __init__.py | 188,993 | 5 | 26 | suspend | https://github.com/giampaolo/psutil.git | Fix typos | 55 | 0 | 45,957 | 11 |
|
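For the `suspend` method above, a small hedged sketch of typical psutil usage; it spawns a throwaway child process so nothing important gets stopped. As the docstring notes, this sends SIGSTOP on POSIX and suspends all process threads on Windows.

```python
import subprocess
import sys

import psutil

# Spawn a harmless child process to act on.
child = subprocess.Popen([sys.executable, "-c", "import time; time.sleep(60)"])
proc = psutil.Process(child.pid)

proc.suspend()           # pre-emptively checks for PID reuse, then stops the process
print(proc.status())     # usually psutil.STATUS_STOPPED while suspended
proc.resume()
child.terminate()
```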
2 | 17 | def _simulate_installation_of(to_install, package_set):
# type: (List[InstallRequirement], PackageSet) -> Set[NormalizedName]
# Keep track of packages that were installed
installed = set()
# Modify it as installing requirement_set would (assuming no errors)
for inst_req in to_install:
abstract_dist = make_distribution_for_install_requirement(inst_req)
dist = abstract_dist.get_pkg_resources_distribution()
assert dist is not None
name = canonicalize_name(dist.key)
package_set[name] = PackageDetails(dist.version, dist.requires())
installed.add(name)
return installed
| .venv/lib/python3.8/site-packages/pip/_internal/operations/check.py | 115 | transferlearning | {
"docstring": "Computes the version of packages after installing to_install.\n ",
"language": "en",
"n_whitespaces": 11,
"n_words": 8,
"vocab_size": 8
} | 55 | Python | 46 | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | check.py | 60,923 | 10 | 69 | _simulate_installation_of | https://github.com/jindongwang/transferlearning.git | upd; format | 118 | 0 | 12,346 | 12 |
|
1 | 27 | def test_title_column(self):
root_page = Page.objects.filter(depth=2).first()
blog = Site.objects.create(
hostname="blog.example.com", site_name="My blog", root_page=root_page
)
gallery = Site.objects.create(
hostname="gallery.example.com", site_name="My gallery", root_page=root_page
)
data = [blog, gallery]
table = Table(
[
TitleColumn(
"hostname",
url_name="wagtailsites:edit",
link_classname="choose-site",
link_attrs={"data-chooser": "yes"},
),
Column("site_name", label="Site name"),
],
data,
)
html = self.render_component(table)
self.assertHTMLEqual(
html,
% (blog.pk, gallery.pk),
)
| wagtail/admin/tests/ui/test_tables.py | 223 | wagtail | {
"docstring": "\n <table class=\"listing\">\n <thead>\n <tr><th>Hostname</th><th>Site name</th></tr>\n </thead>\n <tbody>\n <tr>\n <td class=\"title\">\n <div class=\"title-wrapper\">\n <a href=\"/admin/sites/%d/\" class=\"choose-site\" data-chooser=\"yes\">blog.example.com</a>\n </div>\n </td>\n <td>My blog</td>\n </tr>\n <tr>\n <td class=\"title\">\n <div class=\"title-wrapper\">\n <a href=\"/admin/sites/%d/\" class=\"choose-site\" data-chooser=\"yes\">gallery.example.com</a>\n </div>\n </td>\n <td>My gallery</td>\n </tr>\n </tbody>\n </table>\n ",
"language": "en",
"n_whitespaces": 530,
"n_words": 37,
"vocab_size": 25
} | 51 | Python | 40 | 5994cc43dfc5cc1ed891ab78eff3a3bcf56f6830 | test_tables.py | 77,580 | 51 | 136 | test_title_column | https://github.com/wagtail/wagtail.git | Allow passing arbitrary link attributes to TitleColumn | 337 | 0 | 16,677 | 15 |
|
1 | 2 | def silence_transformers_logs(from_pretrained_func):
| haystack/modeling/model/language_model.py | 13 | haystack | {
"docstring": "\n Wrapper that raises the log level of Transformers to\n ERROR to hide some unnecessary warnings\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 15,
"vocab_size": 14
} | 2 | Python | 2 | a59bca366174d9c692fa19750c24d65f47660ef7 | language_model.py | 256,238 | 4 | 15 | silence_transformers_logs | https://github.com/deepset-ai/haystack.git | Apply black formatting (#2115)
* Testing black on ui/
* Applying black on docstores
* Add latest docstring and tutorial changes
* Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too
* Remove comments
* Relax constraints on pydoc-markdown
* Split temporary black from the docs. Pydoc-markdown was obsolete and needs a separate PR to upgrade
* Fix a couple of bugs
* Add a type: ignore that was missing somehow
* Give path to black
* Apply Black
* Apply Black
* Relocate a couple of type: ignore
* Update documentation
* Make Linux CI run after applying Black
* Triggering Black
* Apply Black
* Remove dependency, does not work well
* Remove manually double trailing commas
* Update documentation
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> | 5 | 0 | 74,819 | 6 |
|
1 | 3 | def num_columns(self):
return self.table.num_columns
| src/datasets/table.py | 22 | datasets | {
"docstring": "\n Number of columns in this table.\n\n Returns:\n int:\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 8,
"vocab_size": 8
} | 4 | Python | 4 | e35be138148333078284b942ccc9ed7b1d826f97 | table.py | 104,392 | 2 | 12 | num_columns | https://github.com/huggingface/datasets.git | Update docs to new frontend/UI (#3690)
* WIP: update docs to new UI
* make style
* Rm unused
* inject_arrow_table_documentation __annotations__
* hasattr(arrow_table_method, "__annotations__")
* Update task_template.rst
* Codeblock PT-TF-SPLIT
* Convert loading scripts
* Convert docs to mdx
* Fix mdx
* Add <Tip>
* Convert mdx tables
* Fix codeblock
* Rm unneded hashlinks
* Update index.mdx
* Redo dev change
* Rm circle ci `build_doc` & `deploy_doc`
* Rm unneeded files
* Update docs reamde
* Standardize to `Example::`
* mdx logging levels doc
* Table properties inject_arrow_table_documentation
* ``` to ```py mdx
* Add Tips mdx
* important,None -> <Tip warning={true}>
* More misc
* Center imgs
* Update instllation page
* `setup.py` docs section
* Rm imgs since they are in hf.co
* Update docs/source/access.mdx
Co-authored-by: Steven Liu <[email protected]>
* Update index mdx
* Update docs/source/access.mdx
Co-authored-by: Steven Liu <[email protected]>
* just `Dataset` obj
* Addedversion just italics
* Update ReadInstruction doc example syntax
* Change docstring for `prepare_for_task`
* Chore
* Remove `code` syntax from headings
* Rm `code` syntax from headings
* Hashlink backward compatability
* S3FileSystem doc
* S3FileSystem doc updates
* index.mdx updates
* Add darkmode gifs
* Index logo img css classes
* Index mdx dataset logo img size
* Docs for DownloadMode class
* Doc DownloadMode table
* format docstrings
* style
* Add doc builder scripts (#3790)
* add doc builder scripts
* fix docker image
* Docs new UI actions no self hosted (#3793)
* No self hosted
* replace doc injection by actual docstrings
* Docstring formatted
Co-authored-by: Quentin Lhoest <[email protected]>
Co-authored-by: Mishig Davaadorj <[email protected]>
Co-authored-by: Lysandre Debut <[email protected]>
Co-authored-by: Mishig Davaadorj <[email protected]>
* Rm notebooks from docs actions since they dont exi
* Update tsting branch
* More docstring
* Chore
* bump up node version
* bump up node
* ``` -> ```py for audio_process.mdx
* Update .github/workflows/build_documentation.yml
Co-authored-by: Quentin Lhoest <[email protected]>
* Uodate dev doc build
* remove run on PR
* fix action
* Fix gh doc workflow
* forgot this change when merging master
* Update build doc
Co-authored-by: Steven Liu <[email protected]>
Co-authored-by: Quentin Lhoest <[email protected]>
Co-authored-by: Quentin Lhoest <[email protected]>
Co-authored-by: Lysandre Debut <[email protected]> | 18 | 0 | 21,829 | 7 |
|
2 | 15 | def compute(self) -> Tensor:
# self.total maps to the number of observations in preds/target computed during update()
if self.total <= 1:
logger.warning(
)
return torch.tensor(float("nan"))
return _r2_score_compute(
self.sum_squared_error, self.sum_error, self.residual, self.total, self.adjusted, self.multioutput
)
| ludwig/modules/metric_modules.py | 94 | ludwig | {
"docstring": "Computes r2 score over the metric states.R-squared (r2) is not defined for one sample. It needs at least two samples. Returning NaN.",
"language": "en",
"n_whitespaces": 21,
"n_words": 22,
"vocab_size": 22
} | 34 | Python | 31 | dfdc98caa35f38665dbe045ccff431715e976841 | metric_modules.py | 7,263 | 10 | 58 | compute | https://github.com/ludwig-ai/ludwig.git | Update R2 score to handle single sample computation (#2235)
* Update R2 scores to handle single sample computation | 129 | 0 | 1,171 | 12 |
|
4 | 23 | def convert_to_legacy_optimizer(optimizer):
if not isinstance(optimizer, base_optimizer.Optimizer):
raise ValueError(
"`convert_to_legacy_optimizer` should only be called "
"on instances of `tf.keras.optimizers.Optimizer`, but "
f"received {optimizer} of type {type(optimizer)}."
)
optimizer_name = optimizer.__class__.__name__.lower()
config = optimizer.get_config()
# Remove fields that only exist in experimental optimizer.
keys_to_remove = [
"weight_decay",
"use_ema",
"ema_momentum",
"ema_overwrite_frequency",
"jit_compile",
"is_legacy_optimizer",
]
for key in keys_to_remove:
config.pop(key, None)
# Learning rate can be a custom LearningRateSchedule, which is stored as
# a dict in config, and cannot be deserialized.
if isinstance(
optimizer._learning_rate, learning_rate_schedule.LearningRateSchedule
):
config["learning_rate"] = optimizer._learning_rate
legacy_optimizer_config = {
"class_name": optimizer_name,
"config": config,
}
return deserialize(legacy_optimizer_config, use_legacy_optimizer=True)
@keras_export("keras.optimizers.get") | keras/optimizers/__init__.py | 220 | @keras_export("keras.optimizers.get") | keras | {
"docstring": "Convert experimental optimizer to legacy optimizer.\n\n This function takes in a `tf.keras.optimizers.experimental.Optimizer`\n instance and converts it to the corresponding\n `tf.keras.optimizers.legacy.Optimizer` instance.\n For example, `tf.keras.optimizers.experimental.Adam(...)` to\n `tf.keras.optimizers.legacy.Adam(...)`.\n\n Args:\n optimizer: An instance of `tf.keras.optimizers.experimental.Optimizer`.\n ",
"language": "en",
"n_whitespaces": 60,
"n_words": 32,
"vocab_size": 29
} | 98 | Python | 82 | 5a105aadbdc6fde2c2529280c4789864adbb81c7 | __init__.py | 280,501 | 28 | 113 | convert_to_legacy_optimizer | https://github.com/keras-team/keras.git | Move new optimizer out of optimizer_experimental/ directory.
PiperOrigin-RevId: 488998585 | 266 | 1 | 83,358 | 14 |
1 | 13 | def test_non_existing_file_download(self) -> None:
hamlet = self.example_user("hamlet")
self.login_user(hamlet)
response = self.client_get(
f"http://{hamlet.realm.host}/user_uploads/{hamlet.realm_id}/ff/gg/abc.py"
)
self.assertEqual(response.status_code, 404)
self.assert_in_response("File not found.", response)
| zerver/tests/test_upload.py | 102 | zulip | {
"docstring": "\n Trying to download a file that was never uploaded will return a json_error\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 13,
"vocab_size": 12
} | 19 | Python | 18 | 5ff4754090259dea52c0554d82eeaf601490f383 | test_upload.py | 84,031 | 11 | 49 | test_non_existing_file_download | https://github.com/zulip/zulip.git | test_upload: Fix some URLs to uploaded files.
Using http://localhost:9991 is incorrect - e.g. messages sent with file
urls constructed trigger do_claim_attachments to be called with empty
list in potential_path_ids.
realm.host should be used in all these places, like in the other tests
in the file. | 79 | 0 | 17,766 | 12 |
|
1 | 2 | def readable(self):
# type: () -> bool
return True
| .venv/lib/python3.8/site-packages/pip/_internal/network/lazy_wheel.py | 17 | transferlearning | {
"docstring": "Return whether the file is readable, which is True.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 8
} | 9 | Python | 9 | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | lazy_wheel.py | 60,897 | 2 | 8 | readable | https://github.com/jindongwang/transferlearning.git | upd; format | 30 | 0 | 12,329 | 6 |
|
3 | 28 | def _generate_pynsist_config(repo_path, build_path):
print('Generate pynsist configuration')
installer_cfg_path = os.path.join(build_path, 'installer.cfg')
certbot_pkg_path = os.path.join(repo_path, 'certbot')
certbot_version = subprocess.check_output([sys.executable, '-c', 'import certbot; print(certbot.__version__)'],
universal_newlines=True, cwd=certbot_pkg_path).strip()
# If we change the installer name from `certbot-beta-installer-win_amd64.exe`, it should
# also be changed in tools/create_github_release.py
with open(installer_cfg_path, 'w') as file_h:
file_h.write(.format(certbot_version=certbot_version,
installer_suffix='win_amd64' if PYTHON_BITNESS == 64 else 'win32',
python_bitness=PYTHON_BITNESS,
python_version='.'.join(str(item) for item in PYTHON_VERSION)))
return installer_cfg_path
| windows-installer/windows_installer/construct.py | 202 | certbot | {
"docstring": "\\\n[Application]\nname=Certbot\nversion={certbot_version}\nicon=certbot.ico\npublisher=Electronic Frontier Foundation\ntarget=$INSTDIR\\\\run.bat\n\n[Build]\ndirectory=nsis\nnsi_template=template.nsi\ninstaller_name=certbot-beta-installer-{installer_suffix}.exe\n\n[Python]\nversion={python_version}\nbitness={python_bitness}\n\n[Include]\nlocal_wheels=wheels\\\\*.whl\nfiles=run.bat\n renew-up.ps1\n renew-down.ps1\n\n[Command certbot]\nentry_point=certbot.main:main\nextra_preamble=preamble.py\n",
"language": "en",
"n_whitespaces": 15,
"n_words": 25,
"vocab_size": 25
} | 61 | Python | 56 | f251a13f322e10c530897be31aa07a1199061f10 | construct.py | 186,741 | 38 | 118 | _generate_pynsist_config | https://github.com/certbot/certbot.git | Remove Windows 2016 environment, generate 64 bit installer (#9202)
* Remove Windows 2016 environment, generate 64 bit installer
* Add note to changelog
* Use win_amd64 as installer suffix
* Bump PYTHON_BITNESS to 64
* Require 64 bit Windows for the installer_build job
* Update certbot install path
* update windows test name
* Base installer suffix on PYTHON_BITNESS again
* Update changelog to request users uninstall old version | 170 | 0 | 45,615 | 16 |
|
5 | 9 | def get_worker_host(pod_args, pod_is_container, head_is_container):
# Check if the current pod and head are both containerized on the same host
# If so __docker_host__ needs to be advertised as the worker's address to the head
worker_host = (
__docker_host__
if (pod_is_container and (head_is_container or in_docker()))
and host_is_local(pod_args.host)
else pod_args.host
)
return worker_host
| jina/orchestrate/deployments/__init__.py | 65 | jina | {
"docstring": "\n Check if the current pod and head are both containerized on the same host\n If so __docker_host__ needs to be advertised as the worker's address to the head\n\n :param pod_args: arguments of the worker pod\n :param pod_is_container: boolean specifying if pod is to be run in container\n :param head_is_container: boolean specifying if head pod is to be run in container\n :return: host to pass in connection list of the head\n ",
"language": "en",
"n_whitespaces": 120,
"n_words": 70,
"vocab_size": 40
} | 51 | Python | 40 | ef662b529b2a2eecea7bb99759a9f7b9d86d3062 | __init__.py | 12,497 | 8 | 40 | get_worker_host | https://github.com/jina-ai/jina.git | feat: add grpc health checking (#4779) | 137 | 0 | 2,318 | 15 |
|
6 | 36 | def step(self, action_dict):
self.resetted = False
self.steps += 1
logger.debug(
"====> [SUMOTestMultiAgentEnv:step] Episode: %d - Step: %d <====",
self.episodes,
self.steps,
)
dones = {}
dones["__all__"] = False
shuffled_agents = sorted(
action_dict.keys()
) # it may seem not smar to sort something that
# may need to be shuffled afterwards, but it
# is a matter of consistency instead of using
# whatever insertion order was used in the dict
if self._config["scenario_config"]["agent_rnd_order"]:
# randomize the agent order to minimize SUMO's
# insertion queues impact
logger.debug("Shuffling the order of the agents.")
self.rndgen.shuffle(shuffled_agents) # in-place shuffle
# Take action
for agent in shuffled_agents:
self.agents[agent].step(action_dict[agent], self.simulation)
logger.debug("Before SUMO")
ongoing_simulation = self.simulation.step(
until_end=False, agents=set(action_dict.keys())
)
logger.debug("After SUMO")
# end of the episode
if not ongoing_simulation:
logger.info("Reached the end of the SUMO simulation.")
dones["__all__"] = True
obs, rewards, infos = {}, {}, {}
for agent in action_dict:
# check for collisions
if self.simulation.collisions[agent] > 0:
# punish the agent and remove it from the simulation
dones[agent] = True
obs[agent] = [0, 0]
rewards[agent] = -self.agents[agent].config["max_speed"]
# infos[agent] = "Collision"
self.simulation.traci_handler.remove(agent, reason=tc.REMOVE_VAPORIZED)
else:
dones[agent] = agent not in self.simulation.veh_subscriptions
obs[agent] = self.get_observation(agent)
rewards[agent] = self.get_reward(agent)
# infos[agent] = ""
logger.debug("Observations: %s", pformat(obs))
logger.debug("Rewards: %s", pformat(rewards))
logger.debug("Dones: %s", pformat(dones))
logger.debug("Info: %s", pformat(infos))
logger.debug("========================================================")
return obs, rewards, dones, dones, infos
###########################################################################
# ACTIONS & OBSERATIONS SPACE
| rllib/examples/simulators/sumo/marlenvironment.py | 549 | ray | {
"docstring": "\n Returns observations from ready agents.\n\n The returns are dicts mapping from agent_id strings to values. The\n number of agents in the env can vary over time.\n\n Returns\n -------\n obs: New observations for each ready agent.\n rewards: Reward values for each ready agent. If the\n episode is just started, the value will be None.\n dones: Done values for each ready agent. The special key\n \"__all__\" (required) is used to indicate env termination.\n infos: Optional info values for each agent id.\n ",
"language": "en",
"n_whitespaces": 196,
"n_words": 79,
"vocab_size": 56
} | 217 | Python | 136 | 8e680c483ce326cefc62e44f68ab1a6948b1c3d2 | marlenvironment.py | 137,963 | 43 | 329 | step | https://github.com/ray-project/ray.git | [RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). (#28369) | 743 | 0 | 31,259 | 15 |
|
8 | 20 | def _peeloff_pi(arg):
r
pi_coeff = S.Zero
rest_terms = []
for a in Add.make_args(arg):
K = a.coeff(S.Pi)
if K and K.is_rational:
pi_coeff += K
else:
rest_terms.append(a)
if pi_coeff is S.Zero:
return arg, S.Zero
m1 = (pi_coeff % S.Half)
m2 = pi_coeff - m1
if m2.is_integer or ((2*m2).is_integer and m2.is_even is False):
return Add(*(rest_terms + [m1*pi])), m2
return arg, S.Zero
| sympy/functions/elementary/trigonometric.py | 197 | sympy | {
"docstring": "\n Split ARG into two parts, a \"rest\" and a multiple of $\\pi$.\n This assumes ARG to be an Add.\n The multiple of $\\pi$ returned in the second position is always a Rational.\n\n Examples\n ========\n\n >>> from sympy.functions.elementary.trigonometric import _peeloff_pi as peel\n >>> from sympy import pi\n >>> from sympy.abc import x, y\n >>> peel(x + pi/2)\n (x, 1/2)\n >>> peel(x + 2*pi/3 + pi*y)\n (x + pi*y + pi/6, 1/2)\n\n ",
"language": "en",
"n_whitespaces": 110,
"n_words": 70,
"vocab_size": 51
} | 58 | Python | 38 | cda8dfe6f45dc5ed394c2f5cda706cd6c729f713 | trigonometric.py | 195,866 | 33 | 124 | _peeloff_pi | https://github.com/sympy/sympy.git | Improved documentation formatting | 141 | 0 | 47,453 | 15 |
|
4 | 9 | def _randint(seed=None):
if seed is None:
return randint
elif isinstance(seed, int):
rng.seed(seed)
return randint
elif is_sequence(seed):
seed = list(seed) # make a copy
seed.reverse()
| sympy/core/random.py | 82 | sympy | {
"docstring": "Return a randint generator.\n\n ``seed`` can be\n\n * None - return randomly seeded generator\n * int - return a generator seeded with the int\n * list - the values to be returned will be taken from the list\n in the order given; the provided list is not modified.\n\n Examples\n ========\n\n >>> from sympy.core.random import _randint\n >>> ri = _randint()\n >>> ri(1, 1000) # doctest: +SKIP\n 999\n >>> ri = _randint(3)\n >>> ri(1, 1000) # doctest: +SKIP\n 238\n >>> ri = _randint([0, 5, 1, 2, 4])\n >>> ri(1, 3), ri(1, 3)\n (1, 2)\n ",
"language": "en",
"n_whitespaces": 148,
"n_words": 92,
"vocab_size": 57
} | 24 | Python | 20 | 092c0c6ea1e6f435a2cddb6e6fe723088b73bd81 | random.py | 197,150 | 13 | 59 | _randint | https://github.com/sympy/sympy.git | Add sympy.core.random to Sphinx | 72 | 0 | 48,353 | 11 |
|
8 | 30 | def _get_curr_status(self) -> Tuple[DeploymentStatusInfo, bool]:
# TODO(edoakes): we could make this more efficient in steady-state by
# having a "healthy" flag that gets flipped if an update or replica
# failure happens.
target_version = self._target_version
target_replica_count = self._target_replicas
all_running_replica_cnt = self._replicas.count(states=[ReplicaState.RUNNING])
running_at_target_version_replica_cnt = self._replicas.count(
states=[ReplicaState.RUNNING], version=target_version
)
failed_to_start_count = self._replica_constructor_retry_counter
failed_to_start_threshold = min(
MAX_DEPLOYMENT_CONSTRUCTOR_RETRY_COUNT, target_replica_count * 3
)
# Got to make a call to complete current deploy() goal after
# start failure threshold reached, while we might still have
# pending replicas in current goal.
if (
failed_to_start_count >= failed_to_start_threshold
and failed_to_start_threshold != 0
):
if running_at_target_version_replica_cnt > 0:
# At least one RUNNING replica at target state, partial
# success; We can stop tracking constructor failures and
# leave it to the controller to fully scale to target
# number of replicas and only return as completed once
# reached target replica count
self._replica_constructor_retry_counter = -1
else:
return (
DeploymentStatusInfo(
status=DeploymentStatus.FAILED,
message=(
"The Deployment constructor failed "
f"{failed_to_start_count} times in a row. See "
"logs for details."
),
),
False,
)
# If we have pending ops, the current goal is *not* ready.
if (
self._replicas.count(
states=[
ReplicaState.STARTING,
ReplicaState.UPDATING,
ReplicaState.RECOVERING,
ReplicaState.STOPPING,
]
)
== 0
):
# Check for deleting.
if target_replica_count == 0 and all_running_replica_cnt == 0:
return DeploymentStatusInfo(status=DeploymentStatus.UPDATING), True
# Check for a non-zero number of deployments.
elif target_replica_count == running_at_target_version_replica_cnt:
return DeploymentStatusInfo(status=DeploymentStatus.RUNNING), False
return (
DeploymentStatusInfo(
status=DeploymentStatus.UPDATING,
message=(
f"Running replicas of target version: "
f"{running_at_target_version_replica_cnt}, target "
"replicas: {target_replica_count}"
),
),
False,
)
| python/ray/serve/deployment_state.py | 356 | ray | {
"docstring": "Get the current deployment status.\n\n Checks the difference between the target vs. running replica count for\n the target version.\n\n TODO(edoakes): we should report the status as FAILED if replicas are\n repeatedly failing health checks. Need a reasonable heuristic here.\n\n Returns:\n (DeploymentStatusInfo, was_deleted)\n ",
"language": "en",
"n_whitespaces": 95,
"n_words": 42,
"vocab_size": 37
} | 248 | Python | 151 | 48adb6f7bb335b28fb0fb0d1190bd6c5dfc8ddfa | deployment_state.py | 144,666 | 66 | 216 | _get_curr_status | https://github.com/ray-project/ray.git | [serve] Introduce DeploymentStatus, poll for statuses instead of using async goals (#22121) | 1,143 | 0 | 33,279 | 18 |
|
5 | 13 | def _batch_format_to_use(cls) -> BatchFormat:
has_pandas_implemented = cls._predict_pandas != Predictor._predict_pandas
has_numpy_implemented = cls._predict_numpy != Predictor._predict_numpy
if has_pandas_implemented and has_numpy_implemented:
return cls.preferred_batch_format()
elif has_pandas_implemented:
return BatchFormat.PANDAS
elif has_numpy_implemented:
return BatchFormat.NUMPY
else:
raise NotImplementedError(
f"Predictor {cls.__name__} must implement at least one of "
"`_predict_pandas` and `_predict_numpy`."
)
| python/ray/train/predictor.py | 109 | ray | {
"docstring": "Determine the batch format to use for the predictor.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 8
} | 44 | Python | 36 | 326d84f1149319809191e7887155df7f04f6f46a | predictor.py | 136,394 | 15 | 60 | _batch_format_to_use | https://github.com/ray-project/ray.git | [AIR][Predictor] Enable numpy based predictor (#28917)
Co-authored-by: Clark Zinzow <[email protected]>
Co-authored-by: Amog Kamsetty <[email protected]> | 178 | 0 | 30,905 | 14 |
|
3 | 9 | async def read(self) -> discord.Message:
msg = await self.queue.get()
if msg is None and self.expired:
raise ChannelExpiredException()
return msg
| bot/channel_handlers.py | 61 | Open-Assistant | {
"docstring": "Call this method to read the next message from the user in the handler method.",
"language": "en",
"n_whitespaces": 14,
"n_words": 15,
"vocab_size": 13
} | 19 | Python | 17 | 3205491166e190512608bf01754815cadae47a92 | channel_handlers.py | 216,680 | 6 | 35 | read | https://github.com/LAION-AI/Open-Assistant.git | add channel handler async msg routing | 58 | 0 | 54,675 | 10 |
|
2 | 11 | def yamlcheck(python):
result = json.loads(raw_command([python.path, os.path.join(ANSIBLE_TEST_TARGET_TOOLS_ROOT, 'yamlcheck.py')], capture=True)[0])
if not result['yaml']:
return None
return result['cloader']
| test/lib/ansible_test/_internal/util_common.py | 88 | ansible | {
"docstring": "Return True if PyYAML has libyaml support, False if it does not and None if it was not found.",
"language": "en",
"n_whitespaces": 18,
"n_words": 19,
"vocab_size": 15
} | 15 | Python | 14 | d19b506ce8c5ee43865b1cead2246fc07cc8902b | util_common.py | 266,508 | 5 | 53 | yamlcheck | https://github.com/ansible/ansible.git | ansible-test - Clean up future boilerplate. (#76874)
* ansible-test - Clarify need for empty __init__.py
* ansible-test - Update code-smell boilerplate.
* Update code-smell boilerplate for core.
* Update future boilerplate test for ansible-test.
All ansible-test code (except for targets) and core-specific sanity tests now use the same boilerplate.
The test also checks for unwanted `__future__` and `metaclass` boilerplate.
* Relocate target tools to the correct directory.
Several tools used on target Python versions were incorrectly placed in the controller directory. | 34 | 0 | 78,447 | 15 |
|
1 | 2 | def imag(self):
return self["imag"]
| packages/python/plotly/plotly/graph_objs/_scattersmith.py | 22 | plotly.py | {
"docstring": "\n Sets the imaginary component of the data, in units of\n normalized impedance such that real=1, imag=0 is the center of\n the chart.\n\n The 'imag' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n ",
"language": "en",
"n_whitespaces": 108,
"n_words": 44,
"vocab_size": 37
} | 4 | Python | 4 | 43e3a4011080911901176aab919c0ecf5046ddd3 | _scattersmith.py | 228,102 | 2 | 11 | imag | https://github.com/plotly/plotly.py.git | switch to black .22 | 18 | 0 | 59,775 | 7 |
|
1 | 13 | def test_get_conn_uri_non_existent_key(self):
conn_id = "test_mysql"
secret_id = 'airflow/connections/test_postgres'
create_param = {
'Name': secret_id,
}
param = {
'SecretId': secret_id,
'SecretString': 'postgresql://airflow:airflow@host:5432/airflow',
}
secrets_manager_backend = SecretsManagerBackend()
secrets_manager_backend.client.create_secret(**create_param)
secrets_manager_backend.client.put_secret_value(**param)
assert secrets_manager_backend.get_conn_uri(conn_id=conn_id) is None
assert secrets_manager_backend.get_connection(conn_id=conn_id) is None
| tests/providers/amazon/aws/secrets/test_secrets_manager.py | 137 | airflow | {
"docstring": "\n Test that if the key with connection ID is not present,\n SecretsManagerBackend.get_connection should return None\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 15,
"vocab_size": 15
} | 35 | Python | 25 | 79a2f79ff85a740d6b3680215dc2c9a143ddafbb | test_secrets_manager.py | 48,448 | 15 | 77 | test_get_conn_uri_non_existent_key | https://github.com/apache/airflow.git | cleanup usage of `get_connections()`` from test suite (#23757)
The function is deprecated and raises warnings https://github.com/apache/airflow/pull/10192
Replacing the usage with `get_connection()` | 152 | 0 | 9,496 | 9 |
|
1 | 11 | def _create_trial_info(self, expr_dir):
meta = self._build_trial_meta(expr_dir)
self.logger.debug("Create trial for %s" % meta)
trial_record = TrialRecord.from_json(meta)
trial_record.save()
| python/ray/tune/automlboard/backend/collector.py | 68 | ray | {
"docstring": "Create information for given trial.\n\n Meta file will be loaded if exists, and the trial information\n will be saved in db backend.\n\n Args:\n expr_dir: Directory path of the experiment.\n ",
"language": "en",
"n_whitespaces": 68,
"n_words": 29,
"vocab_size": 25
} | 16 | Python | 15 | d2f0c3b2f64b41f6541f6521e98cf3a37577c016 | collector.py | 140,346 | 5 | 39 | _create_trial_info | https://github.com/ray-project/ray.git | Clean up docstyle in data, ml, and tune packages (#25188) | 51 | 0 | 31,930 | 9 |
|
1 | 8 | def image_svg(viz, env):
svgstr =
viz.svg(
svgstr=svgstr,
opts=dict(title='Example of SVG Rendering')
)
| example/components/image.py | 46 | visdom | {
"docstring": "\n <svg height=\"300\" width=\"300\">\n <ellipse cx=\"80\" cy=\"80\" rx=\"50\" ry=\"30\"\n style=\"fill:red;stroke:purple;stroke-width:2\" />\n Sorry, your browser does not support inline SVG.\n </svg>\n ",
"language": "en",
"n_whitespaces": 45,
"n_words": 19,
"vocab_size": 19
} | 12 | Python | 12 | b4115c0337b1bacc876bef1ece97e8fa8b3e2834 | image.py | 106,603 | 12 | 27 | image_svg | https://github.com/fossasia/visdom.git | test: split demo.py into seperate files and functions | 35 | 0 | 22,423 | 12 |
|
1 | 3 | def escape_eid(eid):
return eid.replace('/', '_')
| py/visdom/utils/server_utils.py | 30 | visdom | {
"docstring": "Replace slashes with underscores, to avoid recognizing them\n as directories.\n ",
"language": "en",
"n_whitespaces": 16,
"n_words": 10,
"vocab_size": 10
} | 5 | Python | 5 | 60c90e313e106c0af62339d29eeda0e62823c648 | server_utils.py | 106,776 | 2 | 15 | escape_eid | https://github.com/fossasia/visdom.git | Refactoring server.py into more intentional files | 11 | 0 | 22,436 | 8 |
|
1 | 7 | def size(self) -> int | np.signedinteger:
return np.prod(self.shape)
| dask/array/core.py | 36 | dask | {
"docstring": "\n The total number of blocks in the array.\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 8,
"vocab_size": 8
} | 8 | Python | 8 | 1a760229fc18c0c7df41669a13a329a287215819 | core.py | 156,682 | 5 | 21 | size | https://github.com/dask/dask.git | Only import IPython if type checking (#9230) | 22 | 0 | 36,713 | 8 |
|
1 | 2 | def icicle(self):
return self["icicle"]
| packages/python/plotly/plotly/graph_objs/layout/template/_data.py | 22 | plotly.py | {
"docstring": "\n The 'icicle' property is a tuple of instances of\n Icicle that may be specified as:\n - A list or tuple of instances of plotly.graph_objs.layout.template.data.Icicle\n - A list or tuple of dicts of string/value properties that\n will be passed to the Icicle constructor\n\n Supported dict properties:\n\n Returns\n -------\n tuple[plotly.graph_objs.layout.template.data.Icicle]\n ",
"language": "en",
"n_whitespaces": 131,
"n_words": 48,
"vocab_size": 33
} | 4 | Python | 4 | 43e3a4011080911901176aab919c0ecf5046ddd3 | _data.py | 232,553 | 2 | 11 | icicle | https://github.com/plotly/plotly.py.git | switch to black .22 | 18 | 0 | 63,997 | 7 |
|
4 | 17 | def _async_update_rssi(self) -> None:
for (
unique_id,
ibeacon_advertisement,
) in self._last_ibeacon_advertisement_by_unique_id.items():
address = unique_id.split("_")[-1]
if (
service_info := bluetooth.async_last_service_info(
self.hass, address, connectable=False
)
) and service_info.rssi != ibeacon_advertisement.rssi:
ibeacon_advertisement.update_rssi(service_info.rssi)
async_dispatcher_send(
self.hass,
signal_seen(unique_id),
ibeacon_advertisement,
)
| homeassistant/components/ibeacon/coordinator.py | 134 | core | {
"docstring": "Check to see if the rssi has changed and update any devices.\n\n We don't callback on RSSI changes so we need to check them\n here and send them over the dispatcher periodically to\n ensure the distance calculation is update.\n ",
"language": "en",
"n_whitespaces": 67,
"n_words": 39,
"vocab_size": 33
} | 34 | Python | 28 | 02731efc4cb3f7ee94b0c08aecc10e3a5209dbf4 | coordinator.py | 287,742 | 23 | 86 | _async_update_rssi | https://github.com/home-assistant/core.git | Handle iBeacons that broadcast multiple different uuids (#79011)
* Handle iBeacons that broadcast multiple different uuids
* fix flip-flopping between uuids
* naming | 261 | 0 | 86,930 | 13 |
|
1 | 2 | def session():
return Session()
| .venv/lib/python3.8/site-packages/pip/_vendor/requests/sessions.py | 19 | transferlearning | {
"docstring": "\n Returns a :class:`Session` for context-management.\n\n .. deprecated:: 1.0.0\n\n This method has been deprecated since version 1.0.0 and is only kept for\n backwards compatibility. New code should use :class:`~requests.sessions.Session`\n to create a session. This may be removed at a future date.\n\n :rtype: Session\n ",
"language": "en",
"n_whitespaces": 76,
"n_words": 42,
"vocab_size": 37
} | 4 | Python | 4 | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | sessions.py | 63,591 | 2 | 9 | session | https://github.com/jindongwang/transferlearning.git | upd; format | 10 | 0 | 13,405 | 7 |
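For the deprecated `requests.session()` helper documented above, a small sketch; new code is expected to instantiate `requests.Session` directly:

```python
import requests

s = requests.session()                  # deprecated alias kept for backwards compatibility
print(isinstance(s, requests.Session))  # True

# Preferred, equivalent spelling with context management:
with requests.Session() as sess:
    pass  # sess.get(...), sess.post(...), etc.
```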
|
6 | 38 | async def async_update(self, log_errors=True):
if not self._async_client:
self._async_client = get_async_client(
self._hass, verify_ssl=self._verify_ssl
)
rendered_headers = template.render_complex(self._headers, parse_result=False)
rendered_params = template.render_complex(self._params)
_LOGGER.debug("Updating from %s", self._resource)
try:
response = await self._async_client.request(
self._method,
self._resource,
headers=rendered_headers,
params=rendered_params,
auth=self._auth,
content=self._request_data,
timeout=self._timeout,
follow_redirects=True,
)
self.data = response.text
self.headers = response.headers
except httpx.TimeoutException as ex:
if log_errors:
_LOGGER.error("Timeout while fetching data: %s", self._resource)
self.last_exception = ex
self.data = None
self.headers = None
except httpx.RequestError as ex:
if log_errors:
_LOGGER.error(
"Error fetching data: %s failed with %s", self._resource, ex
)
self.last_exception = ex
self.data = None
self.headers = None
| homeassistant/components/rest/data.py | 317 | core | {
"docstring": "Get the latest data from REST service with provided method.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | 91 | Python | 56 | 599d61a4da096227ce4d5ba1dc0eaabceea56f49 | data.py | 289,315 | 35 | 202 | async_update | https://github.com/home-assistant/core.git | Fix payload in rest (#80544) | 500 | 0 | 88,457 | 13 |
|
1 | 12 | async def test_auth(hass, aioclient_mock):
expiration_time = time.time() + 86400
create_config_entry(expiration_time).add_to_hass(hass)
# Prepare to capture credentials in API request. Empty payloads just mean
# no devices or structures are loaded.
aioclient_mock.get(f"{API_URL}/enterprises/{PROJECT_ID}/structures", json={})
aioclient_mock.get(f"{API_URL}/enterprises/{PROJECT_ID}/devices", json={})
# Prepare to capture credentials for Subscriber
captured_creds = None
| tests/components/nest/test_api.py | 108 | core | {
"docstring": "Exercise authentication library creates valid credentials.",
"language": "en",
"n_whitespaces": 5,
"n_words": 6,
"vocab_size": 6
} | 43 | Python | 35 | c576a68d336bc91fd82c299d9b3e5dfdc1c14960 | test_api.py | 291,721 | 30 | 208 | test_auth | https://github.com/home-assistant/core.git | Upgrade pytest-aiohttp (#82475)
* Upgrade pytest-aiohttp
* Make sure executors, tasks and timers are closed
Some test will trigger warnings on garbage collect, these warnings
spills over into next test.
Some test trigger tasks that raise errors on shutdown, these spill
over into next test.
This is to mimic older pytest-aiohttp and it's behaviour on test
cleanup.
Discussions on similar changes for pytest-aiohttp are here:
https://github.com/pytest-dev/pytest-asyncio/pull/309
* Replace loop with event_loop
* Make sure time is frozen for tests
* Make sure the ConditionType is not async
/home-assistant/homeassistant/helpers/template.py:2082: RuntimeWarning: coroutine 'AsyncMockMixin._execute_mock_call' was never awaited
def wrapper(*args, **kwargs):
Enable tracemalloc to get traceback where the object was allocated.
See https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings for more info.
* Increase litejet press tests with a factor 10
The times are simulated anyway, and we can't stop the normal
event from occuring.
* Use async handlers for aiohttp
tests/components/motioneye/test_camera.py::test_get_still_image_from_camera
tests/components/motioneye/test_camera.py::test_get_still_image_from_camera
tests/components/motioneye/test_camera.py::test_get_stream_from_camera
tests/components/motioneye/test_camera.py::test_get_stream_from_camera
tests/components/motioneye/test_camera.py::test_camera_option_stream_url_template
tests/components/motioneye/test_camera.py::test_camera_option_stream_url_template
/Users/joakim/src/hass/home-assistant/venv/lib/python3.9/site-packages/aiohttp/web_urldispatcher.py:189: DeprecationWarning: Bare functions are deprecated, use async ones
warnings.warn(
* Switch to freezegun in modbus tests
The tests allowed clock to tick in between steps
* Make sure skybell object are fully mocked
Old tests would trigger attempts to post to could services:
```
DEBUG:aioskybell:HTTP post https://cloud.myskybell.com/api/v3/login/ Request with headers: {'content-type': 'application/json', 'accept': '*/*', 'x-skybell-app-id': 'd2b542c7-a7e4-4e1e-b77d-2b76911c7c46', 'x-skybell-client-id': '1f36a3c0-6dee-4997-a6db-4e1c67338e57'}
```
* Fix sorting that broke after rebase | 71 | 0 | 90,825 | 9 |
|
7 | 24 | def authenticate(request=None, **credentials):
username = credentials.get('username')
for backend, backend_path in _get_backends(return_tuples=True):
# Pre-check first so we don't waste time on authentication
if not backend.username_can_authenticate(username):
continue
# Native behaviour
backend_signature = inspect.signature(backend.authenticate)
try:
backend_signature.bind(request, **credentials)
except TypeError:
# This backend doesn't accept these credentials as arguments. Try the next one.
continue
try:
user = backend.authenticate(request, **credentials)
except PermissionDenied:
# This backend says to stop in our tracks - this user should not be allowed in at all.
break
if user is None:
continue
# Check again for users missed by the pre-check
if not backend.user_can_authenticate(user):
continue
# Annotate the user object with the path of the backend.
user.backend = backend_path
return user
# The credentials supplied are invalid to all backends, fire signal
user_login_failed.send(sender=__name__, credentials=_clean_credentials(credentials), request=request)
auth.authenticate = authenticate
| apps/authentication/mixins.py | 220 | jumpserver | {
"docstring": "\n If the given credentials are valid, return a User object.\n 之所以 hack 这个 auticate\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 14,
"vocab_size": 14
} | 112 | Python | 78 | edfca5eb2486c2f006257723ffeda6f56b170170 | mixins.py | 188,426 | 21 | 125 | authenticate | https://github.com/jumpserver/jumpserver.git | Fix rbac (#7699)
* perf: optimize suggestion
* perf: update migrations
* feat: add OIDC authentication logic
* perf: modify backend
* perf: optimize authentication backends
* perf: optimize authentication backends
* perf: optimize CAS authentication; when users access via multiple domains, the callback returns to the respective domain
Co-authored-by: ibuler <[email protected]> | 323 | 0 | 45,914 | 13 |
|
2 | 11 | def config_test(self) -> None:
try:
util.run_script([self.conf('ctl'), "-c", self.nginx_conf, "-t"])
except errors.SubprocessError as err:
raise errors.MisconfigurationError(str(err))
| certbot-nginx/certbot_nginx/_internal/configurator.py | 84 | certbot | {
"docstring": "Check the configuration of Nginx for errors.\n\n :raises .errors.MisconfigurationError: If config_test fails\n\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 12,
"vocab_size": 12
} | 15 | Python | 15 | 16aad35d31a887dab157f9d4f5e0fe9218d06064 | configurator.py | 186,582 | 10 | 48 | config_test | https://github.com/certbot/certbot.git | Fully type certbot-nginx module (#9124)
* Work in progress
* Fix type
* Work in progress
* Work in progress
* Work in progress
* Work in progress
* Work in progress
* Oups.
* Fix typing in UnspacedList
* Fix logic
* Finish typing
* List certbot-nginx as fully typed in tox
* Fix lint
* Fix checks
* Organize imports
* Fix typing for Python 3.6
* Fix checks
* Fix lint
* Update certbot-nginx/certbot_nginx/_internal/configurator.py
Co-authored-by: alexzorin <[email protected]>
* Update certbot-nginx/certbot_nginx/_internal/configurator.py
Co-authored-by: alexzorin <[email protected]>
* Fix signature of deploy_cert regarding the installer interface
* Update certbot-nginx/certbot_nginx/_internal/obj.py
Co-authored-by: alexzorin <[email protected]>
* Fix types
* Update certbot-nginx/certbot_nginx/_internal/parser.py
Co-authored-by: alexzorin <[email protected]>
* Precise type
* Precise _coerce possible inputs/outputs
* Fix type
* Update certbot-nginx/certbot_nginx/_internal/http_01.py
Co-authored-by: ohemorange <[email protected]>
* Fix type
* Remove an undesirable implementation.
* Fix type
Co-authored-by: alexzorin <[email protected]>
Co-authored-by: ohemorange <[email protected]> | 58 | 0 | 45,498 | 13 |
|
1 | 6 | def print_stack(self, *, limit=None, file=None):
return base_tasks._task_print_stack(self, limit, file)
| python3.10.4/Lib/asyncio/tasks.py | 41 | XX-Net | {
"docstring": "Print the stack or traceback for this task's coroutine.\n\n This produces output similar to that of the traceback module,\n for the frames retrieved by get_stack(). The limit argument\n is passed to get_stack(). The file argument is an I/O stream\n to which the output is written; by default output is written\n to sys.stderr.\n ",
"language": "en",
"n_whitespaces": 96,
"n_words": 52,
"vocab_size": 35
} | 9 | Python | 9 | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | tasks.py | 220,797 | 2 | 27 | print_stack | https://github.com/XX-net/XX-Net.git | add python 3.10.4 for windows | 23 | 0 | 56,120 | 7 |
|
2 | 11 | def _rename_tmp_file(self) -> None:
os.rename(self._video_tmp_file, self._output_filename)
logger.debug("Removing temp file")
if os.path.isfile(self._video_tmp_file):
os.remove(self._video_tmp_file)
| plugins/convert/writer/ffmpeg.py | 78 | faceswap | {
"docstring": " Rename the temporary video file if not muxing audio. ",
"language": "en",
"n_whitespaces": 10,
"n_words": 9,
"vocab_size": 9
} | 12 | Python | 12 | 60291d49c4da1cd260fbc0b04aa6a312eedfefbb | ffmpeg.py | 100,615 | 6 | 46 | _rename_tmp_file | https://github.com/deepfakes/faceswap.git | ffmpeg writer: Create new filename if output pre-exists | 51 | 0 | 20,077 | 10 |
|
4 | 14 | def get_tightbbox(self, renderer=None):
bbox = self.get_window_extent(renderer)
if self.get_clip_on():
clip_box = self.get_clip_box()
if clip_box is not None:
bbox = Bbox.intersection(bbox, clip_box)
clip_path = self.get_clip_path()
if clip_path is not None:
clip_path = clip_path.get_fully_transformed_path()
bbox = Bbox.intersection(bbox, clip_path.get_extents())
return bbox
| lib/matplotlib/artist.py | 137 | matplotlib | {
"docstring": "\n Like `.Artist.get_window_extent`, but includes any clipping.\n\n Parameters\n ----------\n renderer : `.RendererBase` subclass\n renderer that will be used to draw the figures (i.e.\n ``fig.canvas.get_renderer()``)\n\n Returns\n -------\n `.Bbox`\n The enclosing bounding box (in figure pixel coordinates).\n ",
"language": "en",
"n_whitespaces": 124,
"n_words": 34,
"vocab_size": 33
} | 37 | Python | 20 | 24b16804731d3a724e4ec0984da140b1a6b05c66 | artist.py | 108,560 | 11 | 84 | get_tightbbox | https://github.com/matplotlib/matplotlib.git | MNT: make renderer always optional | 154 | 0 | 23,258 | 14 |
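A hedged sketch of calling `get_tightbbox` with the now-optional renderer; it assumes a Matplotlib version that includes the change referenced in the record above, and uses the non-interactive Agg backend so it runs headless:

```python
import matplotlib
matplotlib.use("Agg")  # headless backend
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
bbox = ax.get_tightbbox()  # renderer argument omitted; resolved from the figure
print(bbox)                # bounding box in figure pixel coordinates
```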
|
1 | 17 | async def logout():
confirm_logged_in()
profiles = prefect.settings.load_profiles()
profiles.update_active_profile()
update_profile(PREFECT_API_URL=None, PREFECT_API_KEY=None)
profile = prefect.context.get_settings_context()
exit_with_success(f"Successfully logged out in profile {profile.name!r}")
@workspace_app.command() | src/prefect/cli/cloud.py | 101 | @workspace_app.command() | prefect | {
"docstring": "\n Log out of Prefect Cloud.\n Removes PREFECT_API_URL and PREFECT_API_KEY from profile.\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 11,
"vocab_size": 11
} | 20 | Python | 18 | b0af6cf8b1eaea33ee6809efc770fc041908b7ca | cloud.py | 55,096 | 7 | 46 | logout | https://github.com/PrefectHQ/prefect.git | Refactor settings context | 40 | 1 | 11,209 | 10 |
1 | 18 | def test_post_save_change_redirect(self):
Person.objects.create(name="John Doe")
self.assertEqual(Person.objects.count(), 1)
person = Person.objects.all()[0]
post_url = reverse(
"admin_custom_urls:admin_custom_urls_person_change", args=[person.pk]
)
response = self.client.post(post_url, {"name": "Jack Doe"})
self.assertRedirects(
response,
reverse(
"admin_custom_urls:admin_custom_urls_person_delete", args=[person.pk]
),
)
| tests/admin_custom_urls/tests.py | 154 | django | {
"docstring": "\n ModelAdmin.response_post_save_change() controls the redirection after\n the 'Save' button has been pressed when editing an existing object.\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 16,
"vocab_size": 15
} | 28 | Python | 23 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | tests.py | 207,062 | 14 | 92 | test_post_save_change_redirect | https://github.com/django/django.git | Refs #33476 -- Reformatted code with Black. | 150 | 0 | 51,854 | 12 |
|
1 | 2 | def prefixsrc(self):
return self["prefixsrc"]
| packages/python/plotly/plotly/graph_objs/table/_cells.py | 22 | plotly.py | {
"docstring": "\n Sets the source reference on Chart Studio Cloud for `prefix`.\n\n The 'prefixsrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ",
"language": "en",
"n_whitespaces": 77,
"n_words": 27,
"vocab_size": 25
} | 4 | Python | 4 | 43e3a4011080911901176aab919c0ecf5046ddd3 | _cells.py | 235,481 | 2 | 11 | prefixsrc | https://github.com/plotly/plotly.py.git | switch to black .22 | 18 | 0 | 66,925 | 7 |
|
6 | 16 | def get_conditions(filters):
filters = frappe._dict(filters) if filters else frappe._dict({})
conditions = frappe._dict({})
conditions.company = filters.company or frappe.defaults.get_user_default("company")
conditions.end_date = filters.period_end_date or frappe.utils.today()
conditions.start_date = filters.period_start_date or frappe.utils.add_months(
conditions.end_date, -1
)
conditions.sales_order = filters.sales_order or []
return conditions
| erpnext/selling/report/payment_terms_status_for_sales_order/payment_terms_status_for_sales_order.py | 158 | erpnext | {
"docstring": "\n\tConvert filter options to conditions used in query\n\t",
"language": "en",
"n_whitespaces": 7,
"n_words": 8,
"vocab_size": 8
} | 37 | Python | 26 | 1bac7930834d6f688950e836c45305a62e7ecb3f | payment_terms_status_for_sales_order.py | 63,938 | 10 | 97 | get_conditions | https://github.com/frappe/erpnext.git | feat: Payment Terms Status report
- calculate status at runtime for payment terms based on invoices
- invoices are used in FIFO method | 27 | 0 | 13,537 | 10 |
|
22 | 19 | def call(self, src_files, dest_files):
# :var src_done: True if there are no more files from the source left.
src_done = False
# :var dest_done: True if there are no more files form the dest left.
dest_done = False
# :var src_take: Take the next source file from the generated files if
# true
src_take = True
# :var dest_take: Take the next dest file from the generated files if
# true
dest_take = True
while True:
try:
if (not src_done) and src_take:
src_file = advance_iterator(src_files)
except StopIteration:
src_file = None
src_done = True
try:
if (not dest_done) and dest_take:
dest_file = advance_iterator(dest_files)
except StopIteration:
dest_file = None
dest_done = True
if (not src_done) and (not dest_done):
src_take = True
dest_take = True
compare_keys = self.compare_comp_key(src_file, dest_file)
if compare_keys == 'equal':
should_sync = self._sync_strategy.determine_should_sync(
src_file, dest_file
)
if should_sync:
yield src_file
elif compare_keys == 'less_than':
src_take = True
dest_take = False
should_sync = self._not_at_dest_sync_strategy.determine_should_sync(src_file, None)
if should_sync:
yield src_file
elif compare_keys == 'greater_than':
src_take = False
dest_take = True
should_sync = self._not_at_src_sync_strategy.determine_should_sync(None, dest_file)
if should_sync:
yield dest_file
elif (not src_done) and dest_done:
src_take = True
should_sync = self._not_at_dest_sync_strategy.determine_should_sync(src_file, None)
if should_sync:
yield src_file
elif src_done and (not dest_done):
dest_take = True
should_sync = self._not_at_src_sync_strategy.determine_should_sync(None, dest_file)
if should_sync:
yield dest_file
else:
break
| awscli/customizations/s3/comparator.py | 402 | aws-cli | {
"docstring": "\n This function preforms the actual comparisons. The parameters it takes\n are the generated files for both the source and the destination. The\n key concept in this function is that no matter the type of where the\n files are coming from, they are listed in the same order, least to\n greatest in collation order. This allows for easy comparisons to\n determine if file needs to be added or deleted. Comparison keys are\n used to determine if two files are the same and each file has a\n unique comparison key. If they are the same compare the size and\n last modified times to see if a file needs to be updated. Ultimately,\n it will yield a sequence of file info objectsthat will be sent to\n the ``S3Handler``.\n\n :param src_files: The generated FileInfo objects from the source.\n :param dest_files: The generated FileInfo objects from the dest.\n\n :returns: Yields the FilInfo objects of the files that need to be\n operated on\n\n Algorithm:\n Try to take next from both files. If it is empty signal\n corresponding done flag. If both generated lists are not done\n compare compare_keys. If equal, compare size and time to see if\n it needs to be updated. If source compare_key is less than dest\n compare_key, the file needs to be added to the destination. Take\n the next source file but not not destination file. If the source\n compare_key is greater than dest compare_key, that destination file\n needs to be deleted from the destination. Take the next dest file\n but not the source file. If the source list is empty delete the\n rest of the files in the dest list from the destination. If the\n dest list is empty add the rest of the file in source list to\n the destination.\n ",
"language": "en",
"n_whitespaces": 560,
"n_words": 289,
"vocab_size": 121
} | 210 | Python | 68 | 8a16d7d8ce5e3f97fb100af7a960224f7f80137d | comparator.py | 189,212 | 52 | 239 | call | https://github.com/aws/aws-cli.git | Delete extra whitespace
A correction that does not affect the operation. | 1,052 | 0 | 46,019 | 16 |
|
4 | 13 | def _bool_arith_fallback(op_str, a, b):
if _has_bool_dtype(a) and _has_bool_dtype(b):
if op_str in _BOOL_OP_UNSUPPORTED:
warnings.warn(
f"evaluating in Python space because the {repr(op_str)} "
"operator is not supported by numexpr for the bool dtype, "
f"use {repr(_BOOL_OP_UNSUPPORTED[op_str])} instead.",
stacklevel=find_stack_level(inspect.currentframe()),
)
return True
return False
| pandas/core/computation/expressions.py | 108 | pandas | {
"docstring": "\n Check if we should fallback to the python `_evaluate_standard` in case\n of an unsupported operation by numexpr, which is the case for some\n boolean ops.\n ",
"language": "en",
"n_whitespaces": 38,
"n_words": 25,
"vocab_size": 23
} | 41 | Python | 36 | e94faa23e24c0abf9db74d79cfebe06676577867 | expressions.py | 168,434 | 11 | 52 | _bool_arith_fallback | https://github.com/pandas-dev/pandas.git | WARN,TST check stacklevel for all warnings (#47998)
* use find_stack_level everywhere
* fixup
* pyx fixups
* fixup test_optional_dependency
* fixup api
* set check_stacklevel=False for some tests
* use lru_cache for currentframe
* fixup import in __init__
* add missing imports to pyx files
* add missing import
* fixup import in conversion
* revert some __init__ changes
* start n=1
* temporarily dont check stacklevel in _check_plot_works
* catch some more warnings
* dont check stacklevel in check_plot_works
* fixup
* ignore stacklevel in check_plot_works | 150 | 0 | 40,296 | 17 |
|
2 | 5 | def get_file_breaks(self, filename):
filename = self.canonic(filename)
if filename in self.breaks:
return self.breaks[filename]
else:
return []
| python3.10.4/Lib/bdb.py | 58 | XX-Net | {
"docstring": "Return all lines with breakpoints for filename.\n\n If no breakpoints are set, return an empty list.\n ",
"language": "en",
"n_whitespaces": 30,
"n_words": 16,
"vocab_size": 15
} | 15 | Python | 13 | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | bdb.py | 221,103 | 6 | 35 | get_file_breaks | https://github.com/XX-net/XX-Net.git | add python 3.10.4 for windows | 65 | 0 | 56,206 | 9 |
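A small stdlib sketch around `Bdb.get_file_breaks`; it assumes the `bdb` module source file is available on disk so line 1 is a valid, non-empty line:

```python
import bdb

debugger = bdb.Bdb()
err = debugger.set_break(bdb.__file__, 1)      # None on success, error string otherwise
print(err)                                     # None
print(debugger.get_file_breaks(bdb.__file__))  # [1]
print(debugger.get_file_breaks("missing.py"))  # [] when no breakpoints are set
```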
|
5 | 11 | def radius(G, e=None, usebounds=False, weight=None):
if usebounds is True and e is None and not G.is_directed():
return _extrema_bounding(G, compute="radius", weight=weight)
if e is None:
e = eccentricity(G, weight=weight)
return min(e.values())
| networkx/algorithms/distance_measures.py | 112 | networkx | {
"docstring": "Returns the radius of the graph G.\n\n The radius is the minimum eccentricity.\n\n Parameters\n ----------\n G : NetworkX graph\n A graph\n\n e : eccentricity dictionary, optional\n A precomputed dictionary of eccentricities.\n\n weight : string, function, or None\n If this is a string, then edge weights will be accessed via the\n edge attribute with this key (that is, the weight of the edge\n joining `u` to `v` will be ``G.edges[u, v][weight]``). If no\n such edge attribute exists, the weight of the edge is assumed to\n be one.\n\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly three\n positional arguments: the two endpoints of an edge and the\n dictionary of edge attributes for that edge. The function must\n return a number.\n\n If this is None, every edge has weight/distance/cost 1.\n\n Weights stored as floating point values can lead to small round-off\n errors in distances. Use integer weights to avoid this.\n\n Weights should be positive, since they are distances.\n\n Returns\n -------\n r : integer\n Radius of graph\n\n Examples\n --------\n >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])\n >>> nx.radius(G)\n 2\n\n ",
"language": "en",
"n_whitespaces": 357,
"n_words": 197,
"vocab_size": 120
} | 30 | Python | 22 | 28f78cfa9a386620ee1179582fda1db5ffc59f84 | distance_measures.py | 177,078 | 6 | 71 | radius | https://github.com/networkx/networkx.git | Add weight distance metrics (#5305)
Adds the weight keyword argument to allow users to compute weighted distance metrics
e.g. diameter, eccentricity, periphery, etc. The kwarg works in the same fashion as the
weight param for shortest paths - i.e. if a string, look up with edge attr by key, if callable,
compute the weight via the function. Default is None, meaning return unweighted result
which is the current behavior.
Co-authored-by: Dan Schult <[email protected]>
Co-authored-by: Ross Barnowski <[email protected]> | 56 | 0 | 42,265 | 11 |
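Reusing the docstring example above, plus a hedged weighted call that assumes a NetworkX release containing the `weight=` keyword added by this commit:

```python
import networkx as nx

G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])
print(nx.radius(G))  # 2

# Weighted variant (assumes the weight= keyword from this change is available)
H = nx.Graph()
H.add_weighted_edges_from([(1, 2, 1.0), (2, 3, 1.0), (1, 3, 5.0)])
print(nx.radius(H, weight="weight"))  # 1 -- node 2 reaches both neighbours at cost 1
```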
|
8 | 24 | def write(self, args, path): # type: (CommonConfig, str) -> None
# NOTE: Switching the inventory generation to write JSON would be nice, but is currently not possible due to the use of hard-coded inventory filenames.
# The name `inventory` works for the POSIX integration tests, but `inventory.winrm` and `inventory.networking` will only parse in INI format.
# If tests are updated to use the `INVENTORY_PATH` environment variable, then this could be changed.
# Also, some tests detect the test type by inspecting the suffix on the inventory filename, which would break if it were changed.
inventory_text = ''
for group, hosts in self.host_groups.items():
inventory_text += f'[{group}]\n'
for host, variables in hosts.items():
kvp = ' '.join(f'{key}="{value}"' for key, value in variables.items())
inventory_text += f'{host} {kvp}\n'
inventory_text += '\n'
for group, children in (self.extra_groups or {}).items():
inventory_text += f'[{group}]\n'
for child in children:
inventory_text += f'{child}\n'
inventory_text += '\n'
inventory_text = inventory_text.strip()
if not args.explain:
write_text_file(path, inventory_text + '\n')
display.info(f'>>> Inventory\n{inventory_text}', verbosity=3)
| test/lib/ansible_test/_internal/host_profiles.py | 268 | ansible | {
"docstring": "Write the given inventory to the specified path on disk.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 9
} | 159 | Python | 109 | fe349a1ccd658d86cfcf10eecdce9d48ece6176c | host_profiles.py | 267,273 | 17 | 133 | write | https://github.com/ansible/ansible.git | ansible-test - Enhance the shell command. (#77734)
* ansible-test - Add shell --export option.
* ansible-test - Support cmd args for shell command.
Also allow shell to be used without a valid layout if no delegation is required.
* ansible-test - Improve stderr/stdout consistency.
By default all output goes to stdout only, with the exception of a fatal error.
When using any of the following, all output defaults to stderr instead:
* sanity with the `--lint` option -- sanity messages to stdout
* coverage analyze -- output to stdout if the output file is `/dev/stdout`
* shell -- shell output to stdout
This fixes issues two main issues:
* Unpredictable output order when using both info and error/warning messages.
* Mixing of lint/command/shell output with bootstrapping messages on stdout.
* ansible-test - Add changelog fragment. | 377 | 0 | 78,828 | 15 |
|
1 | 4 | def ambient_dimension(self):
return len(self.args[0])
| sympy/geometry/curve.py | 28 | sympy | {
"docstring": "The dimension of the curve.\n\n Returns\n =======\n\n int :\n the dimension of curve.\n\n Examples\n ========\n\n >>> from sympy.abc import t\n >>> from sympy import Curve\n >>> C = Curve((t, t**2), (t, 0, 2))\n >>> C.ambient_dimension\n 2\n\n ",
"language": "en",
"n_whitespaces": 124,
"n_words": 36,
"vocab_size": 27
} | 4 | Python | 4 | 498015021131af4dbb07eb110e5badaba8250c7b | curve.py | 196,266 | 2 | 16 | ambient_dimension | https://github.com/sympy/sympy.git | Updated import locations | 18 | 0 | 47,766 | 9 |
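A sketch reusing the doctest from the docstring above:

```python
from sympy import Curve
from sympy.abc import t

C = Curve((t, t**2), (t, 0, 2))
print(C.ambient_dimension)  # 2 -- the curve lives in two ambient coordinates
```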
|
1 | 6 | def from_pandas(cls, *args, **kwargs):
return cls(pa.Table.from_pandas(*args, **kwargs))
| src/datasets/table.py | 46 | datasets | {
"docstring": "\n Convert pandas.DataFrame to an Arrow Table.\n\n The column types in the resulting Arrow Table are inferred from the\n dtypes of the pandas.Series in the DataFrame. In the case of non-object\n Series, the NumPy dtype is translated to its Arrow equivalent. In the\n case of `object`, we need to guess the datatype by looking at the\n Python objects in this Series.\n\n Be aware that Series of the `object` dtype don't carry enough\n information to always lead to a meaningful Arrow type. In the case that\n we cannot infer a type, e.g. because the DataFrame is of length 0 or\n the Series only contains None/nan objects, the type is set to\n null. This behavior can be avoided by constructing an explicit schema\n and passing it to this function.\n\n Args:\n df (:obj:`pandas.DataFrame`):\n schema (:obj:`pyarrow.Schema`, optional):\n The expected schema of the Arrow Table. This can be used to\n indicate the type of columns if we cannot infer it automatically.\n If passed, the output will have exactly this schema. Columns\n specified in the schema that are not found in the DataFrame columns\n or its index will raise an error. Additional columns or index\n levels in the DataFrame which are not specified in the schema will\n be ignored.\n preserve_index (:obj:`bool`, optional):\n Whether to store the index as an additional column in the resulting\n ``Table``. The default of None will store the index as a column,\n except for RangeIndex which is stored as metadata only. Use\n ``preserve_index=True`` to force it to be stored as a column.\n nthreads (:obj:`int`, defaults to :obj:`None` (may use up to system CPU count threads))\n If greater than 1, convert columns to Arrow in parallel using\n indicated number of threads\n columns (:obj:`List[str]`, optional):\n List of column to be converted. If None, use all columns.\n safe (:obj:`bool`, defaults to :obj:`True`):\n Check for overflows or other unsafe conversions\n\n Returns:\n :class:`datasets.table.Table`:\n\n Examples:\n ```python\n >>> import pandas as pd\n >>> import pyarrow as pa\n >>> df = pd.DataFrame({\n ... 'int': [1, 2],\n ... 'str': ['a', 'b']\n ... })\n >>> pa.Table.from_pandas(df)\n <pyarrow.lib.Table object at 0x7f05d1fb1b40>\n ```\n ",
"language": "en",
"n_whitespaces": 841,
"n_words": 338,
"vocab_size": 191
} | 7 | Python | 7 | e35be138148333078284b942ccc9ed7b1d826f97 | table.py | 104,411 | 2 | 28 | from_pandas | https://github.com/huggingface/datasets.git | Update docs to new frontend/UI (#3690)
* WIP: update docs to new UI
* make style
* Rm unused
* inject_arrow_table_documentation __annotations__
* hasattr(arrow_table_method, "__annotations__")
* Update task_template.rst
* Codeblock PT-TF-SPLIT
* Convert loading scripts
* Convert docs to mdx
* Fix mdx
* Add <Tip>
* Convert mdx tables
* Fix codeblock
* Rm unneded hashlinks
* Update index.mdx
* Redo dev change
* Rm circle ci `build_doc` & `deploy_doc`
* Rm unneeded files
* Update docs reamde
* Standardize to `Example::`
* mdx logging levels doc
* Table properties inject_arrow_table_documentation
* ``` to ```py mdx
* Add Tips mdx
* important,None -> <Tip warning={true}>
* More misc
* Center imgs
* Update instllation page
* `setup.py` docs section
* Rm imgs since they are in hf.co
* Update docs/source/access.mdx
Co-authored-by: Steven Liu <[email protected]>
* Update index mdx
* Update docs/source/access.mdx
Co-authored-by: Steven Liu <[email protected]>
* just `Dataset` obj
* Addedversion just italics
* Update ReadInstruction doc example syntax
* Change docstring for `prepare_for_task`
* Chore
* Remove `code` syntax from headings
* Rm `code` syntax from headings
* Hashlink backward compatability
* S3FileSystem doc
* S3FileSystem doc updates
* index.mdx updates
* Add darkmode gifs
* Index logo img css classes
* Index mdx dataset logo img size
* Docs for DownloadMode class
* Doc DownloadMode table
* format docstrings
* style
* Add doc builder scripts (#3790)
* add doc builder scripts
* fix docker image
* Docs new UI actions no self hosted (#3793)
* No self hosted
* replace doc injection by actual docstrings
* Docstring formatted
Co-authored-by: Quentin Lhoest <[email protected]>
Co-authored-by: Mishig Davaadorj <[email protected]>
Co-authored-by: Lysandre Debut <[email protected]>
Co-authored-by: Mishig Davaadorj <[email protected]>
* Rm notebooks from docs actions since they dont exi
* Update tsting branch
* More docstring
* Chore
* bump up node version
* bump up node
* ``` -> ```py for audio_process.mdx
* Update .github/workflows/build_documentation.yml
Co-authored-by: Quentin Lhoest <[email protected]>
* Uodate dev doc build
* remove run on PR
* fix action
* Fix gh doc workflow
* forgot this change when merging master
* Update build doc
Co-authored-by: Steven Liu <[email protected]>
Co-authored-by: Quentin Lhoest <[email protected]>
Co-authored-by: Quentin Lhoest <[email protected]>
Co-authored-by: Lysandre Debut <[email protected]> | 21 | 0 | 21,847 | 10 |
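A minimal sketch of the underlying `pyarrow.Table.from_pandas` call that the one-line wrapper above delegates to, following the docstring example:

```python
import pandas as pd
import pyarrow as pa

df = pd.DataFrame({"int": [1, 2], "str": ["a", "b"]})
table = pa.Table.from_pandas(df)
print(table.num_rows)  # 2
print(table.schema)    # column types inferred from the DataFrame dtypes
```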
|
1 | 6 | def divides(p, n):
sympy_deprecation_warning(
,
deprecated_since_version="1.11",
active_deprecations_target='deprecated-carmichael-static-methods',
)
return n % p == 0
| sympy/functions/combinatorial/numbers.py | 44 | sympy | {
"docstring": "\n divides can be replaced by directly testing n % p == 0.\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 12,
"vocab_size": 12
} | 14 | Python | 14 | b27e2b44626d138bd6ea235fbf114644baa5b144 | numbers.py | 197,219 | 9 | 26 | divides | https://github.com/sympy/sympy.git | Deprecate redundant static methods | 55 | 0 | 48,392 | 9 |
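Per the deprecation note above, the helper reduces to a plain modulo test; a tiny sketch of the suggested replacement:

```python
p, n = 3, 12
print(n % p == 0)  # True -- the recommended replacement for the deprecated divides(p, n)
```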
|
1 | 8 | def is_decompressed(self) -> bool:
return type(self._pb_body) in [
jina_pb2.DataRequestProto,
jina_pb2.DataRequestProtoWoData,
]
| jina/types/request/data.py | 42 | jina | {
"docstring": "\n Checks if the underlying proto object was already deserialized into a :class:`jina.proto.jina_pb2.DataRequestProto` or\n :class:`jina.proto.jina_pb2.DataRequestProtoWoData`. This does not necessarily mean that the data (docs) inside the request is also decompressed.\n :return: True if the proto was deserialized before\n ",
"language": "en",
"n_whitespaces": 69,
"n_words": 37,
"vocab_size": 30
} | 11 | Python | 11 | c3849c6fee4a65a77a82b2cfda9670d727ff0f53 | data.py | 12,701 | 10 | 26 | is_decompressed | https://github.com/jina-ai/jina.git | feat: allow to access parameters of data request wo loading data (#4991) | 54 | 0 | 2,387 | 9 |
|
1 | 4 | def model_file_path_key(self):
return f"{self.tag_to_agent()[self.value]}_response_model_path"
| projects/bb3/agents/module.py | 36 | ParlAI | {
"docstring": "\n Opt key for model file path for this agent.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 8
} | 4 | Python | 4 | b1acb681207559da56a787ba96e16f0e23697d92 | module.py | 195,191 | 2 | 9 | model_file_path_key | https://github.com/facebookresearch/ParlAI.git | Patch 8322 (#4709)
* add dafetymix teacher
* safety_mix teacher
* safety_mix teacher pos and neg teachers
* add tests for teacher
* add license info
* improvement
* add task list
* add task list and lint
* add init.py
* adding some patch to director
* seeker changes
* th
* 3
* jing
* changes
* z and r
* remove .opts
* fix docs
* add contrractions
* lint
Co-authored-by: Dexter Ju <[email protected]>
Co-authored-by: Jing Xu <[email protected]> | 18 | 0 | 47,220 | 10 |
|
1 | 9 | def swish(x):
return tf.nn.silu(x)
@keras_export("keras.activations.relu")
@tf.__internal__.dispatch.add_dispatch_support | keras/activations.py | 50 | @keras_export("keras.activations.relu")
@tf.__internal__.dispatch.add_dispatch_support | keras | {
"docstring": "Swish activation function, `swish(x) = x * sigmoid(x)`.\n\n Swish activation function which returns `x*sigmoid(x)`.\n It is a smooth, non-monotonic function that consistently matches\n or outperforms ReLU on deep networks, it is unbounded above and\n bounded below.\n\n\n Example Usage:\n\n >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)\n >>> b = tf.keras.activations.swish(a)\n >>> b.numpy()\n array([-4.1223075e-08, -2.6894143e-01, 0.0000000e+00, 7.3105860e-01,\n 2.0000000e+01], dtype=float32)\n\n Args:\n x: Input tensor.\n\n Returns:\n The swish activation applied to `x` (see reference paper for details).\n\n Reference:\n - [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941)\n ",
"language": "en",
"n_whitespaces": 156,
"n_words": 83,
"vocab_size": 72
} | 6 | Python | 6 | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | activations.py | 269,309 | 2 | 15 | swish | https://github.com/keras-team/keras.git | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | 10 | 1 | 80,023 | 8 |
2 | 15 | def item_details(doctype, txt, searchfield, start, page_len, filters):
from erpnext.controllers.queries import get_match_cond
return frappe.db.sql(
% ("%s", searchfield, "%s", get_match_cond(doctype), "%s", "%s"),
((filters or {}).get("delivery_note"), "%%%s%%" % txt, page_len, start),
)
| erpnext/stock/doctype/packing_slip/packing_slip.py | 110 | erpnext | {
"docstring": "select name, item_name, description from `tabItem`\n\t\t\t\twhere name in ( select item_code FROM `tabDelivery Note Item`\n\t \t\t\t\t\t\twhere parent= %s)\n\t \t\t\tand %s like \"%s\" %s\n\t \t\t\tlimit %s offset %s ",
"language": "en",
"n_whitespaces": 28,
"n_words": 28,
"vocab_size": 23
} | 29 | Python | 24 | 00ef499739959630cd7cf97419fbb6ca59be05f2 | packing_slip.py | 68,812 | 11 | 72 | item_details | https://github.com/frappe/erpnext.git | refactor: use db independent offset syntax (#31345)
* chore: use db independent offset syntax
* fix: typo
* style: reformat code to black spec
Co-authored-by: Ankush Menat <[email protected]> | 23 | 0 | 14,893 | 13 |
|
3 | 14 | def export_probs(self) -> dict[str, Any]:
result = {}
for module in self.nas_modules:
try:
result.update(module.export_probs(memo=result))
except NotImplementedError:
warnings.warn(
'Some super-modules you have used did not implement export_probs. You might find some logs are missing.',
UserWarning
)
return result
| nni/nas/oneshot/pytorch/base_lightning.py | 86 | nni | {
"docstring": "\n Export the probability of every choice in the search space got chosen.\n\n .. note:: If such method of some modules is not implemented, they will be simply ignored.\n\n Returns\n -------\n dict\n In most cases, keys are names of ``nas_modules`` suffixed with ``/`` and choice name.\n Values are the probability / logits depending on the implementation.\n ",
"language": "en",
"n_whitespaces": 120,
"n_words": 55,
"vocab_size": 47
} | 37 | Python | 36 | f77db747d07d5c90a3a9f70bb17f71d4573f329e | base_lightning.py | 113,318 | 22 | 52 | export_probs | https://github.com/microsoft/nni.git | Enhancement of one-shot NAS (v2.9) (#5049) | 170 | 0 | 24,885 | 14 |
|
4 | 7 | def clear(self):
self._block_partition_refs = [None for _ in self._block_partition_refs]
self._block_partition_meta_refs = [
None for _ in self._block_partition_meta_refs
]
self._cached_metadata = [None for _ in self._cached_metadata]
self._stats_actor = None
| python/ray/data/_internal/lazy_block_list.py | 78 | ray | {
"docstring": "Clears all object references (block partitions and base block partitions)\n from this lazy block list.\n ",
"language": "en",
"n_whitespaces": 29,
"n_words": 15,
"vocab_size": 14
} | 28 | Python | 16 | b1cad0a1121c06cae55aaed32f2b901b2b725521 | lazy_block_list.py | 127,026 | 7 | 50 | clear | https://github.com/ray-project/ray.git | [Datasets] Use detached lifetime for stats actor (#25271)
The actor handle held at Ray client will become dangling if the Ray cluster is shutdown, and in such case if the user tries to get the actor again it will result in crash. This happened in a real user and blocked them from making progress.
This change makes the stats actor detached, and instead of keeping a handle, we access it via its name. This way we can make sure re-create this actor if the cluster gets restarted.
Co-authored-by: Ubuntu <[email protected]> | 81 | 0 | 28,336 | 9 |
|
4 | 13 | def _read_all_pages(self, endpoint):
internal_data = []
while True:
resp = self._session.get(endpoint)
if resp.status_code == 200:
internal_data += resp.json()
if "next" in resp.links:
endpoint = resp.links["next"]["url"]
else:
logger.debug("Exiting pagination loop")
break
else:
logger.warning(f"Request to {endpoint} return HTTP {resp.status_code}")
break
return internal_data
| .github/scripts/github.py | 149 | paperless-ngx | {
"docstring": "\n Helper function to read all pages of an endpoint, utilizing the\n next.url until exhausted. Assumes the endpoint returns a list\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 20,
"vocab_size": 19
} | 40 | Python | 32 | 0b8eff9643c12aa7c766538d8a3e4194934cf44c | github.py | 319,987 | 15 | 78 | _read_all_pages | https://github.com/paperless-ngx/paperless-ngx.git | Extends the cleanup of image versions to the library images and all the registry cache images as well | 233 | 0 | 117,041 | 15 |
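The helper above walks RFC 5988 rel="next" links until the pages run out; a hedged, generic sketch of the same pattern using `requests` (function and variable names here are illustrative, not part of the original script):

```python
import requests

def read_all_pages(session: requests.Session, endpoint: str) -> list:
    """Collect every page of a JSON list endpoint by following rel="next" links."""
    items = []
    while endpoint:
        resp = session.get(endpoint)
        if resp.status_code != 200:
            break
        items += resp.json()
        # requests exposes parsed Link headers through resp.links
        endpoint = resp.links.get("next", {}).get("url")
    return items
```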
|
1 | 3 | def lazy(func, *resultclasses):
| django/utils/functional.py | 17 | django | {
"docstring": "\n Turn any callable into a lazy evaluated callable. result classes or types\n is required -- at least one is needed so that the automatic forcing of\n the lazy evaluation code is triggered. Results are not memoized; the\n function is evaluated on every access.\n ",
"language": "en",
"n_whitespaces": 59,
"n_words": 43,
"vocab_size": 36
} | 3 | Python | 3 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | functional.py | 206,667 | 31 | 68 | lazy | https://github.com/django/django.git | Refs #33476 -- Reformatted code with Black. | 6 | 0 | 51,617 | 6 |
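A small sketch of `lazy` in use; evaluation is deferred until the proxy is forced (here via `str()`) and results are not memoized, matching the docstring above. Django only needs to be installed, not configured, for this helper:

```python
from django.utils.functional import lazy

lazy_upper = lazy(lambda s: s.upper(), str)  # at least one result class is required
proxy = lazy_upper("hello")                  # nothing evaluated yet
print(str(proxy))                            # HELLO -- evaluated on access
```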
|
3 | 22 | def chromatic_polynomial(G):
r
import sympy
x = sympy.Symbol("x")
stack = deque()
stack.append(nx.MultiGraph(G, contraction_idx=0))
polynomial = 0
while stack:
G = stack.pop()
edges = list(G.edges)
if not edges:
polynomial += (-1) ** G.graph["contraction_idx"] * x ** len(G)
else:
e = edges[0]
C = nx.contracted_edge(G, e, self_loops=True)
C.graph["contraction_idx"] = G.graph["contraction_idx"] + 1
C.remove_edge(e[0], e[0])
G.remove_edge(*e)
stack.append(G)
stack.append(C)
return polynomial
| networkx/algorithms/polynomials.py | 253 | networkx | {
"docstring": "Returns the chromatic polynomial of `G`\n\n This function computes the chromatic polynomial via an iterative version of\n the deletion-contraction algorithm.\n\n The chromatic polynomial `X_G(x)` is a fundamental graph polynomial\n invariant in one variable. Evaluating `X_G(k)` for an natural number `k`\n enumerates the proper k-colorings of `G`.\n\n There are several equivalent definitions; here are three:\n\n Def 1 (explicit formula):\n For `G` an undirected graph, `c(G)` the number of connected components of\n `G`, `E` the edge set of `G`, and `G(S)` the spanning subgraph of `G` with\n edge set `S` [1]_:\n\n .. math::\n\n X_G(x) = \\sum_{S \\subseteq E} (-1)^{|S|} x^{c(G(S))}\n\n\n Def 2 (interpolating polynomial):\n For `G` an undirected graph, `n(G)` the number of vertices of `G`, `k_0 = 0`,\n and `k_i` the number of distinct ways to color the vertices of `G` with `i`\n unique colors (for `i` a natural number at most `n(G)`), `X_G(x)` is the\n unique Lagrange interpolating polynomial of degree `n(G)` through the points\n `(0, k_0), (1, k_1), \\dots, (n(G), k_{n(G)})` [2]_.\n\n\n Def 3 (chromatic recurrence):\n For `G` an undirected graph, `G-e` the graph obtained from `G` by deleting\n edge `e`, `G/e` the graph obtained from `G` by contracting edge `e`, `n(G)`\n the number of vertices of `G`, and `e(G)` the number of edges of `G` [3]_:\n\n .. math::\n X_G(x) = \\begin{cases}\n \t x^{n(G)}, & \\text{if $e(G)=0$} \\\\\n X_{G-e}(x) - X_{G/e}(x), & \\text{otherwise, for an arbitrary edge $e$}\n \\end{cases}\n\n This formulation is also known as the Fundamental Reduction Theorem [4]_.\n\n\n Parameters\n ----------\n G : NetworkX graph\n\n Returns\n -------\n instance of `sympy.core.add.Add`\n A Sympy expression representing the chromatic polynomial for `G`.\n\n Examples\n --------\n >>> C = nx.cycle_graph(5)\n >>> nx.chromatic_polynomial(C)\n x**5 - 5*x**4 + 10*x**3 - 10*x**2 + 4*x\n\n >>> G = nx.complete_graph(4)\n >>> nx.chromatic_polynomial(G)\n x**4 - 6*x**3 + 11*x**2 - 6*x\n\n Notes\n -----\n Interpretation of the coefficients is discussed in [5]_. Several special\n cases are listed in [2]_.\n\n The chromatic polynomial is a specialization of the Tutte polynomial; in\n particular, `X_G(x) = `T_G(x, 0)` [6]_.\n\n The chromatic polynomial may take negative arguments, though evaluations\n may not have chromatic interpretations. For instance, `X_G(-1)` enumerates\n the acyclic orientations of `G` [7]_.\n\n References\n ----------\n .. [1] D. B. West,\n \"Introduction to Graph Theory,\" p. 222\n .. [2] E. W. Weisstein\n \"Chromatic Polynomial\"\n MathWorld--A Wolfram Web Resource\n https://mathworld.wolfram.com/ChromaticPolynomial.html\n .. [3] D. B. West,\n \"Introduction to Graph Theory,\" p. 221\n .. [4] J. Zhang, J. Goodall,\n \"An Introduction to Chromatic Polynomials\"\n https://math.mit.edu/~apost/courses/18.204_2018/Julie_Zhang_paper.pdf\n .. [5] R. C. Read,\n \"An Introduction to Chromatic Polynomials\"\n Journal of Combinatorial Theory, 1968\n https://math.berkeley.edu/~mrklug/ReadChromatic.pdf\n .. [6] W. T. Tutte,\n \"Graph-polynomials\"\n Advances in Applied Mathematics, 2004\n https://www.sciencedirect.com/science/article/pii/S0196885803000411\n .. [7] R. P. Stanley,\n \"Acyclic orientations of graphs\"\n Discrete Mathematics, 2006\n https://math.mit.edu/~rstan/pubs/pubfiles/18.pdf\n ",
"language": "en",
"n_whitespaces": 745,
"n_words": 437,
"vocab_size": 259
} | 57 | Python | 45 | a3a383f7a90e478df40bc9d746c925f2c94a5a2b | polynomials.py | 176,832 | 120 | 154 | chromatic_polynomial | https://github.com/networkx/networkx.git | Chromatic polynomial (#5675)
Adds chromatic_polynomial function to the graph polynomials package. | 196 | 0 | 42,128 | 14 |
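Reusing the doctests from the docstring above; the function returns a SymPy expression, so both SymPy and a NetworkX release containing this addition are assumed to be installed:

```python
import networkx as nx

print(nx.chromatic_polynomial(nx.cycle_graph(5)))    # x**5 - 5*x**4 + 10*x**3 - 10*x**2 + 4*x
print(nx.chromatic_polynomial(nx.complete_graph(4)))  # x**4 - 6*x**3 + 11*x**2 - 6*x
```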
|
4 | 25 | def reidemeister_presentation(fp_grp, H, C=None, homomorphism=False):
if not C:
C = coset_enumeration_r(fp_grp, H)
C.compress(); C.standardize()
define_schreier_generators(C, homomorphism=homomorphism)
reidemeister_relators(C)
gens, rels = C._schreier_generators, C._reidemeister_relators
gens, rels = simplify_presentation(gens, rels, change_gens=True)
C.schreier_generators = tuple(gens)
C.reidemeister_relators = tuple(rels)
if homomorphism:
_gens = []
for gen in gens:
_gens.append(C._schreier_gen_elem[str(gen)])
return C.schreier_generators, C.reidemeister_relators, _gens
return C.schreier_generators, C.reidemeister_relators
FpGroupElement = FreeGroupElement
| sympy/combinatorics/fp_groups.py | 217 | sympy | {
"docstring": "\n Parameters\n ==========\n\n fp_group: A finitely presented group, an instance of FpGroup\n H: A subgroup whose presentation is to be found, given as a list\n of words in generators of `fp_grp`\n homomorphism: When set to True, return a homomorphism from the subgroup\n to the parent group\n\n Examples\n ========\n\n >>> from sympy.combinatorics import free_group\n >>> from sympy.combinatorics.fp_groups import FpGroup, reidemeister_presentation\n >>> F, x, y = free_group(\"x, y\")\n\n Example 5.6 Pg. 177 from [1]\n >>> f = FpGroup(F, [x**3, y**5, (x*y)**2])\n >>> H = [x*y, x**-1*y**-1*x*y*x]\n >>> reidemeister_presentation(f, H)\n ((y_1, y_2), (y_1**2, y_2**3, y_2*y_1*y_2*y_1*y_2*y_1))\n\n Example 5.8 Pg. 183 from [1]\n >>> f = FpGroup(F, [x**3, y**3, (x*y)**3])\n >>> H = [x*y, x*y**-1]\n >>> reidemeister_presentation(f, H)\n ((x_0, y_0), (x_0**3, y_0**3, x_0*y_0*x_0*y_0*x_0*y_0))\n\n Exercises Q2. Pg 187 from [1]\n >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3])\n >>> H = [x]\n >>> reidemeister_presentation(f, H)\n ((x_0,), (x_0**4,))\n\n Example 5.9 Pg. 183 from [1]\n >>> f = FpGroup(F, [x**3*y**-3, (x*y)**3, (x*y**-1)**2])\n >>> H = [x]\n >>> reidemeister_presentation(f, H)\n ((x_0,), (x_0**6,))\n\n ",
"language": "en",
"n_whitespaces": 276,
"n_words": 160,
"vocab_size": 96
} | 54 | Python | 41 | 498015021131af4dbb07eb110e5badaba8250c7b | fp_groups.py | 196,052 | 16 | 136 | reidemeister_presentation | https://github.com/sympy/sympy.git | Updated import locations | 125 | 0 | 47,552 | 14 |
|
1 | 8 | def test_preserve_username_case(self):
user = User.objects.create_user("forms_test2", "[email protected]", "test")
self.assertEqual(user.email, "[email protected]")
user = User.objects.create_user("forms_test3", "tesT", "test")
self.assertEqual(user.email, "tesT")
| tests/auth_tests/test_forms.py | 99 | django | {
"docstring": "\n Preserve the case of the user name (before the @ in the email address)\n when creating a user (#5605).\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 19,
"vocab_size": 15
} | 16 | Python | 12 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | test_forms.py | 201,228 | 5 | 54 | test_preserve_username_case | https://github.com/django/django.git | Refs #33476 -- Reformatted code with Black. | 51 | 0 | 49,906 | 9 |
|
1 | 4 | def clean_up_synthetic_data():
shutil.rmtree("audio_files", ignore_errors=True)
shutil.rmtree("image_files", ignore_errors=True)
| ludwig/utils/triton_utils.py | 46 | ludwig | {
"docstring": "Clean up synthetic example generated data for audio and image features.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | 6 | Python | 5 | ed8d9cf20843744f18593b22fb6a30eaf5f325eb | triton_utils.py | 7,519 | 3 | 25 | clean_up_synthetic_data | https://github.com/ludwig-ai/ludwig.git | Triton ensemble export (#2251) | 15 | 0 | 1,227 | 8 |
|
6 | 24 | def load_weights_only(model, filepath):
temp_dir = None
archive = None
if filepath.endswith(".weights.h5"):
# TODO: download file if h5 filepath is remote
weights_store = H5IOStore(filepath, mode="r")
elif filepath.endswith(".keras"):
archive = zipfile.ZipFile(filepath, "r")
weights_store = H5IOStore(
_VARS_FNAME + ".h5", archive=archive, mode="r"
)
_load_state(
model,
weights_handler=weights_store,
assets_handler=None,
inner_path="",
visited_trackables=set(),
)
weights_store.close()
if temp_dir and tf.io.gfile.exists(temp_dir):
tf.io.gfile.rmtree(temp_dir)
if archive:
archive.close()
| keras/saving/experimental/saving_lib.py | 212 | keras | {
"docstring": "Load the weights of a model from a filepath (.keras or .weights.h5).\n\n Note: only supports h5 for now.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 18,
"vocab_size": 17
} | 55 | Python | 43 | e6f739a31247c43a86c37c33b0b8b2ba6be6a5f6 | saving_lib.py | 280,201 | 22 | 126 | load_weights_only | https://github.com/keras-team/keras.git | - Add standalone weights file saving/loading functionality.
- Switch to in-memory, single write / single read archive saving for better performance.
- Remove ability to pick between zipping or not zipping a Keras saved artifact: it's always a zip archive now.
PiperOrigin-RevId: 483705728 | 180 | 0 | 83,286 | 13 |
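
A minimal usage sketch for the loader above (the file name is hypothetical, and it assumes "model.weights.h5" was written by the matching experimental Keras saver; the import uses the internal module path from this record, which may differ in released builds):

import keras
from keras.saving.experimental import saving_lib

model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
# Restores weights in place from the HDF5-based weights archive; returns None.
saving_lib.load_weights_only(model, "model.weights.h5")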
|
1 | 11 | def cg(A, b, x0=None, *, tol=1e-5, atol=0.0, maxiter=None, M=None):
return _isolve(_cg_solve,
A=A, b=b, x0=x0, tol=tol, atol=atol,
maxiter=maxiter, M=M, check_symmetric=True)
| jax/_src/scipy/sparse/linalg.py | 91 | jax | {
"docstring": "Use Conjugate Gradient iteration to solve ``Ax = b``.\n\n The numerics of JAX's ``cg`` should exact match SciPy's ``cg`` (up to\n numerical precision), but note that the interface is slightly different: you\n need to supply the linear operator ``A`` as a function instead of a sparse\n matrix or ``LinearOperator``.\n\n Derivatives of ``cg`` are implemented via implicit differentiation with\n another ``cg`` solve, rather than by differentiating *through* the solver.\n They will be accurate only if both solves converge.\n\n Parameters\n ----------\n A: ndarray or function\n 2D array or function that calculates the linear map (matrix-vector\n product) ``Ax`` when called like ``A(x)``. ``A`` must represent a\n hermitian, positive definite matrix, and must return array(s) with the\n same structure and shape as its argument.\n b : array or tree of arrays\n Right hand side of the linear system representing a single vector. Can be\n stored as an array or Python container of array(s) with any shape.\n\n Returns\n -------\n x : array or tree of arrays\n The converged solution. Has the same structure as ``b``.\n info : None\n Placeholder for convergence information. In the future, JAX will report\n the number of iterations when convergence is not achieved, like SciPy.\n\n Other Parameters\n ----------------\n x0 : array or tree of arrays\n Starting guess for the solution. Must have the same structure as ``b``.\n tol, atol : float, optional\n Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.\n We do not implement SciPy's \"legacy\" behavior, so JAX's tolerance will\n differ from SciPy unless you explicitly pass ``atol`` to SciPy's ``cg``.\n maxiter : integer\n Maximum number of iterations. Iteration will stop after maxiter\n steps even if the specified tolerance has not been achieved.\n M : ndarray or function\n Preconditioner for A. The preconditioner should approximate the\n inverse of A. Effective preconditioning dramatically improves the\n rate of convergence, which implies that fewer iterations are needed\n to reach a given error tolerance.\n\n See also\n --------\n scipy.sparse.linalg.cg\n jax.lax.custom_linear_solve\n ",
"language": "en",
"n_whitespaces": 438,
"n_words": 314,
"vocab_size": 205
} | 19 | Python | 19 | 998d60dd07d2c33438f606307de0276bcf110428 | linalg.py | 119,883 | 4 | 71 | cg | https://github.com/google/jax.git | DOC: clarify parameter types in cg/bicgstab | 53 | 0 | 26,708 | 8 |
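
The record's docstring has no usage example; a small sketch of calling the public wrapper with a function-valued operator (the matrix and vector values are illustrative):

import jax.numpy as jnp
from jax.scipy.sparse.linalg import cg

A = jnp.array([[4.0, 1.0], [1.0, 3.0]])     # symmetric positive definite
b = jnp.array([1.0, 2.0])
x, info = cg(lambda v: A @ v, b, tol=1e-6)  # the operator is passed as a function
assert jnp.allclose(A @ x, b, atol=1e-4)    # A @ x is close to b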
|
2 | 6 | def _get_database_display_str(self, verbosity, database_name):
return "'%s'%s" % (
self.connection.alias,
(" ('%s')" % database_name) if verbosity >= 2 else "",
)
| django/db/backends/base/creation.py | 55 | django | {
"docstring": "\n Return display string for a database for use in various actions.\n ",
"language": "en",
"n_whitespaces": 26,
"n_words": 11,
"vocab_size": 10
} | 20 | Python | 19 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | creation.py | 204,841 | 5 | 33 | _get_database_display_str | https://github.com/django/django.git | Refs #33476 -- Reformatted code with Black. | 63 | 0 | 50,920 | 11 |
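
A rough sketch of the two verbosity branches of this helper (the database name is made up; assumes a configured connection whose alias is "default"):

from django.db import connection

creation = connection.creation                       # BaseDatabaseCreation instance
creation._get_database_display_str(1, "test_mydb")   # -> "'default'"
creation._get_database_display_str(2, "test_mydb")   # -> "'default' ('test_mydb')"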
|
1 | 11 | def pre_run_hook(self, instance, private_data_dir):
instance.log_lifecycle("pre_run")
# Before task is started, ensure that job_event partitions exist
create_partition(instance.event_class._meta.db_table, start=instance.created)
| awx/main/tasks/jobs.py | 54 | awx | {
"docstring": "\n Hook for any steps to run before the job/task starts\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 10
} | 17 | Python | 17 | e87fabe6bb84691472ab67e5da737c9fe515cf3f | jobs.py | 81,641 | 3 | 32 | pre_run_hook | https://github.com/ansible/awx.git | Submit job to dispatcher as part of transaction (#12573)
Make it so that submitting a task to the dispatcher happens as part of the transaction.
this applies to dispatcher task "publishers" which NOTIFY the pg_notify queue
if the transaction is not successful, it will not be sent, as per postgres docs
This keeps current behavior for pg_notify listeners
practically, this only applies for the awx-manage run_dispatcher service
this requires creating a separate connection and keeping it long-lived
arbitrary code will occasionally close the main connection, which would stop listening
Stop sending the waiting status websocket message
this is required because the ordering cannot be maintained with other changes here
the instance group data is moved to the running websocket message payload
Move call to create_partition from task manager to pre_run_hook
mock this in relevant unit tests | 45 | 0 | 17,240 | 10 |
|
3 | 30 | def make_dataset(X, y, sample_weight, random_state=None):
rng = check_random_state(random_state)
# seed should never be 0 in SequentialDataset64
seed = rng.randint(1, np.iinfo(np.int32).max)
if X.dtype == np.float32:
CSRData = CSRDataset32
ArrayData = ArrayDataset32
else:
CSRData = CSRDataset64
ArrayData = ArrayDataset64
if sp.issparse(X):
dataset = CSRData(X.data, X.indptr, X.indices, y, sample_weight, seed=seed)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
X = np.ascontiguousarray(X)
dataset = ArrayData(X, y, sample_weight, seed=seed)
intercept_decay = 1.0
return dataset, intercept_decay
| sklearn/linear_model/_base.py | 197 | scikit-learn | {
"docstring": "Create ``Dataset`` abstraction for sparse and dense inputs.\n\n This also returns the ``intercept_decay`` which is different\n for sparse datasets.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training data\n\n y : array-like, shape (n_samples, )\n Target values.\n\n sample_weight : numpy array of shape (n_samples,)\n The weight of each sample\n\n random_state : int, RandomState instance or None (default)\n Determines random number generation for dataset random sampling. It is not\n used for dataset shuffling.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n Returns\n -------\n dataset\n The ``Dataset`` abstraction\n intercept_decay\n The intercept decay\n ",
"language": "en",
"n_whitespaces": 197,
"n_words": 95,
"vocab_size": 74
} | 66 | Python | 43 | b4da3b406379b241bf5e81d0f60bbcddd424625b | _base.py | 259,567 | 17 | 130 | make_dataset | https://github.com/scikit-learn/scikit-learn.git | MNT ensure creation of dataset is deterministic in SGD (#19716)
Co-authored-by: Guillaume Lemaitre <[email protected]>
Co-authored-by: Olivier Grisel <[email protected]>
Co-authored-by: Jérémie du Boisberranger <[email protected]> | 156 | 0 | 75,821 | 12 |
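
A hedged sketch of calling this private helper with dense float64 input (random data; the import path is internal and may change between scikit-learn versions):

import numpy as np
from sklearn.linear_model._base import make_dataset

X = np.random.rand(20, 3)            # dense float64 -> ArrayDataset64 branch
y = np.random.rand(20)
sample_weight = np.ones(20)
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state=0)
assert intercept_decay == 1.0        # dense inputs use the non-sparse decay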
|
2 | 28 | def test_big_ndept() -> None:
# for multiplier in [1, 10, 100, 1000]:
for multiplier in [10]:
ndim = 1_000_000
rows = 1
cols = 7
num_entites = 1000
upper = highest()
lower = lowest()
reference_data = np.random.randint(
lower, upper, size=(multiplier * ndim, rows, cols), dtype=np.int32
)
big_ndept = NDEPT(
child=reference_data,
entities=[ishan() * num_entites],
max_vals=make_bounds(reference_data, upper),
min_vals=make_bounds(reference_data, lower),
)
ndept_metrics = time_and_size_serde(big_ndept)
print(multiplier, ndept_metrics)
# break
assert False
| packages/syft/tests/syft/core/tensor/tensor_serde_test.py | 185 | PySyft | {
"docstring": "Create big NDEPTs\n failed capnp deserialize capnp/serialize.c++:197:\n failed: expected totalWords <= options.traversalLimitInWords;\n Message is too large. To increase the limit on the receiving end,\n see capnp::ReaderOptions.\n ",
"language": "en",
"n_whitespaces": 42,
"n_words": 26,
"vocab_size": 25
} | 67 | Python | 53 | b2768484a1b5720be74c78335502cd996e0b1895 | tensor_serde_test.py | 735 | 27 | 118 | test_big_ndept | https://github.com/OpenMined/PySyft.git | WIP: Having issue with 10M NDEPT serde
- options.traversalLimitInWords; Message is too large. | 229 | 0 | 107 | 15 |
|
2 | 6 | def popitem(self):
for key in self.sections():
value = self[key]
del self[key]
return key, value
raise KeyError
| python3.10.4/Lib/configparser.py | 52 | XX-Net | {
"docstring": "Remove a section from the parser and return it as\n a (section_name, section_proxy) tuple. If no section is present, raise\n KeyError.\n\n The section DEFAULT is never returned because it cannot be removed.\n ",
"language": "en",
"n_whitespaces": 60,
"n_words": 32,
"vocab_size": 27
} | 16 | Python | 14 | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | configparser.py | 221,679 | 6 | 32 | popitem | https://github.com/XX-net/XX-Net.git | add python 3.10.4 for windows | 70 | 0 | 56,473 | 9 |
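
A small sketch of the behaviour described above, using the standard-library ConfigParser (the section name is made up):

import configparser

cp = configparser.ConfigParser()
cp.read_string("[server]\nhost = localhost\n")
name, section = cp.popitem()      # ('server', <Section: server>); DEFAULT is never returned
print(name, dict(section))        # server {'host': 'localhost'}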
|
2 | 9 | def cleanup_cache_files(self) -> Dict[str, int]:
self._check_values_type()
return {k: dataset.cleanup_cache_files() for k, dataset in self.items()}
| src/datasets/dataset_dict.py | 62 | datasets | {
"docstring": "Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is one.\n Be careful when running this command that no other process is currently using other cache files.\n\n Return:\n Dict with the number of removed files for each split\n\n Example:\n\n ```py\n >>> from datasets import load_dataset\n >>> ds = load_dataset(\"rotten_tomatoes\")\n >>> ds.cleanup_cache_files()\n {'test': 0, 'train': 0, 'validation': 0}\n ```\n ",
"language": "en",
"n_whitespaces": 148,
"n_words": 67,
"vocab_size": 55
} | 14 | Python | 14 | 1904d0c0a3a96330d9b870cdca3e9a3a137f2977 | dataset_dict.py | 104,788 | 18 | 38 | cleanup_cache_files | https://github.com/huggingface/datasets.git | Add code examples for DatasetDict (#4245)
* 📝 add code examples for DatasetDict
* 🖍 apply quentin review | 35 | 0 | 21,970 | 9 |
|
2 | 14 | def test_recursion_to_deep(large_import_chain):
if is_win:
pytest.xfail("Worker is known to crash on Windows.")
path, script = large_import_chain
mg = modulegraph.ModuleGraph(path)
    # Increase the recursion limit to 5 times the default. Given the module import chain
    # created above, this should still fail.
with pytest.raises(RecursionError):
mg.add_script(str(script))
| tests/unit/test_recursion_limit.py | 83 | pyinstaller | {
"docstring": "\n modulegraph is recursive and triggers RecursionError if nesting of imported modules is to deep.\n This can be worked around by increasing recursion limit.\n\n With the default recursion limit (1000), the recursion error occurs at about 115 modules, with limit 2000\n (as tested below) at about 240 modules, and with limit 5000 at about 660 modules.\n ",
"language": "en",
"n_whitespaces": 71,
"n_words": 55,
"vocab_size": 42
} | 43 | Python | 39 | 080d95d83bb7f60ce2ec25b0c81c207d303ec46c | test_recursion_limit.py | 262,741 | 7 | 45 | test_recursion_to_deep | https://github.com/pyinstaller/pyinstaller.git | Drop Python 3.6 support. | 78 | 0 | 77,340 | 11 |
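
The docstring's workaround of raising the recursion limit might look like this (the factor of 5 mirrors the comment in the test; the old limit is restored afterwards):

import sys

old_limit = sys.getrecursionlimit()   # typically 1000
sys.setrecursionlimit(5 * old_limit)
try:
    pass                              # run the deep ModuleGraph analysis here
finally:
    sys.setrecursionlimit(old_limit)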
|
4 | 12 | def field_as_sql(self, field, val):
if field is None:
# A field value of None means the value is raw.
sql, params = val, []
elif hasattr(val, "as_sql"):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, "get_placeholder"):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = "%s", [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
| django/db/models/sql/compiler.py | 168 | django | {
"docstring": "\n Take a field and a value intended to be saved on that field, and\n return placeholder SQL and accompanying params. Check for raw values,\n expressions, and fields with get_placeholder() defined in that order.\n\n When field is None, consider the value raw and use it as the\n placeholder, with no corresponding parameters returned.\n ",
"language": "en",
"n_whitespaces": 95,
"n_words": 52,
"vocab_size": 41
} | 136 | Python | 90 | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | compiler.py | 205,824 | 11 | 98 | field_as_sql | https://github.com/django/django.git | Refs #33476 -- Reformatted code with Black. | 319 | 0 | 51,226 | 13 |
|
1 | 16 | def psi_n(n, x, m, omega):
# sympify arguments
n, x, m, omega = map(S, [n, x, m, omega])
nu = m * omega / hbar
# normalization coefficient
C = (nu/pi)**Rational(1, 4) * sqrt(1/(2**n*factorial(n)))
return C * exp(-nu* x**2 /2) * hermite(n, sqrt(nu)*x)
| sympy/physics/qho_1d.py | 146 | sympy | {
"docstring": "\n Returns the wavefunction psi_{n} for the One-dimensional harmonic oscillator.\n\n Parameters\n ==========\n\n n :\n the \"nodal\" quantum number. Corresponds to the number of nodes in the\n wavefunction. ``n >= 0``\n x :\n x coordinate.\n m :\n Mass of the particle.\n omega :\n Angular frequency of the oscillator.\n\n Examples\n ========\n\n >>> from sympy.physics.qho_1d import psi_n\n >>> from sympy.abc import m, x, omega\n >>> psi_n(0, x, m, omega)\n (m*omega)**(1/4)*exp(-m*omega*x**2/(2*hbar))/(hbar**(1/4)*pi**(1/4))\n\n ",
"language": "en",
"n_whitespaces": 146,
"n_words": 66,
"vocab_size": 46
} | 43 | Python | 31 | a0989bcfd26470833cf03737941bfd80f511c745 | qho_1d.py | 199,984 | 5 | 97 | psi_n | https://github.com/sympy/sympy.git | applied backtick correction to the remainder of the project | 64 | 0 | 49,473 | 14 |
|
2 | 6 | async def async_get_hev_cycle(self) -> None:
if lifx_features(self.device)["hev"]:
await async_execute_lifx(self.device.get_hev_cycle)
| homeassistant/components/lifx/coordinator.py | 51 | core | {
"docstring": "Update the HEV cycle status from a LIFX Clean bulb.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | 9 | Python | 9 | dde763418a1c4ee0ecff17de76b6d670670a3bb7 | coordinator.py | 289,881 | 4 | 28 | async_get_hev_cycle | https://github.com/home-assistant/core.git | Add an RSSI sensor to the LIFX integration (#80993) | 34 | 0 | 89,009 | 12 |
|
6 | 10 | def subsets(seq, k=None, repetition=False):
if k is None:
if not repetition:
return chain.from_iterable((combinations(seq, k)
for k in range(len(seq) + 1)))
else:
return chain.from_iterable((combinations_with_replacement(seq, k)
for k in range(len(seq) + 1)))
else:
if not repetition:
return combinations(seq, k)
else:
return combinations_with_replacement(seq, k)
| sympy/utilities/iterables.py | 155 | sympy | {
"docstring": "Generates all `k`-subsets (combinations) from an `n`-element set, ``seq``.\n\n A `k`-subset of an `n`-element set is any subset of length exactly `k`. The\n number of `k`-subsets of an `n`-element set is given by ``binomial(n, k)``,\n whereas there are `2^n` subsets all together. If `k` is ``None`` then all\n `2^n` subsets will be returned from shortest to longest.\n\n Examples\n ========\n\n >>> from sympy.utilities.iterables import subsets\n\n ``subsets(seq, k)`` will return the `\\frac{n!}{k!(n - k)!}` `k`-subsets (combinations)\n without repetition, i.e. once an item has been removed, it can no\n longer be \"taken\":\n\n >>> list(subsets([1, 2], 2))\n [(1, 2)]\n >>> list(subsets([1, 2]))\n [(), (1,), (2,), (1, 2)]\n >>> list(subsets([1, 2, 3], 2))\n [(1, 2), (1, 3), (2, 3)]\n\n\n ``subsets(seq, k, repetition=True)`` will return the `\\frac{(n - 1 + k)!}{k!(n - 1)!}`\n combinations *with* repetition:\n\n >>> list(subsets([1, 2], 2, repetition=True))\n [(1, 1), (1, 2), (2, 2)]\n\n If you ask for more items than are in the set you get the empty set unless\n you allow repetitions:\n\n >>> list(subsets([0, 1], 3, repetition=False))\n []\n >>> list(subsets([0, 1], 3, repetition=True))\n [(0, 0, 0), (0, 0, 1), (0, 1, 1), (1, 1, 1)]\n\n ",
"language": "en",
"n_whitespaces": 313,
"n_words": 184,
"vocab_size": 117
} | 42 | Python | 23 | a25ba231f9c3fd6518f9ae81d1df0323898b9e44 | iterables.py | 197,085 | 52 | 100 | subsets | https://github.com/sympy/sympy.git | Optimization of subsets() to use return rather than yield from
By avoiding the use of yield in the body of iterables.subsets, Python
sees it as just a regular function rather than a generator. Hence it
can call generators and return the resulting generator objects,
avoiding some overhead from a layer of yield from handling. | 200 | 0 | 48,328 | 19 |
|
1 | 33 | async def test_doorbell_update_via_pubnub(hass):
doorbell_one = await _mock_doorbell_from_fixture(hass, "get_doorbell.json")
pubnub = AugustPubNub()
await _create_august_with_devices(hass, [doorbell_one], pubnub=pubnub)
assert doorbell_one.pubsub_channel == "7c7a6672-59c8-3333-ffff-dcd98705cccc"
binary_sensor_k98gidt45gul_name_motion = hass.states.get(
"binary_sensor.k98gidt45gul_name_motion"
)
assert binary_sensor_k98gidt45gul_name_motion.state == STATE_OFF
binary_sensor_k98gidt45gul_name_ding = hass.states.get(
"binary_sensor.k98gidt45gul_name_ding"
)
assert binary_sensor_k98gidt45gul_name_ding.state == STATE_OFF
pubnub.message(
pubnub,
Mock(
channel=doorbell_one.pubsub_channel,
timetoken=_timetoken(),
message={
"status": "imagecapture",
"data": {
"result": {
"created_at": "2021-03-16T01:07:08.817Z",
"secure_url": "https://dyu7azbnaoi74.cloudfront.net/zip/images/zip.jpeg",
},
},
},
),
)
await hass.async_block_till_done()
binary_sensor_k98gidt45gul_name_image_capture = hass.states.get(
"binary_sensor.k98gidt45gul_name_image_capture"
)
assert binary_sensor_k98gidt45gul_name_image_capture.state == STATE_ON
pubnub.message(
pubnub,
Mock(
channel=doorbell_one.pubsub_channel,
timetoken=_timetoken(),
message={
"status": "doorbell_motion_detected",
"data": {
"event": "doorbell_motion_detected",
"image": {
"height": 640,
"width": 480,
"format": "jpg",
"created_at": "2021-03-16T02:36:26.886Z",
"bytes": 14061,
"secure_url": "https://dyu7azbnaoi74.cloudfront.net/images/1f8.jpeg",
"url": "https://dyu7azbnaoi74.cloudfront.net/images/1f8.jpeg",
"etag": "09e839331c4ea59eef28081f2caa0e90",
},
"doorbellName": "Front Door",
"callID": None,
"origin": "mars-api",
"mutableContent": True,
},
},
),
)
await hass.async_block_till_done()
binary_sensor_k98gidt45gul_name_motion = hass.states.get(
"binary_sensor.k98gidt45gul_name_motion"
)
assert binary_sensor_k98gidt45gul_name_motion.state == STATE_ON
binary_sensor_k98gidt45gul_name_ding = hass.states.get(
"binary_sensor.k98gidt45gul_name_ding"
)
assert binary_sensor_k98gidt45gul_name_ding.state == STATE_OFF
new_time = dt_util.utcnow() + datetime.timedelta(seconds=40)
native_time = datetime.datetime.now() + datetime.timedelta(seconds=40)
with patch(
"homeassistant.components.august.binary_sensor._native_datetime",
return_value=native_time,
):
async_fire_time_changed(hass, new_time)
await hass.async_block_till_done()
binary_sensor_k98gidt45gul_name_image_capture = hass.states.get(
"binary_sensor.k98gidt45gul_name_image_capture"
)
assert binary_sensor_k98gidt45gul_name_image_capture.state == STATE_OFF
pubnub.message(
pubnub,
Mock(
channel=doorbell_one.pubsub_channel,
timetoken=_timetoken(),
message={
"status": "buttonpush",
},
),
)
await hass.async_block_till_done()
binary_sensor_k98gidt45gul_name_ding = hass.states.get(
"binary_sensor.k98gidt45gul_name_ding"
)
assert binary_sensor_k98gidt45gul_name_ding.state == STATE_ON
new_time = dt_util.utcnow() + datetime.timedelta(seconds=40)
native_time = datetime.datetime.now() + datetime.timedelta(seconds=40)
with patch(
"homeassistant.components.august.binary_sensor._native_datetime",
return_value=native_time,
):
async_fire_time_changed(hass, new_time)
await hass.async_block_till_done()
binary_sensor_k98gidt45gul_name_ding = hass.states.get(
"binary_sensor.k98gidt45gul_name_ding"
)
assert binary_sensor_k98gidt45gul_name_ding.state == STATE_OFF
| tests/components/august/test_binary_sensor.py | 819 | core | {
"docstring": "Test creation of a doorbell that can be updated via pubnub.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | 211 | Python | 87 | ea5b18c1ef16b64cd7916f2540692ab5de2d2edf | test_binary_sensor.py | 309,149 | 109 | 475 | test_doorbell_update_via_pubnub | https://github.com/home-assistant/core.git | Split august motion and image capture binary sensors (#62154) | 1,162 | 0 | 107,857 | 17 |
|
4 | 8 | def get_span_dict(span_list):
strip_prefix = "python.ray.tests."
span_names = {}
for span in span_list:
span_name = span["name"]
if span_name.startswith(strip_prefix):
span_name = span_name[len(strip_prefix) :]
if span_name in span_names:
span_names[span_name] += 1
else:
span_names[span_name] = 1
return span_names
| python/ray/tests/test_tracing.py | 107 | ray | {
"docstring": "Given a list of span names, return dictionary of span names.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 9
} | 34 | Python | 23 | 2cdb76789e6d0d59928891a4b520f588b7844edf | test_tracing.py | 124,698 | 12 | 63 | get_span_dict | https://github.com/ray-project/ray.git | Bump pytest from 5.4.3 to 7.0.1 (#26334)
See #23676 for context. This is another attempt at that as I figured out what's going wrong in `bazel test`. Supersedes #24828.
Now that there are Python 3.10 wheels for Ray 1.13 and this is no longer a blocker for supporting Python 3.10, I still want to make `bazel test //python/ray/tests/...` work for developing in a 3.10 env, and make it easier to add Python 3.10 tests to CI in future.
The change contains three commits with rather descriptive commit message, which I repeat here:
Pass deps to py_test in py_test_module_list
Bazel macro py_test_module_list takes a `deps` argument, but completely
ignores it instead of passes it to `native.py_test`. Fixing that as we
are going to use deps of py_test_module_list in BUILD in later changes.
cpp/BUILD.bazel depends on the broken behaviour: it deps-on a cc_library
from a py_test, which isn't working, see upstream issue:
https://github.com/bazelbuild/bazel/issues/701.
This is fixed by simply removing the (non-working) deps.
Depend on conftest and data files in Python tests BUILD files
Bazel requires that all the files used in a test run should be
represented in the transitive dependencies specified for the test
target. For py_test, it means srcs, deps and data.
Bazel enforces this constraint by creating a "runfiles" directory,
symbolic links files in the dependency closure and run the test in the
"runfiles" directory, so that the test shouldn't see files not in the
dependency graph.
Unfortunately, the constraint does not apply for a large number of
Python tests, due to pytest (>=3.9.0, <6.0) resolving these symbolic
links during test collection and effectively "breaks out" of the
runfiles tree.
pytest >= 6.0 introduces a breaking change and removed the symbolic link
resolving behaviour, see pytest pull request
https://github.com/pytest-dev/pytest/pull/6523 for more context.
Currently, we are underspecifying dependencies in a lot of BUILD files
and thus blocking us from updating to newer pytest (for Python 3.10
support). This change hopefully fixes all of them, and at least those in
CI, by adding data or source dependencies (mostly for conftest.py-s)
where needed.
Bump pytest version from 5.4.3 to 7.0.1
We want at least pytest 6.2.5 for Python 3.10 support, but not past
7.1.0 since it drops Python 3.6 support (which Ray still supports), thus
the version constraint is set to <7.1.
Updating pytest, combined with earlier BUILD fixes, changed the ground
truth of a few error message based unit test, these tests are updated to
reflect the change.
There are also two small drive-by changes for making test_traceback and
test_cli pass under Python 3.10. These are discovered while debugging CI
failures (on earlier Python) with a Python 3.10 install locally. Expect
more such issues when adding Python 3.10 to CI. | 110 | 0 | 27,661 | 14 |
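
Because the docstring is terse, here is what the helper returns for illustrative input (the span names are made up):

spans = [
    {"name": "python.ray.tests.test_tracing.f"},
    {"name": "python.ray.tests.test_tracing.f"},
    {"name": "other.module.g"},
]
print(get_span_dict(spans))   # {'test_tracing.f': 2, 'other.module.g': 1}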
|
3 | 23 | async def dry_run(self, empty, context) -> jina_pb2.StatusProto:
from docarray import DocumentArray
from jina.clients.request import request_generator
from jina.enums import DataInputType
from jina.serve.executors import __dry_run_endpoint__
da = DocumentArray()
try:
req_iterator = request_generator(
exec_endpoint=__dry_run_endpoint__,
data=da,
data_type=DataInputType.DOCUMENT,
) | jina/serve/runtimes/gateway/grpc/__init__.py | 100 | async def dry_run(self, empty, context) -> jina_pb2.StatusProto:
"""
        Process the call requested by having a dry run call to every Executor in the graph
:param empty: The service expects an empty protobuf message
:param context: grpc context
:returns: the response request
"""
from docarray import DocumentArray
from jina.clients.request import request_generator
from jina.enums import DataInputType
from jina.serve.executors import __dry_run_endpoint__
da = DocumentArray()
try:
req_iterator = request_generator(
exec_endpoint=__dry_run_endpoint__,
data=da,
data_type=DataInputType.DOCUMENT,
) | jina | {
"docstring": "\n Process the the call requested by having a dry run call to every Executor in the graph\n\n :param empty: The service expects an empty protobuf message\n :param context: grpc context\n :returns: the response request\n ",
"language": "en",
"n_whitespaces": 70,
"n_words": 34,
"vocab_size": 29
} | 34 | Python | 27 | ef662b529b2a2eecea7bb99759a9f7b9d86d3062 | __init__.py | 12,508 | 28 | 121 | dry_run | https://github.com/jina-ai/jina.git | feat: add grpc health checking (#4779) | 150 | 1 | 2,326 | 10 |
1 | 7 | def test_parsing_of_open_actions(self):
from kitty.open_actions import actions_for_url, KeyAction
spec =
| kitty_tests/open_actions.py | 28 | kitty | {
"docstring": "\nprotocol file\nmime text/*\nfragment_matches .\nAcTion launch $EDITOR $FILE_PATH $FRAGMENT\naction\n\nprotocol file\nmime text/*\naction ignored\n\next py,txt\naction one\naction two\n",
"language": "en",
"n_whitespaces": 13,
"n_words": 24,
"vocab_size": 17
} | 9 | Python | 9 | 1454af2d416f0eb738c2268ee3297cacb0215dd0 | open_actions.py | 102,829 | 22 | 68 | test_parsing_of_open_actions | https://github.com/kovidgoyal/kitty.git | macOS: Allow customizing the launch actions | 23 | 0 | 21,573 | 7 |