Dataset schema (column: type, observed range; string ranges are string lengths):
ast_errors: string, length 0–3.2k
d_id: int64, 44–121k
id: int64, 70–338k
n_whitespaces: int64, 3–14k
path: string, length 8–134
n_words: int64, 4–4.82k
n_identifiers: int64, 1–131
random_cut: string, length 16–15.8k
commit_message: string, length 2–15.3k
fun_name: string, length 1–84
commit_id: string, length 40–40
repo: string, length 3–28
file_name: string, length 5–79
ast_levels: int64, 6–31
nloc: int64, 1–548
url: string, length 31–59
complexity: int64, 1–66
token_counts: int64, 6–2.13k
n_ast_errors: int64, 0–28
vocab_size: int64, 4–1.11k
n_ast_nodes: int64, 15–19.2k
language: string, 1 distinct value
documentation: dict
code: string, length 101–62.2k
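For context, below is a minimal sketch of how a dataset with this schema could be loaded and inspected with the Hugging Face `datasets` library. The repository id used here is a hypothetical placeholder, since the dataset's actual Hub name is not given above.

```python
# Minimal sketch, assuming the dataset is published on the Hugging Face Hub.
# "org/python-commit-functions" is a hypothetical placeholder id, not the real name.
from datasets import load_dataset

ds = load_dataset("org/python-commit-functions", split="train")

# The declared features should mirror the schema above
# (string columns such as `code`, int64 columns such as `nloc`).
print(ds.features)

# Inspect one record: where the function lives and what the commit did.
row = ds[0]
print(row["repo"], row["path"], row["fun_name"])
print(row["commit_message"].splitlines()[0])  # first line of the commit message
print(row["code"][:200])                      # preview of the function body
```

The rows that follow are sample records from the dataset, shown field by field in the column order above.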
ast_errors: @Deprecated(message=deprecation_message) @dataclass
d_id: 27,474 | id: 123,916 | n_whitespaces: 110 | path: python/ray/util/ml_utils/checkpoint_manager.py | n_words: 49 | n_identifiers: 12
random_cut:
def _tune_legacy_checkpoint_score_attr(self) -> Optional[str]: if self.checkpoint_score_attribute is None: return self.checkpoint_score_attribute prefix = "" if self.checkpoint_score_order == MIN: prefix = "min-" return f"{prefix}{self.checkpoint_score_attribute}" # Alias for backwards compatibility deprecation_message = ( "`CheckpointStrategy` is deprecated and will be removed in " "the future. Please use `ray.air.config.CheckpointStrategy` " "instead." ) @Deprecated(message=deprecation_message) @da
[AIR] More checkpoint configurability, `Result` extension (#25943) This PR: * Allows the user to set `keep_checkpoints_num` and `checkpoint_score_attr` in `RunConfig` using the `CheckpointStrategy` dataclass * Adds two new fields to the `Result` object - `best_checkpoints` - a list of saved best checkpoints as determined by `CheckpointingConfig`.
fun_name: _tune_legacy_checkpoint_score_attr | commit_id: dc7ed086a5038775e378b32cb31fb4a79f418dd9 | repo: ray | file_name: checkpoint_manager.py
ast_levels: 9 | nloc: 11 | url: https://github.com/ray-project/ray.git | complexity: 3 | token_counts: 38 | n_ast_errors: 1 | vocab_size: 41 | n_ast_nodes: 111 | language: Python
{ "docstring": "Same as ``checkpoint_score_attr`` in ``tune.run``.\n\n Only used for Legacy API compatibility.\n ", "language": "en", "n_whitespaces": 25, "n_words": 11, "vocab_size": 11 }
def _tune_legacy_checkpoint_score_attr(self) -> Optional[str]: if self.checkpoint_score_attribute is None: return self.checkpoint_score_attribute prefix = "" if self.checkpoint_score_order == MIN: prefix = "min-" return f"{prefix}{self.checkpoint_score_attribute}" # Alias for backwards compatibility deprecation_message = ( "`CheckpointStrategy` is deprecated and will be removed in " "the future. Please use `ray.air.config.CheckpointStrategy` " "instead." ) @Deprecated(message=deprecation_message) @dataclass
d_id: 18,600 | id: 89,979 | n_whitespaces: 451 | path: tests/sentry/api/endpoints/test_project_details.py | n_words: 59 | n_identifiers: 30
random_cut:
def test_dynamic_sampling_bias_activation(self): project = self.project # force creation project.update_option( "sentry:dynamic_sampling_biases", [ {"id": "boostEnvironments", "active": False}, ], ) self.login_as(self.user) token = ApiToken.objects.create(user=self.user, scope_list=["project:write"]) authorization = f"Bearer {token.token}" url = reverse( "sentry-api-0-project-details", kwargs={ "organization_slug": self.project.organization.slug, "project_slug": self.project.slug,
ref(sampling): Prettify audit logs - Part 1 (#42534)
fun_name: test_dynamic_sampling_bias_activation | commit_id: b83aa7328d49e5b45357417c78b7d1a63bfb056e | repo: sentry | file_name: test_project_details.py
ast_levels: 16 | nloc: 33 | url: https://github.com/getsentry/sentry.git | complexity: 1 | token_counts: 170 | n_ast_errors: 0 | vocab_size: 49 | n_ast_nodes: 293 | language: Python
{ "docstring": "\n Tests that when sending a request to enable a dynamic sampling bias,\n the bias will be successfully enabled and the audit log 'SAMPLING_BIAS_ENABLED' will be triggered\n ", "language": "en", "n_whitespaces": 48, "n_words": 26, "vocab_size": 22 }
def test_dynamic_sampling_bias_activation(self): project = self.project # force creation project.update_option( "sentry:dynamic_sampling_biases", [ {"id": "boostEnvironments", "active": False}, ], ) self.login_as(self.user) token = ApiToken.objects.create(user=self.user, scope_list=["project:write"]) authorization = f"Bearer {token.token}" url = reverse( "sentry-api-0-project-details", kwargs={ "organization_slug": self.project.organization.slug, "project_slug": self.project.slug, }, ) with Feature({self.new_ds_flag: True}): self.client.put( url, format="json", HTTP_AUTHORIZATION=authorization, data={ "dynamicSamplingBiases": [ {"id": "boostEnvironments", "active": True}, ] }, ) assert AuditLogEntry.objects.filter( organization=self.project.organization, event=audit_log.get_event_id("SAMPLING_BIAS_ENABLED"), ).exists()
d_id: 19,844 | id: 100,351 | n_whitespaces: 467 | path: lib/model/layers.py | n_words: 152 | n_identifiers: 25
random_cut:
def call(self, inputs, *args, **kwargs): input_shape = K.int_shape(inputs) if len(input_shape) != 4: raise ValueError('Inputs should have rank ' + str(4) + '; Received input shape:', str(input_shape)) if self.data_format == 'channels_first': batch_size, channels, height, width = input_shape if batch_size is None: batch_size = -1 r_height, r_width = self.size o_height, o_width = height * r_height, width * r_width o_channels = channels // (r_height * r_width) out = K.reshape(inputs, (batch_size, r_height, r_width, o_channels, height, width)) out = K.permute_dimensions(out, (0, 3, 4, 1, 5, 2)) out = K.reshape(out, (batch_size, o_channels, o_height, o_width)) elif self.data_format == 'channels_last': batch_size, height, width, channels = input_shape if batch_size is None: batch_size = -1 r_height, r_width = self.size o_height, o_width = height * r_height, width * r_width o_channels = channels // (r_height * r_width) out = K.reshape(inputs, (ba
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
fun_name: call | commit_id: c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf | repo: faceswap | file_name: layers.py
ast_levels: 13 | nloc: 27 | url: https://github.com/deepfakes/faceswap.git | complexity: 6 | token_counts: 267 | n_ast_errors: 0 | vocab_size: 71 | n_ast_nodes: 406 | language: Python
{ "docstring": "This is where the layer's logic lives.\n\n Parameters\n ----------\n inputs: tensor\n Input tensor, or list/tuple of input tensors\n args: tuple\n Additional standard keras Layer arguments\n kwargs: dict\n Additional standard keras Layer keyword arguments\n\n Returns\n -------\n tensor\n A tensor or list/tuple of tensors\n ", "language": "en", "n_whitespaces": 149, "n_words": 42, "vocab_size": 31 }
def call(self, inputs, *args, **kwargs): input_shape = K.int_shape(inputs) if len(input_shape) != 4: raise ValueError('Inputs should have rank ' + str(4) + '; Received input shape:', str(input_shape)) if self.data_format == 'channels_first': batch_size, channels, height, width = input_shape if batch_size is None: batch_size = -1 r_height, r_width = self.size o_height, o_width = height * r_height, width * r_width o_channels = channels // (r_height * r_width) out = K.reshape(inputs, (batch_size, r_height, r_width, o_channels, height, width)) out = K.permute_dimensions(out, (0, 3, 4, 1, 5, 2)) out = K.reshape(out, (batch_size, o_channels, o_height, o_width)) elif self.data_format == 'channels_last': batch_size, height, width, channels = input_shape if batch_size is None: batch_size = -1 r_height, r_width = self.size o_height, o_width = height * r_height, width * r_width o_channels = channels // (r_height * r_width) out = K.reshape(inputs, (batch_size, height, width, r_height, r_width, o_channels)) out = K.permute_dimensions(out, (0, 1, 3, 2, 4, 5)) out = K.reshape(out, (batch_size, o_height, o_width, o_channels)) return out
d_id: 40,250 | id: 168,239 | n_whitespaces: 137 | path: pandas/core/indexes/base.py | n_words: 40 | n_identifiers: 15
random_cut:
def to_native_types(self, slicer=None, **kwargs) -> np.ndarray: warnings.warn( "The 'to_native_types' method is deprecated and will be removed in " "a future version. Use 'astype(str)' instead.", FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) values = self
PERF cache find_stack_level (#48023) cache stacklevel
fun_name: to_native_types | commit_id: 2f8d0a36703e81e4dca52ca9fe4f58c910c1b304 | repo: pandas | file_name: base.py
ast_levels: 12 | nloc: 37 | url: https://github.com/pandas-dev/pandas.git | complexity: 2 | token_counts: 61 | n_ast_errors: 0 | vocab_size: 37 | n_ast_nodes: 100 | language: Python
{ "docstring": "\n Format specified values of `self` and return them.\n\n .. deprecated:: 1.2.0\n\n Parameters\n ----------\n slicer : int, array-like\n An indexer into `self` that specifies which values\n are used in the formatting process.\n kwargs : dict\n Options for specifying how the values should be formatted.\n These options include the following:\n\n 1) na_rep : str\n The value that serves as a placeholder for NULL values\n 2) quoting : bool or None\n Whether or not there are quoted values in `self`\n 3) date_format : str\n The format used to represent date-like values.\n\n Returns\n -------\n numpy.ndarray\n Formatted values.\n ", "language": "en", "n_whitespaces": 297, "n_words": 93, "vocab_size": 72 }
def to_native_types(self, slicer=None, **kwargs) -> np.ndarray: warnings.warn( "The 'to_native_types' method is deprecated and will be removed in " "a future version. Use 'astype(str)' instead.", FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), ) values = self if slicer is not None: values = values[slicer] return values._format_native_types(**kwargs)
d_id: 42,266 | id: 177,079 | n_whitespaces: 76 | path: networkx/algorithms/distance_measures.py | n_words: 44 | n_identifiers: 14
random_cut:
def periphery(G, e=None, usebounds=False, weight=None): if usebounds is True and e is None and not G.is_directed(): return _extrema_bounding(G, compute="periphery", weight=weight)
Add weight distance metrics (#5305) Adds the weight keyword argument to allow users to compute weighted distance metrics e.g. diameter, eccentricity, periphery, etc. The kwarg works in the same fashion as the weight param for shortest paths - i.e. if a string, look up with edge attr by key, if callable, compute the weight via the function. Default is None, meaning return unweighted result which is the current behavior. Co-authored-by: Dan Schult <[email protected]> Co-authored-by: Ross Barnowski <[email protected]>
fun_name: periphery | commit_id: 28f78cfa9a386620ee1179582fda1db5ffc59f84 | repo: networkx | file_name: distance_measures.py
ast_levels: 11 | nloc: 8 | url: https://github.com/networkx/networkx.git | complexity: 7 | token_counts: 90 | n_ast_errors: 0 | vocab_size: 31 | n_ast_nodes: 140 | language: Python
{ "docstring": "Returns the periphery of the graph G.\n\n The periphery is the set of nodes with eccentricity equal to the diameter.\n\n Parameters\n ----------\n G : NetworkX graph\n A graph\n\n e : eccentricity dictionary, optional\n A precomputed dictionary of eccentricities.\n\n weight : string, function, or None\n If this is a string, then edge weights will be accessed via the\n edge attribute with this key (that is, the weight of the edge\n joining `u` to `v` will be ``G.edges[u, v][weight]``). If no\n such edge attribute exists, the weight of the edge is assumed to\n be one.\n\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly three\n positional arguments: the two endpoints of an edge and the\n dictionary of edge attributes for that edge. The function must\n return a number.\n\n If this is None, every edge has weight/distance/cost 1.\n\n Weights stored as floating point values can lead to small round-off\n errors in distances. Use integer weights to avoid this.\n\n Weights should be positive, since they are distances.\n\n Returns\n -------\n p : list\n List of nodes in periphery\n\n Examples\n --------\n >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])\n >>> nx.periphery(G)\n [2, 5]\n\n See Also\n --------\n barycenter\n center\n ", "language": "en", "n_whitespaces": 384, "n_words": 212, "vocab_size": 128 }
def periphery(G, e=None, usebounds=False, weight=None): if usebounds is True and e is None and not G.is_directed(): return _extrema_bounding(G, compute="periphery", weight=weight) if e is None: e = eccentricity(G, weight=weight) diameter = max(e.values()) p = [v for v in e if e[v] == diameter] return p
d_id: 91,294 | id: 292,193 | n_whitespaces: 74 | path: homeassistant/components/zwave_js/climate.py | n_words: 30 | n_identifiers: 10
random_cut:
def _current_mode_setpoint_enums(self) -> list[ThermostatSetpointType | None]: if self._current_mode is None: # Thermostat(valve) with no support for setting a mode is considered heating-only return [ThermostatSetpointType.HEATING] return THERMOSTA
Add type ignore error codes [N-Z] (#66779)
fun_name: _current_mode_setpoint_enums | commit_id: 67e94f2b4ba614a37544f54ccb85984f0d600376 | repo: core | file_name: climate.py
ast_levels: 11 | nloc: 5 | url: https://github.com/home-assistant/core.git | complexity: 2 | token_counts: 43 | n_ast_errors: 0 | vocab_size: 27 | n_ast_nodes: 71 | language: Python
{ "docstring": "Return the list of enums that are relevant to the current thermostat mode.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 12 }
def _current_mode_setpoint_enums(self) -> list[ThermostatSetpointType | None]: if self._current_mode is None: # Thermostat(valve) with no support for setting a mode is considered heating-only return [ThermostatSetpointType.HEATING] return THERMOSTAT_MODE_SETPOINT_MAP.get(int(self._current_mode.value), []) # type: ignore[no-any-return]
d_id: 13,281 | id: 63,386 | n_whitespaces: 54 | path: .venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py | n_words: 25 | n_identifiers: 7
random_cut:
def line(loc, strg): lastCR = strg.rfind("\n", 0, loc) nextCR = strg.find("\n", loc) if nextCR >= 0: return strg[lastCR + 1:nextCR] else: return strg[lastCR + 1:]
upd; format
fun_name: line | commit_id: f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | repo: transferlearning | file_name: pyparsing.py
ast_levels: 11 | nloc: 7 | url: https://github.com/jindongwang/transferlearning.git | complexity: 2 | token_counts: 54 | n_ast_errors: 0 | vocab_size: 19 | n_ast_nodes: 90 | language: Python
{ "docstring": "Returns the line of text containing loc within a string, counting newlines as line separators.\n ", "language": "en", "n_whitespaces": 21, "n_words": 15, "vocab_size": 14 }
def line(loc, strg): lastCR = strg.rfind("\n", 0, loc) nextCR = strg.find("\n", loc) if nextCR >= 0: return strg[lastCR + 1:nextCR] else: return strg[lastCR + 1:]
d_id: 21,445 | id: 102,080 | n_whitespaces: 150 | path: lib/sysinfo.py | n_words: 37 | n_identifiers: 13
random_cut:
def _parse_configs(self, config_files): formatted = "" for cfile in config_files: fname = os.path.basename(cfile) ext = os.path.splitext(cfile)[1] formatted += f"\n--------- {fname} ---------\n" if ext == ".ini":
Allow decoding errors
fun_name: _parse_configs | commit_id: 48c886b3dce3d3117ad16edaf35c8abd28dc51f5 | repo: faceswap | file_name: sysinfo.py
ast_levels: 13 | nloc: 11 | url: https://github.com/deepfakes/faceswap.git | complexity: 4 | token_counts: 71 | n_ast_errors: 0 | vocab_size: 26 | n_ast_nodes: 127 | language: Python
{ "docstring": " Parse the given list of config files into a human readable format.\n\n Parameters\n ----------\n config_files: list\n A list of paths to the faceswap config files\n\n Returns\n -------\n str\n The current configuration in the config files formatted in a human readable format\n ", "language": "en", "n_whitespaces": 113, "n_words": 41, "vocab_size": 28 }
def _parse_configs(self, config_files): formatted = "" for cfile in config_files: fname = os.path.basename(cfile) ext = os.path.splitext(cfile)[1] formatted += f"\n--------- {fname} ---------\n" if ext == ".ini": formatted += self._parse_ini(cfile) elif fname == ".faceswap": formatted += self._parse_json(cfile) return formatted
d_id: 43,685 | id: 181,946 | n_whitespaces: 57 | path: src/textual/dom.py | n_words: 18 | n_identifiers: 6
random_cut:
def parent(self) -> DOMNode: if self._parent is None: raise NoParent(f"{self} has no parent") assert isinstance(self._parent, DOMNode) return self._parent
docstrings and tidy
fun_name: parent | commit_id: 2635f58e7c3d10b161ee69a15ebfe6499ac26daa | repo: textual | file_name: dom.py
ast_levels: 11 | nloc: 13 | url: https://github.com/Textualize/textual.git | complexity: 2 | token_counts: 34 | n_ast_errors: 0 | vocab_size: 17 | n_ast_nodes: 60 | language: Python
{ "docstring": "Get the parent node.\n\n Raises:\n NoParent: If this is the root node.\n\n Returns:\n DOMNode: The node which is the direct parent of this node.\n ", "language": "en", "n_whitespaces": 67, "n_words": 24, "vocab_size": 17 }
def parent(self) -> DOMNode: if self._parent is None: raise NoParent(f"{self} has no parent") assert isinstance(self._parent, DOMNode) return self._parent
d_id: 3,287 | id: 20,236 | n_whitespaces: 88 | path: pipenv/patched/notpip/_vendor/platformdirs/unix.py | n_words: 23 | n_identifiers: 11
random_cut:
def user_documents_dir(self) -> str: documents_dir = _get_user_dirs_folder("XDG_DOCUMENTS_DIR") if documents_dir is None: documents_dir = os.environ.get("XDG_DOCUMENTS_DIR", "").strip() if not documents_dir: documents_dir = os.path.ex
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
fun_name: user_documents_dir | commit_id: f3166e673fe8d40277b804d35d77dcdb760fc3b3 | repo: pipenv | file_name: unix.py
ast_levels: 13 | nloc: 10 | url: https://github.com/pypa/pipenv.git | complexity: 3 | token_counts: 51 | n_ast_errors: 0 | vocab_size: 16 | n_ast_nodes: 93 | language: Python
{ "docstring": "\n :return: documents directory tied to the user, e.g. ``~/Documents``\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
def user_documents_dir(self) -> str: documents_dir = _get_user_dirs_folder("XDG_DOCUMENTS_DIR") if documents_dir is None: documents_dir = os.environ.get("XDG_DOCUMENTS_DIR", "").strip() if not documents_dir: documents_dir = os.path.expanduser("~/Documents") return documents_dir
d_id: 76,025 | id: 259,992 | n_whitespaces: 230 | path: sklearn/ensemble/tests/test_iforest.py | n_words: 65 | n_identifiers: 27
random_cut:
def test_iforest_sparse(global_random_seed): rng = check_random_state(global_random_seed) X_train, X_test = train_test_split(diabetes.data[:50], random_state=rng) grid = ParameterGrid({"m
TST use global_random_seed in sklearn/ensemble/tests/test_iforest.py (#22901) Co-authored-by: jeremie du boisberranger <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: Olivier Grisel <[email protected]>
fun_name: test_iforest_sparse | commit_id: 6ca1f5e4d0d16bc9a7f28582079a15e14f012719 | repo: scikit-learn | file_name: test_iforest.py
ast_levels: 15 | nloc: 17 | url: https://github.com/scikit-learn/scikit-learn.git | complexity: 3 | token_counts: 144 | n_ast_errors: 0 | vocab_size: 47 | n_ast_nodes: 221 | language: Python
{ "docstring": "Check IForest for various parameter settings on sparse input.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def test_iforest_sparse(global_random_seed): rng = check_random_state(global_random_seed) X_train, X_test = train_test_split(diabetes.data[:50], random_state=rng) grid = ParameterGrid({"max_samples": [0.5, 1.0], "bootstrap": [True, False]}) for sparse_format in [csc_matrix, csr_matrix]: X_train_sparse = sparse_format(X_train) X_test_sparse = sparse_format(X_test) for params in grid: # Trained on sparse format sparse_classifier = IsolationForest( n_estimators=10, random_state=global_random_seed, **params ).fit(X_train_sparse) sparse_results = sparse_classifier.predict(X_test_sparse) # Trained on dense format dense_classifier = IsolationForest( n_estimators=10, random_state=global_random_seed, **params ).fit(X_train) dense_results = dense_classifier.predict(X_test) assert_array_equal(sparse_results, dense_results)
d_id: 75,703 | id: 259,304 | n_whitespaces: 76 | path: sklearn/metrics/_scorer.py | n_words: 41 | n_identifiers: 17
random_cut:
def get_scorer_names():
API get_scorer returns a copy and introduce get_scorer_names (#22866)
fun_name: get_scorer_names | commit_id: 7dc97a378ecbfa056dd9cfa9d1ef4c07d2d0cc1f | repo: scikit-learn | file_name: _scorer.py
ast_levels: 11 | nloc: 2 | url: https://github.com/scikit-learn/scikit-learn.git | complexity: 1 | token_counts: 14 | n_ast_errors: 0 | vocab_size: 35 | n_ast_nodes: 171 | language: Python
{ "docstring": "Get the names of all available scorers.\n\n These names can be passed to :func:`~sklearn.metrics.get_scorer` to\n retrieve the scorer object.\n\n Returns\n -------\n list of str\n Names of all available scorers.\n ", "language": "en", "n_whitespaces": 54, "n_words": 29, "vocab_size": 21 }
def get_scorer_names(): return sorted(_SCORERS.keys()) for name, metric in [ ("precision", precision_score), ("recall", recall_score), ("f1", f1_score), ("jaccard", jaccard_score), ]: _SCORERS[name] = make_scorer(metric, average="binary") for average in ["macro", "micro", "samples", "weighted"]: qualified_name = "{0}_{1}".format(name, average) _SCORERS[qualified_name] = make_scorer(metric, pos_label=None, average=average) SCORERS = _DeprecatedScorers(_SCORERS)
d_id: 22,155 | id: 105,541 | n_whitespaces: 317 | path: datasets/swda/swda.py | n_words: 95 | n_identifiers: 21
random_cut:
def _split_generators(self, dl_manager): # Download extract and return path of data file. dl_dir = dl_manager.download_and_extract(_URL) # Use swda/ folder. data_dir = os.path.join(dl_dir, "swda") # Handle partitions files: download extract and return paths of split files. downloaded_files = dl_manager.download(self._URLS) return [ # Return whole data path and train splits file downloaded path. datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={"data_dir": data_dir, "split_file": downloaded_files["train"]} ), # Return whole data path and dev splits file downloaded path. datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={"data_dir": data_dir, "split_file": downloaded_files["dev"]}, ), # Return whole data path and train splits file downloaded path. datasets.SplitGenerator(
Support streaming swda dataset (#4914) * Support streaming swda dataset * Remove unused import
fun_name: _split_generators | commit_id: f10d38b8b60b09a633823a2fb2529c83933b9c80 | repo: datasets | file_name: swda.py
ast_levels: 13 | nloc: 16 | url: https://github.com/huggingface/datasets.git | complexity: 1 | token_counts: 126 | n_ast_errors: 0 | vocab_size: 51 | n_ast_nodes: 211 | language: Python
{ "docstring": "\n Returns SplitGenerators.\n This method is tasked with downloading/extracting the data and defining the splits.\n\n Args:\n dl_manager (:obj:`datasets.utils.download_manager.DownloadManager`):\n Download manager to download and extract data files from urls.\n\n Returns:\n :obj:`list[str]`:\n List of paths to data.\n ", "language": "en", "n_whitespaces": 123, "n_words": 34, "vocab_size": 30 }
def _split_generators(self, dl_manager): # Download extract and return path of data file. dl_dir = dl_manager.download_and_extract(_URL) # Use swda/ folder. data_dir = os.path.join(dl_dir, "swda") # Handle partitions files: download extract and return paths of split files. downloaded_files = dl_manager.download(self._URLS) return [ # Return whole data path and train splits file downloaded path. datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={"data_dir": data_dir, "split_file": downloaded_files["train"]} ), # Return whole data path and dev splits file downloaded path. datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={"data_dir": data_dir, "split_file": downloaded_files["dev"]}, ), # Return whole data path and train splits file downloaded path. datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={"data_dir": data_dir, "split_file": downloaded_files["test"]} ), ]
d_id: 83,838 | id: 281,540 | n_whitespaces: 30 | path: gamestonk_terminal/stocks/discovery/disc_controller.py | n_words: 8 | n_identifiers: 7
random_cut:
def print_help(self): help
Terminal Wide Rich (#1161) * My idea for how we handle Rich moving forward * remove independent consoles * FIxed pylint issues * add a few vars * Switched print to console * More transitions * Changed more prints * Replaced all prints * Fixing tabulate * Finished replace tabulate * Finished removing rich from Tabulate * add Panel around menu * add GST watermark under feature flag * Fixed 46 tests * Delete test_screener[False].yaml * Delete test_screener[True].yaml * Fixed the rest of the tests * add help and source color vars and use rgb * rich on stocks/options * update rich on disc, dps, sia * rich in gov, ins and scr menus * ba and ca menus with rich * Fixed import issue * Fixed some tests * removed termcolor * Removed prettytable * add rich to remaining stocks menus * FIxed linting issue * Added James' changes * Updated dependencies * Add rich to cryptocurrency menu * refactor economy and forex * refactor etf with rich * refactor mfunds * refactor rich rest * not specify style so default color works well on any background * Fixing mypy issues * Updated tests * More test fixes * James' test fixes * Updating tests : stocks/screener - fix cassettes using BR * Updating tests : crypto * Updating tests : disable DEBUG_MODE * Updating tests : stocks/fa/yfinance * minor fixes that escape * Improve the rich table function (that replaces tabulate :D ) * Fixed bad code * delete rogue file + dcf fix + NoConsole * sia mypy * fuck you linter * fuck you linter pt 2 * skip hehe * i hate the black linter * ubuntu mypy attempt * Update : rich_config + gtff * Updating tests : conftest * Updating tests : stocks * Update : rich_config * Updating : rich_config * make panel configurable for Theodore :b * colors update * Merged * Updating : rich_config + feature_flags * Updating : rich_config * Updating tests : stocks * Updating : feature_flags Co-authored-by: DidierRLopes <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]> Co-authored-by: james <[email protected]> Co-authored-by: jose-donato <[email protected]>
fun_name: print_help | commit_id: 82747072c511beb1b2672846ae2ee4aec53eb562 | repo: OpenBBTerminal | file_name: disc_controller.py
ast_levels: 9 | nloc: 31 | url: https://github.com/OpenBB-finance/OpenBBTerminal.git | complexity: 1 | token_counts: 21 | n_ast_errors: 0 | vocab_size: 8 | n_ast_nodes: 40 | language: Python
{ "docstring": "Print help[cmds]\n[src][Geek of Wall St][/src]\n rtearn realtime earnings from and expected moves\n[src][Finnhub][/src]\n pipo past IPOs dates\n fipo future IPOs dates\n[src][Yahoo Finance][/src]\n gainers show latest top gainers\n losers show latest top losers\n ugs undervalued stocks with revenue and earnings growth in excess of 25%\n gtech tech stocks with revenue and earnings growth more than 25%\n active most active stocks by intraday trade volume\n ulc potentially undervalued large cap stocks\n asc small cap stocks with earnings growth rates better than 25%\n[src][Fidelity][/src]\n ford orders by Fidelity Customers\n[src][Cathiesark.com][/src]\n arkord orders by ARK Investment Management LLC\n[src][Seeking Alpha][/src]\n upcoming upcoming earnings release dates\n trending trending news\n cnews customized news (buybacks, ipos, spacs, healthcare, politics)\n[src][Shortinterest.com][/src]\n lowfloat low float stocks under 10M shares float\n[src][Pennystockflow.com][/src]\n hotpenny today's hot penny stocks\n[src][NASDAQ Data Link (Formerly Quandl)][/src]\n rtat top 10 retail traded stocks per day[/cmds]\n", "language": "en", "n_whitespaces": 340, "n_words": 142, "vocab_size": 101 }
def print_help(self): help_text = console.print(text=help_text, menu="Stocks - Discovery")
d_id: 50,874 | id: 204,760 | n_whitespaces: 308 | path: django/core/serializers/xml_serializer.py | n_words: 60 | n_identifiers: 22
random_cut:
def handle_fk_field(self, obj, field): self._start_relational_field(field) related_att = getattr(obj, field.get_attname()) if related_att is not None: if self.use_natural_foreign_keys and hasattr( field.remote_field.model, "natural_key" ): related = getattr(obj, field.name) # If related object has a natural key, use it related = related.natural_key() # Iterable natural keys are rolled out as subelements for key_value in related:
Refs #33476 -- Reformatted code with Black.
fun_name: handle_fk_field | commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | repo: django | file_name: xml_serializer.py
ast_levels: 15 | nloc: 18 | url: https://github.com/django/django.git | complexity: 5 | token_counts: 133 | n_ast_errors: 0 | vocab_size: 50 | n_ast_nodes: 225 | language: Python
{ "docstring": "\n Handle a ForeignKey (they need to be treated slightly\n differently from regular fields).\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 13 }
def handle_fk_field(self, obj, field): self._start_relational_field(field) related_att = getattr(obj, field.get_attname()) if related_att is not None: if self.use_natural_foreign_keys and hasattr( field.remote_field.model, "natural_key" ): related = getattr(obj, field.name) # If related object has a natural key, use it related = related.natural_key() # Iterable natural keys are rolled out as subelements for key_value in related: self.xml.startElement("natural", {}) self.xml.characters(str(key_value)) self.xml.endElement("natural") else: self.xml.characters(str(related_att)) else: self.xml.addQuickElement("None") self.xml.endElement("field")
d_id: 28,528 | id: 127,793 | n_whitespaces: 130 | path: python/ray/tests/test_metrics_head.py | n_words: 24 | n_identifiers: 8
random_cut:
def test_metrics_folder(): with _ray_start(include_dashboard=True) as context: session_dir = context["session_dir"] assert os.path.exists( f"{session_dir}/metrics/grafana/provisioning/dashboards/default.yml" ) assert os.path.exists( f"{session_dir}/metrics/grafana/provisioning/dashboards" "/default_grafana_dashboard.json" ) assert os.path.exists( f"{session_dir}/metrics/grafana/provisioning/
Export default configurations for grafana and prometheus (#28286)
fun_name: test_metrics_folder | commit_id: 42da4445e7a3cb358a1a02ae433a004e9fa836b5 | repo: ray | file_name: test_metrics_head.py
ast_levels: 12 | nloc: 14 | url: https://github.com/ray-project/ray.git | complexity: 1 | token_counts: 62 | n_ast_errors: 0 | vocab_size: 17 | n_ast_nodes: 126 | language: Python
{ "docstring": "\n Tests that the default dashboard files get created.\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
def test_metrics_folder(): with _ray_start(include_dashboard=True) as context: session_dir = context["session_dir"] assert os.path.exists( f"{session_dir}/metrics/grafana/provisioning/dashboards/default.yml" ) assert os.path.exists( f"{session_dir}/metrics/grafana/provisioning/dashboards" "/default_grafana_dashboard.json" ) assert os.path.exists( f"{session_dir}/metrics/grafana/provisioning/datasources/default.yml" ) assert os.path.exists(f"{session_dir}/metrics/prometheus/prometheus.yml")
d_id: 7,049 | id: 38,931 | n_whitespaces: 150 | path: deepspeed/runtime/fp16/fused_optimizer.py | n_words: 44 | n_identifiers: 12
random_cut:
def state_dict(self): state_dict = {} state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale state_dict['cur_scale'] = self.cur_scale state_dict['cur_iter'] = self.cur_iter if state_dict['dynamic_loss_scale']: state_dict['last_overflow_iter'] = self.last_overflow_iter state_dict['scale_factor'] = self.scale_factor state_dict['scale_window'] = self.scale_window state_dict[OPTIMIZER_STATE_DICT] = self.optimize
[ZeRO] Default disable elastic ckpt in stage 1+2 and reduce CPU memory overhead during ckpt load (#1525) Co-authored-by: Olatunji Ruwase <[email protected]>
fun_name: state_dict | commit_id: 3293cf72a0abd5cf77a831996bd054bc908476a6 | repo: DeepSpeed | file_name: fused_optimizer.py
ast_levels: 10 | nloc: 13 | url: https://github.com/microsoft/DeepSpeed.git | complexity: 2 | token_counts: 94 | n_ast_errors: 0 | vocab_size: 34 | n_ast_nodes: 166 | language: Python
{ "docstring": "\n Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.\n This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict\n of the contained Pytorch optimizer.\n Example::\n checkpoint = {}\n checkpoint['model'] = model.state_dict()\n checkpoint['optimizer'] = optimizer.state_dict()\n torch.save(checkpoint, \"saved.pth\")\n ", "language": "en", "n_whitespaces": 119, "n_words": 39, "vocab_size": 31 }
def state_dict(self): state_dict = {} state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale state_dict['cur_scale'] = self.cur_scale state_dict['cur_iter'] = self.cur_iter if state_dict['dynamic_loss_scale']: state_dict['last_overflow_iter'] = self.last_overflow_iter state_dict['scale_factor'] = self.scale_factor state_dict['scale_window'] = self.scale_window state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict() state_dict['fp32_groups_flat'] = self.fp32_groups_flat state_dict['clip_grad'] = self.clip_grad return state_dict # Refresh fp32 master params from fp16 copies
d_id: 77,021 | id: 261,829 | n_whitespaces: 402 | path: sklearn/naive_bayes.py | n_words: 162 | n_identifiers: 24
random_cut:
def _update_mean_variance(n_past, mu, var, X, sample_weight=None): if X.shape[0] == 0: return mu, var # Compute (potentially weighted) mean and variance of new datapoints if sample_weight is not None: n_new = float(sample_weight.sum()) if np.isclose(n_new, 0.0): return mu, var new_mu = np.average(X, axis=0, weights=sample_weight) new_var = np.average((X - new_mu) ** 2, axis=0, weights=sample_weig
TST Add common tests for single class fitting induced by sample weights (#24140) Co-authored-by: johayon <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
fun_name: _update_mean_variance | commit_id: 2cce02414d4a7161f0d105450c196d94b1182220 | repo: scikit-learn | file_name: naive_bayes.py
ast_levels: 13 | nloc: 22 | url: https://github.com/scikit-learn/scikit-learn.git | complexity: 5 | token_counts: 204 | n_ast_errors: 0 | vocab_size: 81 | n_ast_nodes: 314 | language: Python
{ "docstring": "Compute online update of Gaussian mean and variance.\n\n Given starting sample count, mean, and variance, a new set of\n points X, and optionally sample weights, return the updated mean and\n variance. (NB - each dimension (column) in X is treated as independent\n -- you get variance, not covariance).\n\n Can take scalar mean and variance, or vector mean and variance to\n simultaneously update a number of independent Gaussians.\n\n See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:\n\n http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf\n\n Parameters\n ----------\n n_past : int\n Number of samples represented in old mean and variance. If sample\n weights were given, this should contain the sum of sample\n weights represented in old mean and variance.\n\n mu : array-like of shape (number of Gaussians,)\n Means for Gaussians in original set.\n\n var : array-like of shape (number of Gaussians,)\n Variances for Gaussians in original set.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Weights applied to individual samples (1. for unweighted).\n\n Returns\n -------\n total_mu : array-like of shape (number of Gaussians,)\n Updated mean for each Gaussian over the combined set.\n\n total_var : array-like of shape (number of Gaussians,)\n Updated variance for each Gaussian over the combined set.\n ", "language": "en", "n_whitespaces": 412, "n_words": 191, "vocab_size": 105 }
def _update_mean_variance(n_past, mu, var, X, sample_weight=None): if X.shape[0] == 0: return mu, var # Compute (potentially weighted) mean and variance of new datapoints if sample_weight is not None: n_new = float(sample_weight.sum()) if np.isclose(n_new, 0.0): return mu, var new_mu = np.average(X, axis=0, weights=sample_weight) new_var = np.average((X - new_mu) ** 2, axis=0, weights=sample_weight) else: n_new = X.shape[0] new_var = np.var(X, axis=0) new_mu = np.mean(X, axis=0) if n_past == 0: return new_mu, new_var n_total = float(n_past + n_new) # Combine mean of old and new data, taking into consideration # (weighted) number of observations total_mu = (n_new * new_mu + n_past * mu) / n_total # Combine variance of old and new data, taking into consideration # (weighted) number of observations. This is achieved by combining # the sum-of-squared-differences (ssd) old_ssd = n_past * var new_ssd = n_new * new_var total_ssd = old_ssd + new_ssd + (n_new * n_past / n_total) * (mu - new_mu) ** 2 total_var = total_ssd / n_total return total_mu, total_var
d_id: 48,393 | id: 197,220 | n_whitespaces: 50 | path: sympy/functions/combinatorial/numbers.py | n_words: 9 | n_identifiers: 6
random_cut:
def is_prime(n): sympy_deprecation_warning( ,
Deprecate redundant static methods
fun_name: is_prime | commit_id: b27e2b44626d138bd6ea235fbf114644baa5b144 | repo: sympy | file_name: numbers.py
ast_levels: 9 | nloc: 10 | url: https://github.com/sympy/sympy.git | complexity: 1 | token_counts: 23 | n_ast_errors: 0 | vocab_size: 9 | n_ast_nodes: 41 | language: Python
{ "docstring": "\nis_prime is just a wrapper around sympy.ntheory.primetest.isprime so use that\ndirectly instead.\n ", "language": "en", "n_whitespaces": 18, "n_words": 12, "vocab_size": 12 }
def is_prime(n): sympy_deprecation_warning( , deprecated_since_version="1.11", active_deprecations_target='deprecated-carmichael-static-methods', ) return isprime(n)
d_id: 56,186 | id: 221,074 | n_whitespaces: 16 | path: python3.10.4/Lib/base64.py | n_words: 12 | n_identifiers: 7
random_cut:
def standard_b64decode(s): return b64decode(s) _urlsafe_encode_tr
add python 3.10.4 for windows
fun_name: standard_b64decode | commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526 | repo: XX-Net | file_name: base64.py
ast_levels: 7 | nloc: 2 | url: https://github.com/XX-net/XX-Net.git | complexity: 1 | token_counts: 11 | n_ast_errors: 0 | vocab_size: 11 | n_ast_nodes: 59 | language: Python
{ "docstring": "Decode bytes encoded with the standard Base64 alphabet.\n\n Argument s is a bytes-like object or ASCII string to decode. The result\n is returned as a bytes object. A binascii.Error is raised if the input\n is incorrectly padded. Characters that are not in the standard alphabet\n are discarded prior to the padding check.\n ", "language": "en", "n_whitespaces": 70, "n_words": 52, "vocab_size": 41 }
def standard_b64decode(s): return b64decode(s) _urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_') _urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')
d_id: 29,975 | id: 133,298 | n_whitespaces: 98 | path: python/ray/util/sgd/torch/examples/dcgan.py | n_words: 29 | n_identifiers: 20
random_cut:
def inception_score(self, imgs, batch_size=32, splits=1): N = len(imgs) dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size) up = nn.Upsample( size=(28, 28), mo
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
fun_name: inception_score | commit_id: 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | repo: ray | file_name: dcgan.py
ast_levels: 12 | nloc: 25 | url: https://github.com/ray-project/ray.git | complexity: 4 | token_counts: 236 | n_ast_errors: 0 | vocab_size: 27 | n_ast_nodes: 105 | language: Python
{ "docstring": "Calculate the inception score of the generated images.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 7 }
def inception_score(self, imgs, batch_size=32, splits=1): N = len(imgs) dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size) up = nn.Upsample( size=(28, 28), mode="bilinear", align_corners=False, # This is to reduce user warnings from torch. ).type(torch.FloatTensor)
d_id: 14,354 | id: 66,833 | n_whitespaces: 9 | path: erpnext/patches/v13_0/update_shipment_status.py | n_words: 17 | n_identifiers: 5
random_cut:
def execute(): frappe.reload_doc("stock", "doctype", "shipment") # update submitted status frappe.db.sql( )
style: format code with black
fun_name: execute | commit_id: 494bd9ef78313436f0424b918f200dab8fc7c20b | repo: erpnext | file_name: update_shipment_status.py
ast_levels: 8 | nloc: 12 | url: https://github.com/frappe/erpnext.git | complexity: 1 | token_counts: 30 | n_ast_errors: 0 | vocab_size: 12 | n_ast_nodes: 60 | language: Python
{ "docstring": "UPDATE `tabShipment`\n\t\t\t\t\tSET status = \"Submitted\"\n\t\t\t\t\tWHERE status = \"Draft\" AND docstatus = 1UPDATE `tabShipment`\n\t\t\t\t\tSET status = \"Cancelled\"\n\t\t\t\t\tWHERE status = \"Draft\" AND docstatus = 2", "language": "en", "n_whitespaces": 22, "n_words": 27, "vocab_size": 13 }
def execute(): frappe.reload_doc("stock", "doctype", "shipment") # update submitted status frappe.db.sql( ) # update cancelled status frappe.db.sql( )
d_id: 50,753 | id: 204,496 | n_whitespaces: 26 | path: django/core/files/storage.py | n_words: 12 | n_identifiers: 4
random_cut:
def url(self, name): raise NotImplementedError("subclasses of Storage must provide a url() method
Refs #33476 -- Reformatted code with Black.
fun_name: url | commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | repo: django | file_name: storage.py
ast_levels: 8 | nloc: 2 | url: https://github.com/django/django.git | complexity: 1 | token_counts: 13 | n_ast_errors: 0 | vocab_size: 12 | n_ast_nodes: 25 | language: Python
{ "docstring": "\n Return an absolute URL where the file's contents can be accessed\n directly by a web browser.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 16 }
def url(self, name): raise NotImplementedError("subclasses of Storage must provide a url() method")
d_id: 81,491 | id: 275,866 | n_whitespaces: 227 | path: keras/saving/hdf5_format.py | n_words: 57 | n_identifiers: 10
random_cut:
def load_attributes_from_hdf5_group(group, name): if name in group.
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
fun_name: load_attributes_from_hdf5_group | commit_id: 84afc5193d38057e2e2badf9c889ea87d80d8fbf | repo: keras | file_name: hdf5_format.py
ast_levels: 17 | nloc: 18 | url: https://github.com/keras-team/keras.git | complexity: 7 | token_counts: 107 | n_ast_errors: 0 | vocab_size: 34 | n_ast_nodes: 174 | language: Python
{ "docstring": "Loads attributes of the specified name from the HDF5 group.\n\n This method deals with an inherent problem\n of HDF5 file which is not able to store\n data larger than HDF5_OBJECT_HEADER_LIMIT bytes.\n\n Args:\n group: A pointer to a HDF5 group.\n name: A name of the attributes to load.\n\n Returns:\n data: Attributes data.\n ", "language": "en", "n_whitespaces": 90, "n_words": 51, "vocab_size": 39 }
def load_attributes_from_hdf5_group(group, name): if name in group.attrs: data = [ n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name] ] else: data = [] chunk_id = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [ n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)] ] ) chunk_id += 1 return data
d_id: 11,429 | id: 55,970 | n_whitespaces: 140 | path: src/prefect/orion/models/block_schemas.py | n_words: 21 | n_identifiers: 6
random_cut:
def _find_root_block_schema(block_schemas_with_references): return next( ( block_schema for ( block_schema, _, parent_block_schema_id, ) in block_schemas_with_references if parent_block_schema_
Nested Block Schemas (PrefectHQ/orion#1846) * Adds models and migration for block schema and block document references * Adds customization to the generation of a block schema's fields * Adds ability to reconstruct block schema fields on read * Adds ability to reconstruct block schema when read by checksum * Adds schema reconstruction when reading multiple block schemas * Adds ordering to query of recursive CTE * Refactors to make code path and purpose easier to follow
fun_name: _find_root_block_schema | commit_id: a05e44c89acf0b6073ac876479be24a5e51d7754 | repo: prefect | file_name: block_schemas.py
ast_levels: 10 | nloc: 13 | url: https://github.com/PrefectHQ/prefect.git | complexity: 3 | token_counts: 31 | n_ast_errors: 0 | vocab_size: 19 | n_ast_nodes: 46 | language: Python
{ "docstring": "\n Attempts to find the root block schema from a list of block schemas\n with references. Returns None if a root block schema is not found.\n Returns only the first potential root block schema if multiple are found.\n ", "language": "en", "n_whitespaces": 50, "n_words": 37, "vocab_size": 25 }
def _find_root_block_schema(block_schemas_with_references): return next( ( block_schema for ( block_schema, _, parent_block_schema_id, ) in block_schemas_with_references if parent_block_schema_id is None ), None, )
d_id: 4,866 | id: 25,203 | n_whitespaces: 465 | path: ppocr/modeling/heads/local_graph.py | n_words: 162 | n_identifiers: 32
random_cut:
def feature_embedding(input_feats, out_feat_len): assert input_feats.ndim == 2 assert isinstance(out_feat_len, int) assert out_feat_len >= input_feats.shape[1] num_nodes = input_feats.shape[0] feat_dim = input_feats.shape[1] feat_repeat_times = out_feat_len // feat_dim residue_dim = out_feat_len % feat_dim if residue_dim > 0: embed_wave = np.array([ np.power(1000, 2.0 * (j // 2) / feat_repeat_times + 1) for j in range(feat_repeat_times + 1) ]).reshape((feat_repeat_times + 1, 1, 1)) repeat_feats = np.repeat( np.expand_dims( input_feats, axis=0), feat_repeat_times, axis=0) residue_feats = np.hstack([ input_feats[:, 0:residue_dim], np.zeros( (num_nodes, feat_dim - residue_dim)) ]) residue_feats = np.expand_dims(residue_feats, axis=0) repeat_feats = np.concatenate([repeat_feats, residue_feats], axis=0) embedded_feats = repeat_feats / embed_wave embedded_feats[:, 0::2] = np.sin(embedded_feats[:, 0::2]) embedded_feats[:, 1::2] = np.cos(embedded_feats[:, 1::2]) embedded_feats = np.transpose(embedded_feats, (1, 0, 2)).reshape( (num_nodes, -1))[:, 0:out_feat_len] else: embed_wave = np.array([ np.power(1000, 2.0 * (j // 2) / feat_repeat_times) for j in range(feat_repeat_times) ]).reshape((feat_repeat_times, 1, 1)) repeat_feats = np.repeat( np.expand_dims( input_feats, axis=0), feat_repeat_times, axis=0) embedded_feats = repeat_feats / embed_wave
add drrg
fun_name: feature_embedding | commit_id: 1f9400dd7374ce9cc47981372e324ff412e53ba3 | repo: PaddleOCR | file_name: local_graph.py
ast_levels: 20 | nloc: 41 | url: https://github.com/PaddlePaddle/PaddleOCR.git | complexity: 4 | token_counts: 416 | n_ast_errors: 0 | vocab_size: 79 | n_ast_nodes: 639 | language: Python
{ "docstring": "Embed features. This code was partially adapted from\n https://github.com/GXYM/DRRG licensed under the MIT license.\n\n Args:\n input_feats (ndarray): The input features of shape (N, d), where N is\n the number of nodes in graph, d is the input feature vector length.\n out_feat_len (int): The length of output feature vector.\n\n Returns:\n embedded_feats (ndarray): The embedded features.\n ", "language": "en", "n_whitespaces": 98, "n_words": 54, "vocab_size": 43 }
def feature_embedding(input_feats, out_feat_len): assert input_feats.ndim == 2 assert isinstance(out_feat_len, int) assert out_feat_len >= input_feats.shape[1] num_nodes = input_feats.shape[0] feat_dim = input_feats.shape[1] feat_repeat_times = out_feat_len // feat_dim residue_dim = out_feat_len % feat_dim if residue_dim > 0: embed_wave = np.array([ np.power(1000, 2.0 * (j // 2) / feat_repeat_times + 1) for j in range(feat_repeat_times + 1) ]).reshape((feat_repeat_times + 1, 1, 1)) repeat_feats = np.repeat( np.expand_dims( input_feats, axis=0), feat_repeat_times, axis=0) residue_feats = np.hstack([ input_feats[:, 0:residue_dim], np.zeros( (num_nodes, feat_dim - residue_dim)) ]) residue_feats = np.expand_dims(residue_feats, axis=0) repeat_feats = np.concatenate([repeat_feats, residue_feats], axis=0) embedded_feats = repeat_feats / embed_wave embedded_feats[:, 0::2] = np.sin(embedded_feats[:, 0::2]) embedded_feats[:, 1::2] = np.cos(embedded_feats[:, 1::2]) embedded_feats = np.transpose(embedded_feats, (1, 0, 2)).reshape( (num_nodes, -1))[:, 0:out_feat_len] else: embed_wave = np.array([ np.power(1000, 2.0 * (j // 2) / feat_repeat_times) for j in range(feat_repeat_times) ]).reshape((feat_repeat_times, 1, 1)) repeat_feats = np.repeat( np.expand_dims( input_feats, axis=0), feat_repeat_times, axis=0) embedded_feats = repeat_feats / embed_wave embedded_feats[:, 0::2] = np.sin(embedded_feats[:, 0::2]) embedded_feats[:, 1::2] = np.cos(embedded_feats[:, 1::2]) embedded_feats = np.transpose(embedded_feats, (1, 0, 2)).reshape( (num_nodes, -1)).astype(np.float32) return embedded_feats
d_id: 76,622 | id: 261,007 | n_whitespaces: 71 | path: sklearn/linear_model/_base.py | n_words: 29 | n_identifiers: 18
random_cut:
def decision_function(self, X): check_is_fitted(self) xp, _ = get_namespace(X) X = s
ENH Adds Array API support to LinearDiscriminantAnalysis (#22554) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]>
fun_name: decision_function | commit_id: 2710a9e7eefd2088ce35fd2fb6651d5f97e5ef8b | repo: scikit-learn | file_name: _base.py
ast_levels: 11 | nloc: 6 | url: https://github.com/scikit-learn/scikit-learn.git | complexity: 2 | token_counts: 77 | n_ast_errors: 0 | vocab_size: 26 | n_ast_nodes: 119 | language: Python
{ "docstring": "\n Predict confidence scores for samples.\n\n The confidence score for a sample is proportional to the signed\n distance of that sample to the hyperplane.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The data matrix for which we want to get the confidence scores.\n\n Returns\n -------\n scores : ndarray of shape (n_samples,) or (n_samples, n_classes)\n Confidence scores per `(n_samples, n_classes)` combination. In the\n binary case, confidence score for `self.classes_[1]` where >0 means\n this class would be predicted.\n ", "language": "en", "n_whitespaces": 194, "n_words": 79, "vocab_size": 58 }
def decision_function(self, X): check_is_fitted(self) xp, _ = get_namespace(X) X = self._validate_data(X, accept_sparse="csr", reset=False) scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ return xp.reshape(scores, -1) if scores.shape[1] == 1 else scores
d_id: 54,671 | id: 216,630 | n_whitespaces: 63 | path: backend/postprocessing/rankings.py | n_words: 38 | n_identifiers: 11
random_cut:
def get_ranking(pairs): if len(pairs) == 1: return list(
ran pre-commit hook
fun_name: get_ranking | commit_id: 38ca08446d560797522b7828720032799584d32a | repo: Open-Assistant | file_name: rankings.py
ast_levels: 11 | nloc: 6 | url: https://github.com/LAION-AI/Open-Assistant.git | complexity: 4 | token_counts: 61 | n_ast_errors: 0 | vocab_size: 33 | n_ast_nodes: 98 | language: Python
{ "docstring": "\n Abuses concordance property to get a (not necessarily unqiue) ranking.\n The lack of uniqueness is due to the potential existance of multiple\n equally ranked winners. We have to pick one, which is where\n the non-uniqueness comes from\n ", "language": "en", "n_whitespaces": 53, "n_words": 37, "vocab_size": 32 }
def get_ranking(pairs): if len(pairs) == 1: return list(pairs[0]) w = get_winner(pairs) # now remove the winner from the list of pairs p_new = np.array([(a, b) for a, b in pairs if a != w]) return [w] + get_ranking(p_new)
d_id: 12,476 | id: 61,263 | n_whitespaces: 63 | path: .venv/lib/python3.8/site-packages/pip/_internal/utils/misc.py | n_words: 31 | n_identifiers: 9
random_cut:
def backup_dir(dir, ext=".bak"): # type: (str, str) -> str n = 1 extension = ext while os.path.exists(dir + extension): n += 1 extension = ext + str(n) return dir + extens
upd; format
fun_name: backup_dir | commit_id: f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | repo: transferlearning | file_name: misc.py
ast_levels: 11 | nloc: 7 | url: https://github.com/jindongwang/transferlearning.git | complexity: 2 | token_counts: 43 | n_ast_errors: 0 | vocab_size: 22 | n_ast_nodes: 74 | language: Python
{ "docstring": "Figure out the name of a directory to back up the given dir to\n (adding .bak, .bak2, etc)", "language": "en", "n_whitespaces": 20, "n_words": 18, "vocab_size": 16 }
def backup_dir(dir, ext=".bak"): # type: (str, str) -> str n = 1 extension = ext while os.path.exists(dir + extension): n += 1 extension = ext + str(n) return dir + extension
d_id: 32,051 | id: 140,580 | n_whitespaces: 147 | path: rllib/utils/filter_manager.py | n_words: 42 | n_identifiers: 22
random_cut:
def synchronize(local_filters, remotes, update_remote=True): remote_filters = ray.get( [r.get_filters.remote(flush_
Clean up docstyle in python modules and add LINT rule (#25272)
fun_name: synchronize | commit_id: 905258dbc19753c81039f993477e7ab027960729 | repo: ray | file_name: filter_manager.py
ast_levels: 12 | nloc: 11 | url: https://github.com/ray-project/ray.git | complexity: 7 | token_counts: 107 | n_ast_errors: 0 | vocab_size: 30 | n_ast_nodes: 164 | language: Python
{ "docstring": "Aggregates all filters from remote evaluators.\n\n Local copy is updated and then broadcasted to all remote evaluators.\n\n Args:\n local_filters: Filters to be synchronized.\n remotes: Remote evaluators with filters.\n update_remote: Whether to push updates to remote filters.\n ", "language": "en", "n_whitespaces": 90, "n_words": 36, "vocab_size": 28 }
def synchronize(local_filters, remotes, update_remote=True): remote_filters = ray.get( [r.get_filters.remote(flush_after=True) for r in remotes] ) for rf in remote_filters: for k in local_filters: local_filters[k].apply_changes(rf[k], with_buffer=False) if update_remote: copies = {k: v.as_serializable() for k, v in local_filters.items()} remote_copy = ray.put(copies) [r.sync_filters.remote(remote_copy) for r in remotes]
d_id: 76,986 | id: 261,775 | n_whitespaces: 37 | path: sklearn/tests/test_base.py | n_words: 19 | n_identifiers: 11
random_cut:
def test_estimator_empty_instance_dict(estimator): state = estimator.__getstate__() expected = {"_sklearn_version": sklearn.__version__} assert state == expected # this should not raise pickle.loads(pickle.dumps(BaseEstimator()))
FIX fix pickling for empty object with Python 3.11+ (#25188) Co-authored-by: Adrin Jalali <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]> Python 3.11 introduces `__getstate__` on the `object` level, which breaks our existing `__getstate__` code for objects w/o any attributes. This fixes the issue.
fun_name: test_estimator_empty_instance_dict | commit_id: 9017c701833114a75903f580dd0772e1d8d7d125 | repo: scikit-learn | file_name: test_base.py
ast_levels: 11 | nloc: 5 | url: https://github.com/scikit-learn/scikit-learn.git | complexity: 1 | token_counts: 39 | n_ast_errors: 0 | vocab_size: 16 | n_ast_nodes: 70 | language: Python
{ "docstring": "Check that ``__getstate__`` returns an empty ``dict`` with an empty\n instance.\n\n Python 3.11+ changed behaviour by returning ``None`` instead of raising an\n ``AttributeError``. Non-regression test for gh-25188.\n ", "language": "en", "n_whitespaces": 39, "n_words": 27, "vocab_size": 24 }
def test_estimator_empty_instance_dict(estimator): state = estimator.__getstate__() expected = {"_sklearn_version": sklearn.__version__} assert state == expected # this should not raise pickle.loads(pickle.dumps(BaseEstimator()))
d_id: 16,740 | id: 78,234 | n_whitespaces: 42 | path: wagtail/admin/tests/test_templatetags.py | n_words: 13 | n_identifiers: 9
random_cut:
def test_with_variables(self): context = Context({"name": "j
Introduce new template fragment composition tags
fun_name: test_with_variables | commit_id: 524cab82e33b43463b746c3df1a80657b3ae874a | repo: wagtail | file_name: test_templatetags.py
ast_levels: 11 | nloc: 15 | url: https://github.com/wagtail/wagtail.git | complexity: 1 | token_counts: 37 | n_ast_errors: 0 | vocab_size: 11 | n_ast_nodes: 68 | language: Python
{ "docstring": "\n {% load wagtailadmin_tags %}\n {% fragment as my_fragment %}\n <p>Hello, {{ name|title }}</p>\n {% endfragment %}\n Text coming after:\n {{ my_fragment }}\n \n Text coming after:\n <p>Hello, Jonathan Wells</p>\n ", "language": "en", "n_whitespaces": 136, "n_words": 28, "vocab_size": 18 }
def test_with_variables(self): context = Context({"name": "jonathan wells"}) template = expected = self.assertHTMLEqual(expected, Template(template).render(context))
d_id: 78,390 | id: 266,422 | n_whitespaces: 122 | path: lib/ansible/executor/module_common.py | n_words: 54 | n_identifiers: 15
random_cut:
def _extract_interpreter(b_module_data): interpreter = None args = [] b_lines = b_module_data.split(b"\n", 1) if b_lines[0].startswith(b"#!"): b_shebang = b_lines[0].strip() # shlex.split on python-2.6 needs bytes. On python-3.x it needs text cli_split =
Allow specifying specific python via shebang (#76677) modules with python were always normalized to /usr/bin/python, while other interpreters could have specific versions. * now shebang is always constructed by get_shebang and args are preserved * only update shebang if interpreter changed * updated test expectation * added python shebang test
fun_name: _extract_interpreter | commit_id: 9142be2f6cabbe6597c9254c5bb9186d17036d55 | repo: ansible | file_name: module_common.py
ast_levels: 14 | nloc: 11 | url: https://github.com/ansible/ansible.git | complexity: 3 | token_counts: 98 | n_ast_errors: 0 | vocab_size: 39 | n_ast_nodes: 162 | language: Python
{ "docstring": "\n Used to extract shebang expression from binary module data and return a text\n string with the shebang, or None if no shebang is detected.\n ", "language": "en", "n_whitespaces": 34, "n_words": 24, "vocab_size": 23 }
def _extract_interpreter(b_module_data): interpreter = None args = [] b_lines = b_module_data.split(b"\n", 1) if b_lines[0].startswith(b"#!"): b_shebang = b_lines[0].strip() # shlex.split on python-2.6 needs bytes. On python-3.x it needs text cli_split = shlex.split(to_native(b_shebang[2:], errors='surrogate_or_strict')) # convert args to text cli_split = [to_text(a, errors='surrogate_or_strict') for a in cli_split] interpreter = cli_split[0] args = cli_split[1:] return interpreter, args
d_id: 28,198 | id: 126,557 | n_whitespaces: 72 | path: python/ray/tune/tests/test_tune_restore.py | n_words: 26 | n_identifiers: 9
random_cut:
def test_resource_exhausted_info(self):
[tune] Fix test_resource_exhausted_info test (#27426) #27213 broke this test Signed-off-by: Kai Fricke <[email protected]>
fun_name: test_resource_exhausted_info | commit_id: 46ed3557ba6b4f4f72c15ef960aba5270ada2a9c | repo: ray | file_name: test_tune_restore.py
ast_levels: 11 | nloc: 11 | url: https://github.com/ray-project/ray.git | complexity: 2 | token_counts: 51 | n_ast_errors: 0 | vocab_size: 25 | n_ast_nodes: 56 | language: Python
{ "docstring": "This is to test if helpful information is displayed when\n the objects captured in trainable/training function are too\n large and RESOURCES_EXHAUSTED error of gRPC is triggered.", "language": "en", "n_whitespaces": 39, "n_words": 26, "vocab_size": 24 }
def test_resource_exhausted_info(self): # generate some random data to be captured implicitly in training func. from sklearn.datasets import fetch_olivetti_faces a_large_array = [] for i in range(50): a_large_array.append(fetch_olivetti_faces())
d_id: 11,268 | id: 55,193 | n_whitespaces: 394 | path: tests/conftest.py | n_words: 77 | n_identifiers: 22
random_cut:
def testing_session_settings(): with tempfile.TemporaryDirectory() as tmpdir: profile = prefect.settings.Profile( name="test-session", settings={ # Set PREFECT_HOME to a temporary directory to avoid clobbering # environments and settings PREFECT_HOME: tmpdir, PREFECT_PROFILES_PATH: "$PREFECT_HOME/profiles.toml", # Enable debug logging PREFECT_LOGGING_LEVEL: "DEBUG", # Disable shipping logs to the API; # can be enabled by the `enable_orion_handler` mark PREFECT_LOGGING_ORION_ENABLED: False, # Disable services for test runs PREFECT_ORION_ANALYTICS_ENABLED: False, PREFECT_ORION_SERVICES_LATE_RUNS_ENABLED: False, PREFECT_ORION_SERVICES_SCHEDULER_ENABLED: False, }, source=__file__, )
Squash issues with tests
fun_name: testing_session_settings | commit_id: 4adc737611ffa284d9952779ba2f68174a7e73cc | repo: prefect | file_name: conftest.py
ast_levels: 14 | nloc: 21 | url: https://github.com/PrefectHQ/prefect.git | complexity: 1 | token_counts: 87 | n_ast_errors: 0 | vocab_size: 62 | n_ast_nodes: 146 | language: Python
{ "docstring": "\n Creates a fixture for the scope of the test session that modifies setting defaults.\n\n This ensures that tests are isolated from existing settings, databases, etc.\n ", "language": "en", "n_whitespaces": 35, "n_words": 25, "vocab_size": 23 }
def testing_session_settings(): with tempfile.TemporaryDirectory() as tmpdir: profile = prefect.settings.Profile( name="test-session", settings={ # Set PREFECT_HOME to a temporary directory to avoid clobbering # environments and settings PREFECT_HOME: tmpdir, PREFECT_PROFILES_PATH: "$PREFECT_HOME/profiles.toml", # Enable debug logging PREFECT_LOGGING_LEVEL: "DEBUG", # Disable shipping logs to the API; # can be enabled by the `enable_orion_handler` mark PREFECT_LOGGING_ORION_ENABLED: False, # Disable services for test runs PREFECT_ORION_ANALYTICS_ENABLED: False, PREFECT_ORION_SERVICES_LATE_RUNS_ENABLED: False, PREFECT_ORION_SERVICES_SCHEDULER_ENABLED: False, }, source=__file__, ) with prefect.settings.use_profile( profile, override_environment_variables=True, include_current_context=False, ) as ctx: yield ctx
d_id: 34,592 | id: 149,925 | n_whitespaces: 25 | path: tests/strategy/strats/hyperoptable_strategy.py | n_words: 11 | n_identifiers: 7
random_cut:
def bot_start(self, **kwargs) -> None: self.buy_rsi =
Enhance hyperoptable strategy to test instance parameters
fun_name: bot_start | commit_id: 5bf021be2e8f1479753e66573575fa7cde00a2b6 | repo: freqtrade | file_name: hyperoptable_strategy.py
ast_levels: 10 | nloc: 5 | url: https://github.com/freqtrade/freqtrade.git | complexity: 1 | token_counts: 31 | n_ast_errors: 0 | vocab_size: 11 | n_ast_nodes: 50 | language: Python
{ "docstring": "\n Parameters can also be defined here ...\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
def bot_start(self, **kwargs) -> None: self.buy_rsi = IntParameter([0, 50], default=30, space='buy')
d_id: 50,864 | id: 204,736 | n_whitespaces: 33 | path: django/core/serializers/base.py | n_words: 8 | n_identifiers: 5
random_cut:
def getvalue(self): if callable(getattr(self.stream, "getvalue", None)): return self.stream.getvalue()
Refs #33476 -- Reformatted code with Black.
fun_name: getvalue | commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | repo: django | file_name: base.py
ast_levels: 10 | nloc: 3 | url: https://github.com/django/django.git | complexity: 2 | token_counts: 29 | n_ast_errors: 0 | vocab_size: 8 | n_ast_nodes: 50 | language: Python
{ "docstring": "\n Return the fully serialized queryset (or None if the output stream is\n not seekable).\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
def getvalue(self): if callable(getattr(self.stream, "getvalue", None)): return self.stream.getvalue()
d_id: 22,470 | id: 106,845 | n_whitespaces: 450 | path: py/visdom/__init__.py | n_words: 106 | n_identifiers: 24
random_cut:
def boxplot(self, X, win=None, env=None, opts=None): X = np.squeeze(X) assert X.ndim == 1 or X.ndim == 2, "X should be one or two-dimensional" if X.ndim == 1: X = X[:, None] opts = {} if opts is None else opts _title2str(opts) _assert_opts(opts) if opts.get("legend") is not None: assert ( len(opts["legend"]) == X.shape[1] ), "number of legened labels must match number of columns" data = [] for k in range(X.shape[1]):
apply black py to all python files
fun_name: boxplot | commit_id: 5b8b7f267cfaf76a2a39a727ef31a62b3909a093 | repo: visdom | file_name: __init__.py
ast_levels: 14 | nloc: 32 | url: https://github.com/fossasia/visdom.git | complexity: 7 | token_counts: 215 | n_ast_errors: 0 | vocab_size: 82 | n_ast_nodes: 357 | language: Python
{ "docstring": "\n This function draws boxplots of the specified data. It takes as input\n an `N` or an `NxM` tensor `X` that specifies the `N` data values of\n which to construct the `M` boxplots.\n\n The following plot-specific `opts` are currently supported:\n - `opts.legend`: labels for each of the columns in `X`\n ", "language": "en", "n_whitespaces": 92, "n_words": 49, "vocab_size": 41 }
def boxplot(self, X, win=None, env=None, opts=None): X = np.squeeze(X) assert X.ndim == 1 or X.ndim == 2, "X should be one or two-dimensional" if X.ndim == 1: X = X[:, None] opts = {} if opts is None else opts _title2str(opts) _assert_opts(opts) if opts.get("legend") is not None: assert ( len(opts["legend"]) == X.shape[1] ), "number of legened labels must match number of columns" data = [] for k in range(X.shape[1]): _data = { "y": X.take(k, 1).tolist(), "type": "box", } if opts.get("legend"): _data["name"] = opts["legend"][k] else: _data["name"] = "column " + str(k) data.append(_data) return self._send( { "data": data, "win": win, "eid": env, "layout": _opts2layout(opts), "opts": opts, } )
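A minimal usage sketch for the boxplot record above, assuming a Visdom server is running on the default port; the client setup, sample data and legend labels are invented and not part of the record:

import numpy as np
import visdom

vis = visdom.Visdom()  # assumes a Visdom server at the default localhost:8097
X = np.random.rand(100, 2)  # 100 samples, 2 columns -> 2 boxplots
# 'legend' must supply one label per column, matching the assert in boxplot()
vis.boxplot(X=X, opts=dict(legend=['alpha', 'beta'], title='two boxplots'))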
50,282
203,275
248
tests/requests/tests.py
65
12
def test_body_after_POST_multipart_related(self): # Ticket #9054 # There are cases in which the multipart data is related instead of # being a binary upload, in which case it should still be accessible # via body. payload_data = b"\r\n".join([ b'--boundary', b'Content-ID: id; name="name"', b'', b'value', b'--boundary--' ]) payload = FakePayload(payload_data) request = WSGIRequest({ 'REQUEST_METHOD': 'POST',
Refs #33476 -- Refactored problematic code before reformatting by Black. In these cases Black produces unexpected results, e.g. def make_random_password( self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789', ): or cursor.execute(""" SELECT ... """, [table name], )
test_body_after_POST_multipart_related
c5cd8783825b5f6384417dac5f3889b4210b7d08
django
tests.py
12
17
https://github.com/django/django.git
1
83
0
58
146
Python
{ "docstring": "\n Reading body after parsing multipart that isn't form-data is allowed\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
def test_body_after_POST_multipart_related(self): # Ticket #9054 # There are cases in which the multipart data is related instead of # being a binary upload, in which case it should still be accessible # via body. payload_data = b"\r\n".join([ b'--boundary', b'Content-ID: id; name="name"', b'', b'value', b'--boundary--' ]) payload = FakePayload(payload_data) request = WSGIRequest({ 'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': 'multipart/related; boundary=boundary', 'CONTENT_LENGTH': len(payload), 'wsgi.input': payload, }) self.assertEqual(request.POST, {}) self.assertEqual(request.body, payload_data)
8,978
46,739
172
airflow/providers/arangodb/hooks/arangodb.py
39
13
def query(self, query, **kwargs) -> Result: try: if self.db_conn: result = self.db_conn.aql.execute(query, **kwargs) return result else: raise AirflowException( f"Failed to execute AQLQuery, error connecting to database: {self.database}" ) except AQLQueryExecuteError as
Adding ArangoDB Provider (#22548) * Adding ArangoDB Provider
query
c758c76ac336c054fd17d4b878378aa893b7a979
airflow
arangodb.py
15
18
https://github.com/apache/airflow.git
3
56
0
31
109
Python
{ "docstring": "\n Function to create a arangodb session\n and execute the AQL query in the session.\n\n :param query: AQL query\n :return: Result\n ", "language": "en", "n_whitespaces": 56, "n_words": 20, "vocab_size": 17 }
def query(self, query, **kwargs) -> Result: try: if self.db_conn: result = self.db_conn.aql.execute(query, **kwargs) return result else: raise AirflowException( f"Failed to execute AQLQuery, error connecting to database: {self.database}" ) except AQLQueryExecuteError as error: raise AirflowException(f"Failed to execute AQLQuery, error: {str(error)}")
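A hedged sketch of driving the query() method above from a task; the ArangoDBHook class name and the arangodb_conn_id argument are assumptions drawn from the provider layout, not taken from the record:

from airflow.providers.arangodb.hooks.arangodb import ArangoDBHook

hook = ArangoDBHook(arangodb_conn_id="arangodb_default")  # assumed argument name and default
# extra kwargs are forwarded to python-arango's aql.execute(), e.g. count=True
cursor = hook.query("FOR doc IN students RETURN doc", count=True)
for doc in cursor:
    print(doc)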
75,968
259,883
192
sklearn/datasets/tests/test_arff_parser.py
64
14
def test_post_process_frame(feature_names, target_names): pd = pytest.importorskip("pandas") X_original = pd.DataFrame( { "col_int_as_integer": [1, 2, 3], "col_int_as_numeric": [1, 2, 3], "col_float_as_real": [1.0, 2.0, 3.0], "col_float_as_numeric": [1.0, 2.0, 3.0], "col_categorical": ["a", "b", "c"], "col_string": ["a", "b", "c"], } ) X, y = _post_process_frame(X_original, feature_names, target_names) assert isinstance(X, pd.DataFrame) if len(target_names) >= 2: assert isinstance(y, pd.DataFrame) elif len(target_names) == 1: assert isinstance(y, pd.Series) else:
ENH improve ARFF parser using pandas (#21938) Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Adrin Jalali <[email protected]>
test_post_process_frame
a47d569e670fd4102af37c3165c9b1ddf6fd3005
scikit-learn
test_arff_parser.py
12
20
https://github.com/scikit-learn/scikit-learn.git
3
158
0
46
233
Python
{ "docstring": "Check the behaviour of the post-processing function for splitting a dataframe.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
def test_post_process_frame(feature_names, target_names): pd = pytest.importorskip("pandas") X_original = pd.DataFrame( { "col_int_as_integer": [1, 2, 3], "col_int_as_numeric": [1, 2, 3], "col_float_as_real": [1.0, 2.0, 3.0], "col_float_as_numeric": [1.0, 2.0, 3.0], "col_categorical": ["a", "b", "c"], "col_string": ["a", "b", "c"], } ) X, y = _post_process_frame(X_original, feature_names, target_names) assert isinstance(X, pd.DataFrame) if len(target_names) >= 2: assert isinstance(y, pd.DataFrame) elif len(target_names) == 1: assert isinstance(y, pd.Series) else: assert y is None
51,718
206,806
222
django/views/debug.py
64
19
def cleanse_setting(self, key, value): try: is_sensitive = self.hidden_settings.search(key) except TypeError: is_sensitive = False if is_sensitive: cleansed = self.cleansed_substitute elif isinstance(value, dict): cleansed = {k: self.cleanse_setting(k, v) for k, v in value.items()} elif isinstance(value, list): cleansed = [s
Refs #33476 -- Reformatted code with Black.
cleanse_setting
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
debug.py
15
18
https://github.com/django/django.git
10
138
0
37
219
Python
{ "docstring": "\n Cleanse an individual setting key/value of sensitive content. If the\n value is a dictionary, recursively cleanse the keys in that dictionary.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 20 }
def cleanse_setting(self, key, value): try: is_sensitive = self.hidden_settings.search(key) except TypeError: is_sensitive = False if is_sensitive: cleansed = self.cleansed_substitute elif isinstance(value, dict): cleansed = {k: self.cleanse_setting(k, v) for k, v in value.items()} elif isinstance(value, list): cleansed = [self.cleanse_setting("", v) for v in value] elif isinstance(value, tuple): cleansed = tuple([self.cleanse_setting("", v) for v in value]) else: cleansed = value if callable(cleansed): cleansed = CallableSettingWrapper(cleansed) return cleansed
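A short sketch of how cleanse_setting() recurses through nested settings; SafeExceptionReporterFilter is assumed (from the file path) to be the Django class this method belongs to, and the sample settings dict is invented:

from django.views.debug import SafeExceptionReporterFilter

reporter_filter = SafeExceptionReporterFilter()
value = {"API_KEY": "abc123", "servers": [{"PASSWORD": "hunter2", "host": "db1"}]}
# Keys matching the hidden-settings pattern (API, KEY, PASS, ...) are replaced by the
# cleansed substitute at every nesting level; other values pass through unchanged.
print(reporter_filter.cleanse_setting("DATABASES", value))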
81,751
276,840
105
keras/utils/generic_utils.py
42
20
def func_dump(func): if os.name == "nt": raw_code = marshal.dumps(func.__code__).replace(b"\\", b"/") code = codecs.encode(raw_code, "base64").decode("ascii") else: raw_code = marshal.dumps(func.__code__) code = codecs.encode(raw_
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
func_dump
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
generic_utils.py
14
13
https://github.com/keras-team/keras.git
4
109
0
28
185
Python
{ "docstring": "Serializes a user defined function.\n\n Args:\n func: the function to serialize.\n\n Returns:\n A tuple `(code, defaults, closure)`.\n ", "language": "en", "n_whitespaces": 40, "n_words": 17, "vocab_size": 17 }
def func_dump(func): if os.name == "nt": raw_code = marshal.dumps(func.__code__).replace(b"\\", b"/") code = codecs.encode(raw_code, "base64").decode("ascii") else: raw_code = marshal.dumps(func.__code__) code = codecs.encode(raw_code, "base64").decode("ascii") defaults = func.__defaults__ if func.__closure__: closure = tuple(c.cell_contents for c in func.__closure__) else: closure = None return code, defaults, closure
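The tuple produced by func_dump() is normally consumed by a matching func_load(); a round-trip sketch, assuming func_load is importable alongside func_dump in this Keras version:

from keras.utils.generic_utils import func_dump, func_load

def add(a, b=10):
    return a + b

code, defaults, closure = func_dump(add)  # marshal + base64-encode the code object
restored = func_load(code, defaults=defaults, closure=closure)
assert restored(5) == 15  # default arguments survive the round trip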
47,486
195,948
56
sympy/polys/polyclasses.py
13
7
def cauchy_upper_bound(f): if not f.lev:
Add new methods to `DMP` class, corresp. to new funcs.
cauchy_upper_bound
d032a7a870672667f778be8bf02a3eba4ae89381
sympy
polyclasses.py
11
5
https://github.com/sympy/sympy.git
2
30
0
13
53
Python
{ "docstring": "Computes the Cauchy upper bound on the roots of ``f``. ", "language": "en", "n_whitespaces": 10, "n_words": 10, "vocab_size": 9 }
def cauchy_upper_bound(f): if not f.lev: return dup_cauchy_upper_bound(f.rep, f.dom) else: raise ValueError('univariate polynomial expected')
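The bound wrapped above is the classical Cauchy bound 1 + max|a_i|/|a_n| over the non-leading coefficients; a plain-Python illustration of the formula, kept independent of the DMP/dup_* machinery (the coefficient list is invented):

def cauchy_bound(coeffs):
    # coeffs in decreasing degree order, e.g. x**3 - 2*x + 5 -> [1, 0, -2, 5]
    lead, *rest = coeffs
    if not rest:
        return 0.0
    return 1.0 + max(abs(c) for c in rest) / abs(lead)

print(cauchy_bound([1, 0, -2, 5]))  # every root of the polynomial satisfies |z| <= 6.0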
48,459
197,316
868
sympy/core/sympify.py
288
29
def kernS(s): hit = False quoted = '"' in s or "'" in s if '(' in s and not quoted: if s.count('(') != s.count(")"): raise SympifyError('unmatched left parenthesis') # strip all space from s s = ''.join(s.split()) olds = s # now use space to represent a symbol that # will # step 1. turn potential 2-arg Muls into 3-arg versions # 1a. *( -> * *( s = s.replace('*(', '* *(') # 1b. close up exponentials s = s.replace('** *', '**') # 2. handle the implied multiplication of a negated # parenthesized expression in two steps # 2a: -(...) --> -( *(...) target = '-( *(' s = s.replace('-(', target) # 2b: double the matching closing parenthesis # -( *(...) --> -( *(...)) i = nest = 0 assert target.endswith('(') # assumption below while True: j = s.find(target, i) if j == -1: break j += len(target) - 1 for j in range(j, len(s)): if s[j] == "(": nest += 1 elif s[j] == ")": nest -= 1 if nest == 0: break s = s[:j] + ")" + s[j:] i = j + 2 # the first char after 2nd ) if ' ' in s: # get a unique kern kern = '_' while kern in s: kern += choice(string.ascii_letters + string.digits) s = s.replace(' ', kern) hit = kern in s else: hit = False for i in range(2): try: expr = sympify(s) break except TypeError: # the kern might cause unknown errors... if hit: s = olds # maybe it didn't like the kern; use un-kerned s hit = False continue expr = sympify(s) # let original error raise if n
Remove abbreviations in documentation
kernS
65be461082dda54c8748922f9c29a19af1279fe1
sympy
sympify.py
16
53
https://github.com/sympy/sympy.git
17
307
0
166
535
Python
{ "docstring": "Use a hack to try keep autosimplification from distributing a\n a number into an Add; this modification does not\n prevent the 2-arg Mul from becoming an Add, however.\n\n Examples\n ========\n\n >>> from sympy.core.sympify import kernS\n >>> from sympy.abc import x, y\n\n The 2-arg Mul distributes a number (or minus sign) across the terms\n of an expression, but kernS will prevent that:\n\n >>> 2*(x + y), -(x + 1)\n (2*x + 2*y, -x - 1)\n >>> kernS('2*(x + y)')\n 2*(x + y)\n >>> kernS('-(x + 1)')\n -(x + 1)\n\n If use of the hack fails, the un-hacked string will be passed to sympify...\n and you get what you get.\n\n XXX This hack should not be necessary once issue 4596 has been resolved.\n ", "language": "en", "n_whitespaces": 175, "n_words": 121, "vocab_size": 82 }
def kernS(s): hit = False quoted = '"' in s or "'" in s if '(' in s and not quoted: if s.count('(') != s.count(")"): raise SympifyError('unmatched left parenthesis') # strip all space from s s = ''.join(s.split()) olds = s # now use space to represent a symbol that # will # step 1. turn potential 2-arg Muls into 3-arg versions # 1a. *( -> * *( s = s.replace('*(', '* *(') # 1b. close up exponentials s = s.replace('** *', '**') # 2. handle the implied multiplication of a negated # parenthesized expression in two steps # 2a: -(...) --> -( *(...) target = '-( *(' s = s.replace('-(', target) # 2b: double the matching closing parenthesis # -( *(...) --> -( *(...)) i = nest = 0 assert target.endswith('(') # assumption below while True: j = s.find(target, i) if j == -1: break j += len(target) - 1 for j in range(j, len(s)): if s[j] == "(": nest += 1 elif s[j] == ")": nest -= 1 if nest == 0: break s = s[:j] + ")" + s[j:] i = j + 2 # the first char after 2nd ) if ' ' in s: # get a unique kern kern = '_' while kern in s: kern += choice(string.ascii_letters + string.digits) s = s.replace(' ', kern) hit = kern in s else: hit = False for i in range(2): try: expr = sympify(s) break except TypeError: # the kern might cause unknown errors... if hit: s = olds # maybe it didn't like the kern; use un-kerned s hit = False continue expr = sympify(s) # let original error raise if not hit: return expr from .symbol import Symbol rep = {Symbol(kern): 1}
117,007
319,856
56
src/documents/tests/test_classifier.py
17
11
def test_load_corrupt_file(self, patched_pickle_load): # First load is the schema version p
Updates the classifier to catch warnings from scikit-learn and rebuild the model file when this happens
test_load_corrupt_file
77fbbe95ffb965525136982846f50e3ad8244de9
paperless-ngx
test_classifier.py
10
4
https://github.com/paperless-ngx/paperless-ngx.git
1
36
0
17
63
Python
{ "docstring": "\n GIVEN:\n - Corrupted classifier pickle file\n WHEN:\n - An attempt is made to load the classifier\n THEN:\n - The ClassifierModelCorruptError is raised\n ", "language": "en", "n_whitespaces": 84, "n_words": 22, "vocab_size": 18 }
def test_load_corrupt_file(self, patched_pickle_load): # First load is the schema version patched_pickle_load.side_effect = [DocumentClassifier.FORMAT_VERSION, OSError()] with self.assertRaises(ClassifierModelCorruptError): self.classifier.load()
86,883
287,694
20
homeassistant/components/plugwise/select.py
6
6
def current_option(self) -> str: return self.device[self.entity_description.current_option_key]
Rename property in Plugwise EntityDescription (#78935)
current_option
5c7d40cccf473c3549900949fe410dbe9d2e1a19
core
select.py
8
3
https://github.com/home-assistant/core.git
1
19
0
6
32
Python
{ "docstring": "Return the selected entity option to represent the entity state.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 8 }
def current_option(self) -> str: return self.device[self.entity_description.current_option_key]
4,771
24,604
172
ppstructure/table/convert_label2html.py
63
18
def gen_html(img): html_code = img['html']['structure']['tokens'].copy() to_insert = [i for i, tag in enumerate(html_code) if tag in ('<td>', '>')] for i, cell in zip(to_insert[::-1], img['html']['cells'][::-1]): if cell['tokens']: text = ''.join(cell['tokens']) # skip empty text sp_char_list = ['<b>', '</b>', '\u2028', ' ', '<i>', '</i>'] text_remove_style = skip_char(text, sp_char_list) if len(text_remove_style) == 0: continue html_code.insert(i + 1, text) html_code = ''.join(html_code) html_code = '<html><body><table>{}</table
add copyright
gen_html
97f7f748085fbe516952d36808735902d305da40
PaddleOCR
convert_label2html.py
14
14
https://github.com/PaddlePaddle/PaddleOCR.git
6
149
0
46
265
Python
{ "docstring": " \n Formats HTML code from tokenized annotation of img\n ", "language": "en", "n_whitespaces": 16, "n_words": 8, "vocab_size": 8 }
def gen_html(img): html_code = img['html']['structure']['tokens'].copy() to_insert = [i for i, tag in enumerate(html_code) if tag in ('<td>', '>')] for i, cell in zip(to_insert[::-1], img['html']['cells'][::-1]): if cell['tokens']: text = ''.join(cell['tokens']) # skip empty text sp_char_list = ['<b>', '</b>', '\u2028', ' ', '<i>', '</i>'] text_remove_style = skip_char(text, sp_char_list) if len(text_remove_style) == 0: continue html_code.insert(i + 1, text) html_code = ''.join(html_code) html_code = '<html><body><table>{}</table></body></html>'.format(html_code) return html_code
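An invented minimal input showing the annotation structure gen_html() expects; the 'html' -> 'structure'/'cells' layout is taken from the code above, the cell contents are made up, and gen_html plus its skip_char helper are assumed to be imported from the module above:

img = {
    "html": {
        "structure": {"tokens": ["<tr>", "<td>", "</td>", "<td>", "</td>", "</tr>"]},
        "cells": [
            {"tokens": ["c", "e", "l", "l", "1"]},
            {"tokens": ["c", "e", "l", "l", "2"]},
        ],
    }
}
print(gen_html(img))
# '<html><body><table><tr><td>cell1</td><td>cell2</td></tr></table></body></html>'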
44,978
185,332
35
src/textual/events.py
10
8
def key_aliases(self) -> Iterable[str]: for alias in _get_key_aliases(self.key): yi
Move aliasing/normalisation logic into Key
key_aliases
bd3a723d86f9c550b0324153975580b70509cb22
textual
events.py
10
4
https://github.com/Textualize/textual.git
2
26
0
10
44
Python
{ "docstring": "Get the aliases for the key, including the key itself", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 8 }
def key_aliases(self) -> Iterable[str]: for alias in _get_key_aliases(self.key): yield _normalize_key(alias)
78,141
265,561
227
netbox/ipam/tests/test_api.py
69
30
def test_create_single_available_ip(self): vrf = VRF.objects.create(name='VRF 1') prefix = Prefix.objects.create(prefix=IPNetwork('192.0.2.0/30'), vrf=vrf, is_pool=True) u
Closes #10031: Enforce 'application/json' content type for REST API requests
test_create_single_available_ip
bfbf97aec9119539f7f42cf16f52d0ca8203ba60
netbox
test_api.py
13
16
https://github.com/netbox-community/netbox.git
2
194
0
57
323
Python
{ "docstring": "\n Test retrieval of the first available IP address within a parent prefix.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
def test_create_single_available_ip(self): vrf = VRF.objects.create(name='VRF 1') prefix = Prefix.objects.create(prefix=IPNetwork('192.0.2.0/30'), vrf=vrf, is_pool=True) url = reverse('ipam-api:prefix-available-ips', kwargs={'pk': prefix.pk}) self.add_permissions('ipam.view_prefix', 'ipam.add_ipaddress') # Create all four available IPs with individual requests for i in range(1, 5): data = { 'description': 'Test IP {}'.format(i) } response = self.client.post(url, data, format='json', **self.header) self.assertHttpStatus(response, status.HTTP_201_CREATED) self.assertEqual(response.data['vrf']['id'], vrf.pk) self.assertEqual(response.data['description'], data['description']) # Try to create one more IP response = self.client.post(url, {}, format='json', **self.header) self.assertHttpStatus(response, status.HTTP_409_CONFLICT) self.assertIn('detail', response.data)
@pytest.fixture(name="climate_adc_t3000_missing_mode")
91,775
292,702
105
tests/components/zwave_js/conftest.py
38
17
def climate_adc_t3000_missing_setpoint_fixture(client, climate_adc_t3000_state): data = copy.deepcopy(climate_adc_t3000_state) data["name"] = f"{data['name']} missing setpoint" for value in data["values"][:]: if ( value["commandClassName"] == "Humidity Control Setpoint" and value["propertyKeyName"] == "De-humidifier" ): data["values"].remove(value) node = Node(client, data) client.driver.controller.nodes[node.node_id] = node return node @pytest.fixture(name="climate_adc_t3000_missing_mode")
Add Humidifier support to zwave_js (#65847)
climate_adc_t3000_missing_setpoint_fixture
87593fa3ec4edd1fb467ed0709ef57c3c41e0fc4
core
conftest.py
13
12
https://github.com/home-assistant/core.git
4
84
1
32
171
Python
{ "docstring": "Mock a climate ADC-T3000 node with missing de-humidify setpoint.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def climate_adc_t3000_missing_setpoint_fixture(client, climate_adc_t3000_state): data = copy.deepcopy(climate_adc_t3000_state) data["name"] = f"{data['name']} missing setpoint" for value in data["values"][:]: if ( value["commandClassName"] == "Humidity Control Setpoint" and value["propertyKeyName"] == "De-humidifier" ): data["values"].remove(value) node = Node(client, data) client.driver.controller.nodes[node.node_id] = node return node @pytest.fixture(name="climate_adc_t3000_missing_mode")
r""" Returns a tuple of possible :py:meth:`~sympy.solvers.ode.dsolve` classifications for an ODE. The tuple is ordered so that first item is the classification that :py:meth:`~sympy.solvers.ode.dsolve` uses to solve the ODE by default. In general, classifications at the near the beginning of the list will produce better solutions faster than those near the end, thought there are always exceptions. To make :py:meth:`~sympy.solvers.ode.dsolve` use a different classification, use ``dsolve(ODE, func, hint=<classification>)``. See also the :py:meth:`~sympy.solvers.ode.dsolve` docstring for different meta-hints you can use. If ``dict`` is true, :py:meth:`~sympy.solvers.ode.classify_ode` will return a dictionary of ``hint:match`` expression terms. This is intended for internal use by :py:meth:`~sympy.solvers.ode.dsolve`. Note that because dictionaries are ordered arbitrarily, this will most likely not be in the same order as the tuple. You can get help on different hints by executing ``help(ode.ode_hintname)``, where ``hintname`` is the name of the hint without ``_Integral``. See :py:data:`~sympy.solvers.ode.allhints` or the :py:mod:`~sympy.solvers.ode` docstring for a list of all supported hints that can be returned from :py:meth:`~sympy.solvers.ode.classify_ode`. Notes ===== These are remarks on hint names. ``_Integral``r""" Returns a tuple ofpy:meth:classificationsan ODE. The tuple is ordered so that first item is the classification that :py:meth:`~sympy.solvers.ode.dsolve` uses to solve the ODE by default. In general, classifications at the near the beginning of the list will produce better solutions faster than those near the end, thought there are always exceptions. To make :py:meth:`~sympy.solvers.ode.dsolve` use a differentclassifications at the near the beginning of the list will produce better solutions faster than those near thethought there are always exceptions. To make :use ``hint=<classification>)``. See also the :py:meth:`~sympy.solvers.ode.dsolve` docstring for different=)``. See alsohints you can use. Ifyou can usedict:py:meth:`~sympy.solvers.ode.classify_ode` will return a dictionary of ``hint:match`` expression terms. This is intended for internal use by :py:meth:`~sympy.solvers.ode.dsolve`. Note that because dictionaries are orderedfor internal use by :py:meththis will most likely not be in the same order as the tuple. You can get help on different hints by executing ``help(ode.ode_hintname)``, where ``hintname`` is the name of the hint without ``_Integral``. See :py:data:`~sympy.solvers.ode.allhints` or the :py:mod:`~sympy.solvers.ode` docstring for a list of all supported hints that can be returned from :py:meth:the samethe tuple. You can get help on different hints by executing ``help(ode.ode_hintname)``, where ``hintname`` is the name of the hint without`_Integral=== These are remarks on hint names. ``If a classification has ``_Integral`` at the end, it will_Integralexpressionanclass:
49,006
198,577
14
sympy/solvers/ode/ode.py
12
106
def classify_ode(eq, func=None, dict=False, ics=None, *, prep=True, xi=None, eta=None, n=None, **kwargs): r
Allow initial conditions of the form f(0): f(0) in dsolve There was a check that the replacement value does not contain f, but this makes perfect sense. The check was changed to checking that the value doesn't contain x. Fixes #23702
classify_ode
32589850ff6a970bee8af3034980e37932db2eb9
sympy
ode.py
10
270
https://github.com/sympy/sympy.git
66
1,582
28
12
439
Python
{ "docstring": "\n Returns a tuple of possible :py:meth:`~sympy.solvers.ode.dsolve`\n classifications for an ODE.\n\n The tuple is ordered so that first item is the classification that\n :py:meth:`~sympy.solvers.ode.dsolve` uses to solve the ODE by default. In\n general, classifications at the near the beginning of the list will\n produce better solutions faster than those near the end, thought there are\n always exceptions. To make :py:meth:`~sympy.solvers.ode.dsolve` use a\n different classification, use ``dsolve(ODE, func,\n hint=<classification>)``. See also the\n :py:meth:`~sympy.solvers.ode.dsolve` docstring for different meta-hints\n you can use.\n\n If ``dict`` is true, :py:meth:`~sympy.solvers.ode.classify_ode` will\n return a dictionary of ``hint:match`` expression terms. This is intended\n for internal use by :py:meth:`~sympy.solvers.ode.dsolve`. Note that\n because dictionaries are ordered arbitrarily, this will most likely not be\n in the same order as the tuple.\n\n You can get help on different hints by executing\n ``help(ode.ode_hintname)``, where ``hintname`` is the name of the hint\n without ``_Integral``.\n\n See :py:data:`~sympy.solvers.ode.allhints` or the\n :py:mod:`~sympy.solvers.ode` docstring for a list of all supported hints\n that can be returned from :py:meth:`~sympy.solvers.ode.classify_ode`.\n\n Notes\n =====\n\n These are remarks on hint names.\n\n ``_Integral``\n\n If a classification has ``_Integral`` at the end, it will return the\n expression with an unevaluated :py:class:`~.Integral`", "language": "en", "n_whitespaces": 280, "n_words": 184, "vocab_size": 118 }
def classify_ode(eq, func=None, dict=False, ics=None, *, prep=True, xi=None, eta=None, n=None, **kwargs): r
69,724
241,885
259
scipy/stats/_stats_py.py
108
35
def mode(a, axis=0, nan_policy='propagate'): a, axis = _chk_asarray(a, axis) if a.size == 0: return ModeResult(np.array([]), np.array([])) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.mode(a, axis) if a.dtype == object and np.nan in set(a.ravel()): # Fall back to a slower method since np.unique does not work with NaN scores = set(np.ravel(a)) # get ALL unique values testshape = list(a.shape) testshape[axis] = 1 oldmostfreq = np.zeros(testshape, dtype=a.dtype) oldcounts = np.zeros(testshape, dtype=int) for score in scores: template = (a == score) counts = np.sum(template, axis, keepdims=True) mostfrequent = np.where(counts > oldcounts, score, oldmostfreq) oldcounts = np.maximum(counts, oldcounts)
MAINT: stats: mode: fix negative axis issue with np.moveaxis instead of custom code (#15421)
mode
7438fe5edfb565ff341fa6ab054461fcdd504aa2
scipy
_stats_py.py
13
31
https://github.com/scipy/scipy.git
8
340
0
78
336
Python
{ "docstring": "Return an array of the modal (most common) value in the passed array.\n\n If there is more than one such value, only the smallest is returned.\n The bin-count for the modal bins is also returned.\n\n Parameters\n ----------\n a : array_like\n n-dimensional array of which to find mode(s).\n axis : int or None, optional\n Axis along which to operate. Default is 0. If None, compute over\n the whole array `a`.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n\n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n\n Returns\n -------\n mode : ndarray\n Array of modal values.\n count : ndarray\n Array of counts for each mode.\n\n Examples\n --------\n >>> a = np.array([[6, 8, 3, 0],\n ... [3, 2, 1, 7],\n ... [8, 1, 8, 4],\n ... [5, 3, 0, 5],\n ... [4, 7, 5, 9]])\n >>> from scipy import stats\n >>> stats.mode(a)\n ModeResult(mode=array([[3, 1, 0, 0]]), count=array([[1, 1, 1, 1]]))\n\n To get mode of whole array, specify ``axis=None``:\n\n >>> stats.mode(a, axis=None)\n ModeResult(mode=array([3]), count=array([3]))\n\n ", "language": "en", "n_whitespaces": 390, "n_words": 183, "vocab_size": 131 }
def mode(a, axis=0, nan_policy='propagate'): a, axis = _chk_asarray(a, axis) if a.size == 0: return ModeResult(np.array([]), np.array([])) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.mode(a, axis) if a.dtype == object and np.nan in set(a.ravel()): # Fall back to a slower method since np.unique does not work with NaN scores = set(np.ravel(a)) # get ALL unique values testshape = list(a.shape) testshape[axis] = 1 oldmostfreq = np.zeros(testshape, dtype=a.dtype) oldcounts = np.zeros(testshape, dtype=int) for score in scores: template = (a == score) counts = np.sum(template, axis, keepdims=True) mostfrequent = np.where(counts > oldcounts, score, oldmostfreq) oldcounts = np.maximum(counts, oldcounts) oldmostfreq = mostfrequent return ModeResult(mostfrequent, oldcounts)
79,298
268,024
38
test/lib/ansible_test/_internal/host_profiles.py
10
6
def wait_for_instance(self) -> AnsibleCoreCI: core_ci = self.get_instance() cor
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
wait_for_instance
3eb0485dd92c88cc92152d3656d94492db44b183
ansible
host_profiles.py
8
5
https://github.com/ansible/ansible.git
1
22
0
9
40
Python
{ "docstring": "Wait for an AnsibleCoreCI VM instance to become ready.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def wait_for_instance(self) -> AnsibleCoreCI: core_ci = self.get_instance() core_ci.wait() return core_ci
49,306
199,635
90
sympy/polys/orthopolys.py
41
16
def spherical_bessel_fn(n, x=None, polys=False): if n < 0: dup = dup_spherical_bessel_fn_minus(-int(n), ZZ) else: dup = dup_spherical_bessel_fn(int(n), ZZ) poly = DMP(dup, ZZ) if x is not None: poly = Poly.new(poly, 1/x) else: poly = PurePoly.new(poly, 1/Dummy('x')) return poly if polys el
Link Appell sequences to corresponding continuous functions
spherical_bessel_fn
c6be089c27dd1891d4e273e7701926f7e5cf4d6f
sympy
orthopolys.py
15
11
https://github.com/sympy/sympy.git
4
97
0
28
154
Python
{ "docstring": "\n Coefficients for the spherical Bessel functions.\n\n Those are only needed in the jn() function.\n\n The coefficients are calculated from:\n\n fn(0, z) = 1/z\n fn(1, z) = 1/z**2\n fn(n-1, z) + fn(n+1, z) == (2*n+1)/z * fn(n, z)\n\n Parameters\n ==========\n\n n : int\n `n` decides the degree of polynomial\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n\n Examples\n ========\n\n >>> from sympy.polys.orthopolys import spherical_bessel_fn as fn\n >>> from sympy import Symbol\n >>> z = Symbol(\"z\")\n >>> fn(1, z)\n z**(-2)\n >>> fn(2, z)\n -1/z + 3/z**3\n >>> fn(3, z)\n -6/z**2 + 15/z**4\n >>> fn(4, z)\n 1/z - 45/z**3 + 105/z**5\n\n ", "language": "en", "n_whitespaces": 197, "n_words": 107, "vocab_size": 77 }
def spherical_bessel_fn(n, x=None, polys=False): if n < 0: dup = dup_spherical_bessel_fn_minus(-int(n), ZZ) else: dup = dup_spherical_bessel_fn(int(n), ZZ) poly = DMP(dup, ZZ) if x is not None: poly = Poly.new(poly, 1/x) else: poly = PurePoly.new(poly, 1/Dummy('x')) return poly if polys else poly.as_expr()
46,857
192,158
263
torchvision/models/optical_flow/raft.py
65
25
def raft_large(*, pretrained=False, progress=True, **kwargs): return _raft( arch="raft_large", pretrained=pretrained, progress=progress, # Feature encoder feature_encoder_layers=(64, 64, 96, 128, 256), feature_encoder_block=ResidualBlock, feature_encoder_norm_layer=InstanceNorm2d, # Context encoder context_encoder_layers=(64, 64, 96, 128, 256), context_encoder_block=ResidualBlock, conte
Change default weights of RAFT model builders (#5381) * Change default weights of RAFT model builders * update handle_legacy_interface input * Oops, wrong default
raft_large
97eddc5d6a83a9bf620070075ef1e1864c9a68ac
vision
raft.py
10
23
https://github.com/pytorch/vision.git
1
152
0
52
205
Python
{ "docstring": "RAFT model from\n `RAFT: Recurrent All Pairs Field Transforms for Optical Flow <https://arxiv.org/abs/2003.12039>`_.\n\n Args:\n pretrained (bool): Whether to use weights that have been pre-trained on\n :class:`~torchvsion.datasets.FlyingChairs` + :class:`~torchvsion.datasets.FlyingThings3D`\n with two fine-tuning steps:\n\n - one on :class:`~torchvsion.datasets.Sintel` + :class:`~torchvsion.datasets.FlyingThings3D`\n - one on :class:`~torchvsion.datasets.KittiFlow`.\n\n This corresponds to the ``C+T+S/K`` strategy in the paper.\n\n progress (bool): If True, displays a progress bar of the download to stderr.\n\n Returns:\n nn.Module: The model.\n ", "language": "en", "n_whitespaces": 156, "n_words": 68, "vocab_size": 56 }
def raft_large(*, pretrained=False, progress=True, **kwargs): return _raft( arch="raft_large", pretrained=pretrained, progress=progress, # Feature encoder feature_encoder_layers=(64, 64, 96, 128, 256), feature_encoder_block=ResidualBlock, feature_encoder_norm_layer=InstanceNorm2d, # Context encoder context_encoder_layers=(64, 64, 96, 128, 256), context_encoder_block=ResidualBlock, context_encoder_norm_layer=BatchNorm2d, # Correlation block corr_block_num_levels=4, corr_block_radius=4, # Motion encoder motion_encoder_corr_layers=(256, 192), motion_encoder_flow_layers=(128, 64), motion_encoder_out_channels=128, # Recurrent block recurrent_block_hidden_state_size=128, recurrent_block_kernel_size=((1, 5), (5, 1)), recurrent_block_padding=((0, 2), (2, 0)), # Flow head flow_head_hidden_size=256, # Mask predictor use_mask_predictor=True, **kwargs, )
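A hedged inference sketch for the builder above; the random tensors stand in for real, properly normalised frames, and the input size is chosen to be divisible by 8 as RAFT requires:

import torch
from torchvision.models.optical_flow import raft_large

model = raft_large(pretrained=True, progress=False).eval()
img1 = torch.rand(1, 3, 520, 960)  # stand-in frames; real inputs should be
img2 = torch.rand(1, 3, 520, 960)  # normalised the way the weights expect
with torch.no_grad():
    flow_predictions = model(img1, img2)  # list of flow estimates, coarse to refined
print(flow_predictions[-1].shape)  # final estimate: torch.Size([1, 2, 520, 960])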
2,034
11,433
408
jina/hubble/hubio.py
66
22
def _get_prettyprint_usage(self, console, executor_name, usage_kind=None): from rich.panel import Panel from rich.syntax import Syntax flow_plain = f flow_docker = f flow_sandbox = f panels = [ Panel( Syntax( p[0], 'python', theme='monokai', word_wrap=True, ), title=p[1], width=80, expand=False, ) for p in [ (flow_plain, 'Use via source'), (flow_docker, 'Use in Docker'), (flow_sandbox, 'Use in Sandbox'), ] ] if usage_kind == 'doc
feat: add sandbox after push (#4349)
_get_prettyprint_usage
c07f3c151d985b207af87ccc9115bc94c3164e55
jina
hubio.py
13
39
https://github.com/jina-ai/jina.git
4
141
0
51
231
Python
{ "docstring": "from jina import Flow\n\nf = Flow().add(uses='jinahub://{executor_name}')\nfrom jina import Flow\n\nf = Flow().add(uses='jinahub+docker://{executor_name}')\nfrom jina import Flow\n\nf = Flow().add(uses='jinahub+sandbox://{executor_name}')\n", "language": "en", "n_whitespaces": 15, "n_words": 21, "vocab_size": 9 }
def _get_prettyprint_usage(self, console, executor_name, usage_kind=None): from rich.panel import Panel from rich.syntax import Syntax flow_plain = f flow_docker = f flow_sandbox = f panels = [ Panel( Syntax( p[0], 'python', theme='monokai', word_wrap=True, ), title=p[1], width=80, expand=False, ) for p in [ (flow_plain, 'Use via source'), (flow_docker, 'Use in Docker'), (flow_sandbox, 'Use in Sandbox'), ] ] if usage_kind == 'docker': console.print(panels[2]) elif usage_kind == 'source': console.print(panels[1]) else: console.print(*reversed(panels))
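In the flattened code above, the three bare assignments flow_plain = f, flow_docker = f and flow_sandbox = f are residue of triple-quoted f-strings whose bodies this dump stores in the record's documentation field. A hedged reconstruction of those templates (the exact quoting in the original source is assumed):

def usage_templates(executor_name):
    # Template bodies taken from the documentation field of this record; quoting style assumed.
    flow_plain = f"from jina import Flow\n\nf = Flow().add(uses='jinahub://{executor_name}')\n"
    flow_docker = f"from jina import Flow\n\nf = Flow().add(uses='jinahub+docker://{executor_name}')\n"
    flow_sandbox = f"from jina import Flow\n\nf = Flow().add(uses='jinahub+sandbox://{executor_name}')\n"
    return flow_plain, flow_docker, flow_sandbox

print(usage_templates("MyExecutor")[0])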
51,352
206,070
66
django/http/request.py
16
7
def encoding(self, val): self._encoding = val if hasattr(self, "GET"): del self.GET if hasattr(self, "_post"): del self._p
Refs #33476 -- Reformatted code with Black.
encoding
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
request.py
8
6
https://github.com/django/django.git
3
37
0
13
63
Python
{ "docstring": "\n Set the encoding used for GET/POST accesses. If the GET or POST\n dictionary has already been created, remove and recreate it on the\n next access (so that it is decoded correctly).\n ", "language": "en", "n_whitespaces": 60, "n_words": 31, "vocab_size": 28 }
def encoding(self, val): self._encoding = val if hasattr(self, "GET"): del self.GET if hasattr(self, "_post"): del self._post
46,052
189,432
197
manim/mobject/geometry.py
44
15
def scale(self, factor, scale_tips=False, **kwargs): r if self.get_length() == 0: return self if scale_tips: super().scale(factor, **kwargs) self._set_stroke_width_from_length() return self has_tip = self.has_tip() has_start_tip = self.has_start_tip() if has_tip or has_start_tip: old_tips = self.pop_tips() super().scale(factor, **kwargs) self._set_stroke_width_from_length() if has_tip: self.add_tip(tip=old_ti
Hide more private methods from the docs. (#2468) * hide privs from text_mobject.py * hide privs from tex_mobject.py * hide privs from code_mobject.py * hide privs from svg_mobject.py * remove SVGPath and utils from __init__.py * don't import string_to_numbers * hide privs from geometry.py * hide privs from matrix.py * hide privs from numbers.py * hide privs from three_dimensions.py * forgot underscore under set_stroke_width_from_length * there were more i missed * unhidea method that was used in docs * forgot other text2hash * remove svg_path from docs
scale
902e7eb4f0147b5882a613b67467e38a1d47f01e
manim
geometry.py
11
45
https://github.com/ManimCommunity/manim.git
7
124
0
29
200
Python
{ "docstring": "Scale an arrow, but keep stroke width and arrow tip size fixed.\n\n See Also\n --------\n :meth:`~.Mobject.scale`\n\n Examples\n --------\n ::\n\n >>> arrow = Arrow(np.array([-1, -1, 0]), np.array([1, 1, 0]), buff=0)\n >>> scaled_arrow = arrow.scale(2)\n >>> np.round(scaled_arrow.get_start_and_end(), 8) + 0\n array([[-2., -2., 0.],\n [ 2., 2., 0.]])\n >>> arrow.tip.length == scaled_arrow.tip.length\n True\n\n Manually scaling the object using the default method\n :meth:`~.Mobject.scale` does not have the same properties::\n\n >>> new_arrow = Arrow(np.array([-1, -1, 0]), np.array([1, 1, 0]), buff=0)\n >>> another_scaled_arrow = VMobject.scale(new_arrow, 2)\n >>> another_scaled_arrow.tip.length == arrow.tip.length\n False\n\n ", "language": "en", "n_whitespaces": 279, "n_words": 85, "vocab_size": 60 }
def scale(self, factor, scale_tips=False, **kwargs): r if self.get_length() == 0: return self if scale_tips: super().scale(factor, **kwargs) self._set_stroke_width_from_length() return self has_tip = self.has_tip() has_start_tip = self.has_start_tip() if has_tip or has_start_tip: old_tips = self.pop_tips() super().scale(factor, **kwargs) self._set_stroke_width_from_length() if has_tip: self.add_tip(tip=old_tips[0]) if has_start_tip: self.add_tip(tip=old_tips[1], at_start=True) return self
56,213
221,110
604
python3.10.4/Lib/bdb.py
151
17
def effective(file, line, frame): possibles = Breakpoint.bplist[file, line] for b in possibles: if not b.enabled: continue if not checkfuncname(b, frame): continue # Count every hit when bp is enabled b.hits += 1 if not b.cond: # If unconditional, and ignoring go on to n
add python 3.10.4 for windows
effective
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
bdb.py
18
25
https://github.com/XX-net/XX-Net.git
9
131
0
96
217
Python
{ "docstring": "Determine which breakpoint for this file:line is to be acted upon.\n\n Called only if we know there is a breakpoint at this location. Return\n the breakpoint that was triggered and a boolean that indicates if it is\n ok to delete a temporary breakpoint. Return (None, None) if there is no\n matching breakpoint.\n ", "language": "en", "n_whitespaces": 69, "n_words": 52, "vocab_size": 37 }
def effective(file, line, frame): possibles = Breakpoint.bplist[file, line] for b in possibles: if not b.enabled: continue if not checkfuncname(b, frame): continue # Count every hit when bp is enabled b.hits += 1 if not b.cond: # If unconditional, and ignoring go on to next, else break if b.ignore > 0: b.ignore -= 1 continue else: # breakpoint and marker that it's ok to delete if temporary return (b, True) else: # Conditional bp. # Ignore count applies only to those bpt hits where the # condition evaluates to true. try: val = eval(b.cond, frame.f_globals, frame.f_locals) if val: if b.ignore > 0: b.ignore -= 1 # continue else: return (b, True) # else: # continue except: # if eval fails, most conservative thing is to stop on # breakpoint regardless of ignore count. Don't delete # temporary, as another hint to user. return (b, False) return (None, None) # -------------------- testing --------------------
@_wraps(osp_stats.truncnorm.logpdf, update_doc=False)
27,169
122,380
234
jax/_src/scipy/stats/truncnorm.py
172
23
def _log_gauss_mass(a, b): a, b = jnp.array(a), jnp.array(b) a, b = jnp.broadcast_arrays(a, b) # Note: Docstring carried over from scipy # Calculations in right tail are inaccurate, so we'll exploit the # symmetry and work only in the left tail case_left = b <= 0 case_right = a > 0 ca
implement truncnorm in jax.scipy.stats fix some shape and type issues import into namespace imports into non-_src library working logpdf test cleanup working tests for cdf and sf after fixing select relax need for x to be in (a, b) ensure behavior with invalid input matches scipy remove enforcing valid parameters in tests added truncnorm to docs whoops alphabetical fix linter error fix circular import issue
_log_gauss_mass
5784d61048facfa9dac1f1d309bde2d60a32810c
jax
truncnorm.py
13
14
https://github.com/google/jax.git
1
100
1
115
271
Python
{ "docstring": "Log of Gaussian probability mass within an interval", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def _log_gauss_mass(a, b): a, b = jnp.array(a), jnp.array(b) a, b = jnp.broadcast_arrays(a, b) # Note: Docstring carried over from scipy # Calculations in right tail are inaccurate, so we'll exploit the # symmetry and work only in the left tail case_left = b <= 0 case_right = a > 0 case_central = ~(case_left | case_right) def mass_case_left(a, b): return _log_diff(log_ndtr(b), log_ndtr(a)) def mass_case_right(a, b): return mass_case_left(-b, -a) def mass_case_central(a, b): # Note: Docstring carried over from scipy # Previously, this was implemented as: # left_mass = mass_case_left(a, 0) # right_mass = mass_case_right(0, b) # return _log_sum(left_mass, right_mass) # Catastrophic cancellation occurs as np.exp(log_mass) approaches 1. # Correct for this with an alternative formulation. # We're not concerned with underflow here: if only one term # underflows, it was insignificant; if both terms underflow, # the result can't accurately be represented in logspace anyway # because sc.log1p(x) ~ x for small x. return jnp.log1p(-ndtr(a) - ndtr(-b)) out = jnp.select( [case_left, case_right, case_central], [mass_case_left(a, b), mass_case_right(a, b), mass_case_central(a, b)] ) return out @_wraps(osp_stats.truncnorm.logpdf, update_doc=False)
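The _log_diff/log_ndtr tail handling and the log1p formulation in the central case exist to avoid catastrophic cancellation; a quick NumPy/SciPy comparison (assumed available, independent of the JAX code above) makes the point:

import numpy as np
from scipy.special import ndtr

a, b = -10.0, 10.0                      # interval holding nearly all of the Gaussian mass
naive = np.log(ndtr(b) - ndtr(a))       # difference rounds to 1.0, so the log collapses to 0.0
stable = np.log1p(-ndtr(a) - ndtr(-b))  # central-case formulation used in the code above
print(naive, stable)                    # 0.0 vs. roughly -1.5e-23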
@RunIf(min_gpus=2)
69,683
241,753
763
tests/trainer/logging_/test_logger_connector.py
249
30
def test_fx_validator_integration(tmpdir): not_supported = { None: "`self.trainer` reference is not registered", "on_before_accelerator_backend_setup": "You can't", "setup": "You can't", "configure_sharded_model": "You can't", "on_configure_sharded_model": "You can't", "configure_optimizers": "
Add `LightningModule.lr_scheduler_step` (#10249) Co-authored-by: Carlos Mocholi <[email protected]>
test_fx_validator_integration
82c8875f33addb0becd7761c95e9674ccc98c7ee
lightning
test_logger_connector.py
11
72
https://github.com/Lightning-AI/lightning.git
2
322
1
106
644
Python
{ "docstring": "Tries to log inside all `LightningModule` and `Callback` hooks to check any expected errors.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
def test_fx_validator_integration(tmpdir): not_supported = { None: "`self.trainer` reference is not registered", "on_before_accelerator_backend_setup": "You can't", "setup": "You can't", "configure_sharded_model": "You can't", "on_configure_sharded_model": "You can't", "configure_optimizers": "You can't", "on_fit_start": "You can't", "on_pretrain_routine_start": "You can't", "on_pretrain_routine_end": "You can't", "on_train_dataloader": "You can't", "train_dataloader": "You can't", "on_val_dataloader": "You can't", "val_dataloader": "You can't", "on_validation_end": "You can't", "on_train_end": "You can't", "on_fit_end": "You can't", "teardown": "You can't", "on_sanity_check_start": "You can't", "on_sanity_check_end": "You can't", "prepare_data": "You can't", "configure_callbacks": "You can't", "on_validation_model_eval": "You can't", "on_validation_model_train": "You can't", "lr_scheduler_step": "You can't", "summarize": "not managed by the `Trainer", } model = HookedModel(not_supported) with pytest.warns(UserWarning, match=not_supported[None]): model.log("foo", 1) callback = HookedCallback(not_supported) trainer = Trainer( default_root_dir=tmpdir, max_epochs=2, limit_train_batches=1, limit_val_batches=1, limit_test_batches=1, limit_predict_batches=1, callbacks=callback, ) with pytest.deprecated_call(match="on_train_dataloader` is deprecated in v1.5"): trainer.fit(model) not_supported.update( { # `lightning_module` ref is now present from the `fit` call "on_before_accelerator_backend_setup": "You can't", "on_test_dataloader": "You can't", "test_dataloader": "You can't", "on_test_model_eval": "You can't", "on_test_model_train": "You can't", "on_test_end": "You can't", } ) with pytest.deprecated_call(match="on_test_dataloader` is deprecated in v1.5"): trainer.test(model, verbose=False) not_supported.update({k: "result collection is not registered yet" for k in not_supported}) not_supported.update( { "on_predict_dataloader": "result collection is not registered yet", "predict_dataloader": "result collection is not registered yet", "on_predict_model_eval": "result collection is not registered yet", "on_predict_start": "result collection is not registered yet", "on_predict_epoch_start": "result collection is not registered yet", "on_predict_batch_start": "result collection is not registered yet", "predict_step": "result collection is not registered yet", "on_predict_batch_end": "result collection is not registered yet", "on_predict_epoch_end": "result collection is not registered yet", "on_predict_end": "result collection is not registered yet", } ) with pytest.deprecated_call(match="on_predict_dataloader` is deprecated in v1.5"): trainer.predict(model) @RunIf(min_gpus=2)
43,741
182,049
32
src/textual/drivers/win32.py
17
10
def enable_application_mode() -> Callable[[], None]: terminal_in = sys.stdin terminal_out = sys.stdout current_console_mode_in = _get_console_mode(terminal_in) current_console_m
working windows driver
enable_application_mode
988838a872d2c7af6a1113546ace4f15b74a3254
textual
win32.py
8
16
https://github.com/Textualize/textual.git
1
53
0
14
59
Python
{ "docstring": "Enable application mode.\n\n Returns:\n Callable[[], None]: A callable that will restore terminal to previous state.\n ", "language": "en", "n_whitespaces": 28, "n_words": 15, "vocab_size": 15 }
def enable_application_mode() -> Callable[[], None]: terminal_in = sys.stdin terminal_out = sys.stdout current_console_mode_in = _get_console_mode(terminal_in) current_console_mode_out = _get_console_mode(terminal_out)
78,616
266,836
53
lib/ansible/utils/_junit_xml.py
10
7
def get_attributes(self) -> dict[str, str]:
Simplify existing type hints.
get_attributes
871b2ca73adcba3a35551247cf839246cf121231
ansible
_junit_xml.py
9
6
https://github.com/ansible/ansible.git
1
29
0
10
45
Python
{ "docstring": "Return a dictionary of attributes for this instance.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def get_attributes(self) -> dict[str, str]: return _attributes( message=self.message, type=self.type, )
34,289
148,549
695
freqtrade/freqtradebot.py
125
41
def check_handle_timedout(self) -> None: for trade in Trade.get_open_order_trades(): try: if not trade.open_order_id: continue order = self.exchange.fetch_order(trade.open_order_id, trade.pair) except (ExchangeError): logger.info('Cannot query order for %s due to %s', trade, traceback.format_exc()) continue fully_cancelled = self.update_trade_state(trade, trade.open_order_id, order) if (order['side'] == 'buy' and (order['status'] == 'open' or fully_cancelled) and ( fu
Extract timeout handling from freqtradebot class
check_handle_timedout
7bef9a9b3ec8593dac0701e7c5f8df6d77b5d4e0
freqtrade
freqtradebot.py
21
38
https://github.com/freqtrade/freqtrade.git
17
283
0
82
483
Python
{ "docstring": "\n Check if any orders are timed out and cancel if necessary\n :param timeoutvalue: Number of minutes until order is considered timed out\n :return: None\n ", "language": "en", "n_whitespaces": 53, "n_words": 24, "vocab_size": 21 }
def check_handle_timedout(self) -> None: for trade in Trade.get_open_order_trades(): try: if not trade.open_order_id: continue order = self.exchange.fetch_order(trade.open_order_id, trade.pair) except (ExchangeError): logger.info('Cannot query order for %s due to %s', trade, traceback.format_exc()) continue fully_cancelled = self.update_trade_state(trade, trade.open_order_id, order) if (order['side'] == 'buy' and (order['status'] == 'open' or fully_cancelled) and ( fully_cancelled or self.strategy.ft_check_timed_out( 'buy', trade, order, datetime.now(timezone.utc)) )): self.handle_cancel_enter(trade, order, constants.CANCEL_REASON['TIMEOUT']) elif (order['side'] == 'sell' and (order['status'] == 'open' or fully_cancelled) and ( fully_cancelled or self.strategy.ft_check_timed_out( 'sell', trade, order, datetime.now(timezone.utc))) ): self.handle_cancel_exit(trade, order, constants.CANCEL_REASON['TIMEOUT']) canceled_count = trade.get_exit_order_count() max_timeouts = self.config.get('unfilledtimeout', {}).get('exit_timeout_count', 0) if max_timeouts > 0 and canceled_count >= max_timeouts: logger.warning(f'Emergencyselling trade {trade}, as the sell order ' f'timed out {max_timeouts} times.') try: self.execute_trade_exit( trade, order.get('price'), sell_reason=SellCheckTuple(sell_type=SellType.EMERGENCY_SELL)) except DependencyException as exception: logger.warning(f'Unable to emergency sell trade {trade.pair}: {exception}')
18,345
87,972
590
tests/snuba/api/endpoints/test_organization_events.py
119
35
def test_user_misery_denominator(self): ProjectTransactionThreshold.objects.create( project=self.project, organization=self.project.organization, threshold=600, metric=TransactionMetric.LCP.value, ) lcps = [ 400, 400, 300, 3000, 3000, 3000, ] for idx, lcp in enumerate(lcps): data = self.load_data(
fix(tests): Discover backend test flakes (#41057) - `MetricsQueryBuilder` wasn't sorting environment tags - Consistent timestamps on test_organization_events - Updated `apply_feature_flag_on_cls` to only apply decorator on the run method
test_user_misery_denominator
618ae63cf2ba419e44e79ce578d88e8b062d7dd9
sentry
test_organization_events.py
16
47
https://github.com/getsentry/sentry.git
2
287
0
84
478
Python
{ "docstring": "This is to test against a bug where the denominator of misery(total unique users) was wrong\n This is because the total unique users for a LCP misery should only count users that have had a txn with lcp,\n and not count all transactions (ie. uniq_if(transaction has lcp) not just uniq())\n ", "language": "en", "n_whitespaces": 71, "n_words": 50, "vocab_size": 41 }
def test_user_misery_denominator(self): ProjectTransactionThreshold.objects.create( project=self.project, organization=self.project.organization, threshold=600, metric=TransactionMetric.LCP.value, ) lcps = [ 400, 400, 300, 3000, 3000, 3000, ] for idx, lcp in enumerate(lcps): data = self.load_data( timestamp=before_now(minutes=(10 + idx)), ) data["event_id"] = f"{idx}" * 32 data["transaction"] = "/misery/new/" data["user"] = {"email": f"{idx}@example.com"} data["measurements"] = { "lcp": {"value": lcp}, } self.store_event(data, project_id=self.project.id) # Shouldn't count towards misery data = self.load_data(timestamp=self.ten_mins_ago, duration=timedelta(milliseconds=0)) data["transaction"] = "/misery/new/" data["user"] = {"email": "[email protected]"} data["measurements"] = {} self.store_event(data, project_id=self.project.id) query = { "field": [ "transaction", "user_misery()", ], "query": "event.type:transaction", "project": [self.project.id], "sort": "-user_misery", } response = self.do_request( query, ) assert response.status_code == 200, response.content assert len(response.data["data"]) == 1 data = response.data["data"] # (3 frustrated + 5.8875) / (6 + 117.75) assert abs(data[0]["user_misery()"] - 0.071818) < 0.0001
12,264
60,726
62
.venv/lib/python3.8/site-packages/pip/_internal/index/collector.py
25
7
def _determine_base_url(document, page_url): # type: (HTMLElement, str) -> str for base in document.findall(".//base"): href = base.get("href") if href is not None: return href return page_url
upd; format
_determine_base_url
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
collector.py
11
6
https://github.com/jindongwang/transferlearning.git
3
36
0
22
63
Python
{ "docstring": "Determine the HTML document's base URL.\n\n This looks for a ``<base>`` tag in the HTML document. If present, its href\n attribute denotes the base URL of anchor tags in the document. If there is\n no such tag (or if it does not have a valid href attribute), the HTML\n file's URL is used as the base URL.\n\n :param document: An HTML document representation. The current\n implementation expects the result of ``html5lib.parse()``.\n :param page_url: The URL of the HTML document.\n ", "language": "en", "n_whitespaces": 107, "n_words": 79, "vocab_size": 51 }
def _determine_base_url(document, page_url): # type: (HTMLElement, str) -> str for base in document.findall(".//base"): href = base.get("href") if href is not None: return href return page_url
1,622
9,465
23
reconstruction/ostec/external/stylegan2/metrics/precision_recall.py
10
9
def pairwise_distances(self, U, V): return self._distance_block.eval(feed_dict={self._features_batch1: U, self._features
initialize ostec
pairwise_distances
7375ee364e0df2a417f92593e09557f1b2a3575a
insightface
precision_recall.py
11
2
https://github.com/deepinsight/insightface.git
1
33
0
9
52
Python
{ "docstring": "Evaluate pairwise distances between two batches of feature vectors.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def pairwise_distances(self, U, V): return self._distance_block.eval(feed_dict={self._features_batch1: U, self._features_batch2: V}) #----------------------------------------------------------------------------
25,917
117,187
421
mindsdb/migrations/versions/2022-10-14_43c52d23845a_projects.py
110
45
def upgrade(): op.create_table( 'project', sa.Column('id', sa.Integer(), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('name', sa.String(), nullable=False), sa.Column('company_id', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'company_id', name='unique_integration_name_company_id') ) conn = op.get_bind() session = sa.orm.Session(bind=conn) project_record = db.Project(name='mindsdb') session.add(project_record) session.commit() with op.ba
Projects structure (#3532) Projects structure
upgrade
7c02e15aa403a4ca1fa34489dd2df9136d6c961c
mindsdb
2022-10-14_43c52d23845a_projects.py
15
60
https://github.com/mindsdb/mindsdb.git
3
446
0
67
766
Python
{ "docstring": "\n update predictor set project_id = :project_id\n \n update view set project_id = :project_id\n \n select id, name from view\n where exists (select 1 from predictor where view.name = predictor.name)\n \n update view\n set name = :name\n where id = :view_id\n ", "language": "en", "n_whitespaces": 134, "n_words": 37, "vocab_size": 20 }
def upgrade(): op.create_table( 'project', sa.Column('id', sa.Integer(), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('name', sa.String(), nullable=False), sa.Column('company_id', sa.Integer(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'company_id', name='unique_integration_name_company_id') ) conn = op.get_bind() session = sa.orm.Session(bind=conn) project_record = db.Project(name='mindsdb') session.add(project_record) session.commit() with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.add_column(sa.Column('project_id', sa.Integer())) batch_op.create_foreign_key('fk_project_id', 'project', ['project_id'], ['id']) conn.execute(sa.sql.text(), project_id=project_record.id) with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.alter_column( 'project_id', existing_type=sa.INTEGER(), nullable=False ) with op.batch_alter_table('view', schema=None) as batch_op: batch_op.add_column(sa.Column('project_id', sa.Integer())) batch_op.create_foreign_key('fk_project_id', 'project', ['project_id'], ['id']) conn.execute(sa.sql.text(), project_id=project_record.id) with op.batch_alter_table('view', schema=None) as batch_op: batch_op.alter_column( 'project_id', existing_type=sa.INTEGER(), nullable=False ) views = conn.execute().fetchall() for row in views: conn.execute( text(), { 'name': f"{row['name']}_view", 'view_id': row['id'] } ) view_integration = session.query(db.Integration).filter_by(name='views').first() if view_integration is not None: session.delete(view_integration) session.commit()
24,381
111,338
89
spacy/pipeline/span_ruler.py
21
14
def clear(self) -> None: self._patterns: List[PatternType] = [] self.matcher: Matcher = Matcher(self.nlp.vocab, validate=self.validate) self.phrase_matcher: P
Add SpanRuler component (#9880) * Add SpanRuler component Add a `SpanRuler` component similar to `EntityRuler` that saves a list of matched spans to `Doc.spans[spans_key]`. The matches from the token and phrase matchers are deduplicated and sorted before assignment but are not otherwise filtered. * Update spacy/pipeline/span_ruler.py Co-authored-by: Sofie Van Landeghem <[email protected]> * Fix cast * Add self.key property * Use number of patterns as length * Remove patterns kwarg from init * Update spacy/tests/pipeline/test_span_ruler.py Co-authored-by: Sofie Van Landeghem <[email protected]> * Add options for spans filter and setting to ents * Add `spans_filter` option as a registered function' * Make `spans_key` optional and if `None`, set to `doc.ents` instead of `doc.spans[spans_key]`. * Update and generalize tests * Add test for setting doc.ents, fix key property type * Fix typing * Allow independent doc.spans and doc.ents * If `spans_key` is set, set `doc.spans` with `spans_filter`. * If `annotate_ents` is set, set `doc.ents` with `ents_fitler`. * Use `util.filter_spans` by default as `ents_filter`. * Use a custom warning if the filter does not work for `doc.ents`. * Enable use of SpanC.id in Span * Support id in SpanRuler as Span.id * Update types * `id` can only be provided as string (already by `PatternType` definition) * Update all uses of Span.id/ent_id in Doc * Rename Span id kwarg to span_id * Update types and docs * Add ents filter to mimic EntityRuler overwrite_ents * Refactor `ents_filter` to take `entities, spans` args for more filtering options * Give registered filters more descriptive names * Allow registered `filter_spans` filter (`spacy.first_longest_spans_filter.v1`) to take any number of `Iterable[Span]` objects as args so it can be used for spans filter or ents filter * Implement future entity ruler as span ruler Implement a compatible `entity_ruler` as `future_entity_ruler` using `SpanRuler` as the underlying component: * Add `sort_key` and `sort_reverse` to allow the sorting behavior to be customized. (Necessary for the same sorting/filtering as in `EntityRuler`.) * Implement `overwrite_overlapping_ents_filter` and `preserve_existing_ents_filter` to support `EntityRuler.overwrite_ents` settings. * Add `remove_by_id` to support `EntityRuler.remove` functionality. * Refactor `entity_ruler` tests to parametrize all tests to test both `entity_ruler` and `future_entity_ruler` * Implement `SpanRuler.token_patterns` and `SpanRuler.phrase_patterns` properties. Additional changes: * Move all config settings to top-level attributes to avoid duplicating settings in the config vs. `span_ruler/cfg`. (Also avoids a lot of casting.) * Format * Fix filter make method name * Refactor to use same error for removing by label or ID * Also provide existing spans to spans filter * Support ids property * Remove token_patterns and phrase_patterns * Update docstrings * Add span ruler docs * Fix types * Apply suggestions from code review Co-authored-by: Sofie Van Landeghem <[email protected]> * Move sorting into filters * Check for all tokens in seen tokens in entity ruler filters * Remove registered sort key * Set Token.ent_id in a backwards-compatible way in Doc.set_ents * Remove sort options from API docs * Update docstrings * Rename entity ruler filters * Fix and parameterize scoring * Add id to Span API docs * Fix typo in API docs * Include explicit labeled=True for scorer Co-authored-by: Sofie Van Landeghem <[email protected]>
clear
a322d6d5f2f85c2da6cded4fcd6143d41b5a9e96
spaCy
span_ruler.py
10
13
https://github.com/explosion/spaCy.git
1
66
0
19
102
Python
{ "docstring": "Reset all patterns.\n\n RETURNS: None\n DOCS: https://spacy.io/api/spanruler#clear\n ", "language": "en", "n_whitespaces": 28, "n_words": 7, "vocab_size": 7 }
def clear(self) -> None: self._patterns: List[PatternType] = [] self.matcher: Matcher = Matcher(self.nlp.vocab, validate=self.validate) self.phrase_matcher: PhraseMatcher = PhraseMatcher( self.nlp.vocab, attr=self.phrase_matcher_attr, validate=self.validate, )
56,928
223,483
317
python3.10.4/Lib/doctest.py
88
19
def _from_module(self, module, object): if module is None: return True elif inspect.getmodule(object) is not None: return module is inspect.getmodule(object) elif inspect.isfunction(object): return module.__dict__ is object.__globals__ elif inspect.ismethoddescriptor(object): if hasattr(object, '__objclass__'): obj_mod = object.__objclass__.__module__ elif hasattr(object, '__module__'): obj_mod = object.__module__ else:
add python 3.10.4 for windows
_from_module
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
doctest.py
13
23
https://github.com/XX-net/XX-Net.git
10
148
0
47
242
Python
{ "docstring": "\n Return true if the given object is defined in the given\n module.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 10 }
def _from_module(self, module, object): if module is None: return True elif inspect.getmodule(object) is not None: return module is inspect.getmodule(object) elif inspect.isfunction(object): return module.__dict__ is object.__globals__ elif inspect.ismethoddescriptor(object): if hasattr(object, '__objclass__'): obj_mod = object.__objclass__.__module__ elif hasattr(object, '__module__'): obj_mod = object.__module__ else: return True # [XX] no easy way to tell otherwise return module.__name__ == obj_mod elif inspect.isclass(object): return module.__name__ == object.__module__ elif hasattr(object, '__module__'): return module.__name__ == object.__module__ elif isinstance(object, property): return True # [XX] no way not be sure. else: raise ValueError("object must be a class or function")
53,568
213,001
945
DemoPrograms/Demo_Script_Launcher_ANSI_Color_Output.py
258
33
def cut_ansi_string_into_parts(string_with_ansi_codes): color_codes_english = ['Black', 'Red', 'Green', 'Yellow', 'Blue', 'Magenta', 'Cyan', 'White', 'Reset'] color_codes = ["30m", "31m", "32m", "33m", "34m", "35m", "36m", "37m", "0m"] effect_codes_english = ['Italic', 'Underline', 'Slow Blink', 'Rapid Blink', 'Crossed Out'] effect_codes
Removed old code that used Popen and instead uses the PySimpleGUI Exec API calls for an all-in-one demo. Added expansion of the Multilline and a SizeGrip so that it's obvious to user the window is resizable.
cut_ansi_string_into_parts
a35687ac51dac5a2a0664ca20e7dd7cba6836c7b
PySimpleGUI
Demo_Script_Launcher_ANSI_Color_Output.py
21
57
https://github.com/PySimpleGUI/PySimpleGUI.git
19
603
0
131
973
Python
{ "docstring": "\n    Converts a string with embedded ANSI Color Codes and parses it to create\n    a list of tuples describing pieces of the input string.\n    :param string_with_ansi_codes:\n    :return: [(str, str, str, str), ...] A list of tuples. Each tuple has format: (text, text color, background color, effects)\n    ", "language": "en", "n_whitespaces": 61, "n_words": 45, "vocab_size": 39 }
def cut_ansi_string_into_parts(string_with_ansi_codes): color_codes_english = ['Black', 'Red', 'Green', 'Yellow', 'Blue', 'Magenta', 'Cyan', 'White', 'Reset'] color_codes = ["30m", "31m", "32m", "33m", "34m", "35m", "36m", "37m", "0m"] effect_codes_english = ['Italic', 'Underline', 'Slow Blink', 'Rapid Blink', 'Crossed Out'] effect_codes = ["3m", "4m", "5m", "6m", "9m"] background_codes = ["40m", "41m", "42m", "43m", "44m", "45m", "46m", "47m"] background_codes_english = ["Black", "Red", "Green", "Yellow", "Blue", "Magenta", "Cyan", "White"] ansi_codes = color_codes + effect_codes tuple_list = [] string_list = string_with_ansi_codes.split("\u001b[") if (len(string_list)) == 1: string_list = string_with_ansi_codes.split("\033[") for teststring in string_list: if teststring == string_with_ansi_codes: tuple_list += [(teststring, None, None, None)] break if any(code in teststring for code in ansi_codes): static_string = None color_used = None effect_used = None background_used = None for color in range(0, len(color_codes)): if teststring.startswith(color_codes[color]): working_thread = teststring.split(color_codes[color]) ansi_strip = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]') static_string = ansi_strip.sub('', working_thread[1]) color_used = color_codes_english[color] for effect in range(0, len(effect_codes)): if teststring.startswith(effect_codes[effect]): working_thread = teststring.split(effect_codes[effect]) ansi_strip = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]') static_string = ansi_strip.sub('', working_thread[1]) effect_used = effect_codes_english[effect] for background in range(0, len(background_codes)): if teststring.startswith(background_codes[background]): working_thread = teststring.split(background_codes[background]) ansi_strip = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]') static_string = ansi_strip.sub('', working_thread[1]) background_used = background_codes_english[background] try: if not tuple_list[len(tuple_list) - 1][0]: if not tuple_list[len(tuple_list) - 1][1] == None: color_used = tuple_list[len(tuple_list) - 1][1] if not tuple_list[len(tuple_list) - 1][2] == None: background_used = tuple_list[len(tuple_list) - 1][2] if not tuple_list[len(tuple_list) - 1][3] == None: effect_used = tuple_list[len(tuple_list) - 1][3] tuple_list += [(static_string, color_used, background_used, effect_used)] else: tuple_list += [(static_string, color_used, background_used, effect_used)] except Exception: tuple_list += [(static_string, color_used, background_used, effect_used)] new_tuple_list = [] for x in range(0, len(tuple_list)): if tuple_list[x][0]: new_tuple_list += [[tuple_list[x][0], tuple_list[x][1], tuple_list[x][2], tuple_list[x][3]]] return new_tuple_list
39,296
162,765
472
research/neo_peq/legacy_frequency_response.py
147
30
def interpolate(self, f=None, f_step=DEFAULT_STEP, pol_order=1, f_min=DEFAULT_F_MIN, f_max=DEFAULT_F_MAX): # Remove None values i = 0 while i < len(self.raw): if self.raw[i] is None: self.raw = np.delete(self.raw, i) self.frequency = np.delete(self.frequency, i) else: i += 1 # Interpolation functions keys = 'raw error error_smoothed equalization equalized_raw equalized_smoothed target'.split() interpolators = dict() log_f = np.log10(self.frequency) for key in keys: if len(self.__dict__[key]): interpolators[key] = InterpolatedUnivariateSpline(log_f, self.__dict__[key], k=pol_order) if f is None: self.frequency = self.generate_frequencies(f_min=f_min, f_max=f_max, f_step=f_step) else: self.frequency = np.array(f) # Prevent log10 from exploding by replacing zero frequency with small value zero_freq_fix = False if self.frequency[0] == 0: self.frequency[0] = 0.001 zero_freq_fix = True # Run interpolators log_f = np.log10(self.frequency) for key in keys: if len(self.__dict__[key]) and key in interpolators: self.__dict__[key] = interpolators[key](log_f) if zero_freq_fix: # Restore zero frequency self.frequency[0] = 0 # Everything but the interpolated data is affected by interpolating, reset them
Added PEQ configs to CLI and function interfaces. Improved default value handling for PEQ parameters and added more predefined configs. Removed legacy PEQ optimization. Fixed readme write. Improved shelf filter initialization. Added plot method to PEQ. Notebook for comparing old and new optimizers. Bug fixes.
interpolate
9120cdffe618c6c2ff16fe6a311b6a1367efdbc8
AutoEq
legacy_frequency_response.py
14
29
https://github.com/jaakkopasanen/AutoEq.git
12
273
0
94
423
Python
{ "docstring": "Interpolates missing values from previous and next value. Resets all but raw data.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
def interpolate(self, f=None, f_step=DEFAULT_STEP, pol_order=1, f_min=DEFAULT_F_MIN, f_max=DEFAULT_F_MAX): # Remove None values i = 0 while i < len(self.raw): if self.raw[i] is None: self.raw = np.delete(self.raw, i) self.frequency = np.delete(self.frequency, i) else: i += 1 # Interpolation functions keys = 'raw error error_smoothed equalization equalized_raw equalized_smoothed target'.split() interpolators = dict() log_f = np.log10(self.frequency) for key in keys: if len(self.__dict__[key]): interpolators[key] = InterpolatedUnivariateSpline(log_f, self.__dict__[key], k=pol_order) if f is None: self.frequency = self.generate_frequencies(f_min=f_min, f_max=f_max, f_step=f_step) else: self.frequency = np.array(f) # Prevent log10 from exploding by replacing zero frequency with small value zero_freq_fix = False if self.frequency[0] == 0: self.frequency[0] = 0.001 zero_freq_fix = True # Run interpolators log_f = np.log10(self.frequency) for key in keys: if len(self.__dict__[key]) and key in interpolators: self.__dict__[key] = interpolators[key](log_f) if zero_freq_fix: # Restore zero frequency self.frequency[0] = 0 # Everything but the interpolated data is affected by interpolating, reset them self.reset(**{key: False for key in keys})
119,869
331,584
947
timm/optim/lars.py
182
34
def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() device = self.param_groups[0]['params'][0].device one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly for group in self.param_groups: weight_decay = group['weight_decay'] momentum = group['momentum'] dampening = group['dampening'] nesterov = group['nesterov'] trust_coeff = group['trust_coeff'] eps = group['eps'] for p
fix lars
step
cdcd0a92ca8a3dc120336a5dde1b7d6ecd5e9186
pytorch-image-models
lars.py
19
44
https://github.com/huggingface/pytorch-image-models.git
11
331
0
118
534
Python
{ "docstring": "Performs a single optimization step.\n\n Args:\n closure (callable, optional): A closure that reevaluates the model and returns the loss.\n ", "language": "en", "n_whitespaces": 44, "n_words": 19, "vocab_size": 17 }
def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() device = self.param_groups[0]['params'][0].device one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly for group in self.param_groups: weight_decay = group['weight_decay'] momentum = group['momentum'] dampening = group['dampening'] nesterov = group['nesterov'] trust_coeff = group['trust_coeff'] eps = group['eps'] for p in group['params']: if p.grad is None: continue grad = p.grad # apply LARS LR adaptation, LARC clipping, weight decay # ref: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py if weight_decay != 0 or group['always_adapt']: w_norm = p.norm(2.0) g_norm = grad.norm(2.0) trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps) # FIXME nested where required since logical and/or not working in PT XLA trust_ratio = torch.where( w_norm > 0, torch.where(g_norm > 0, trust_ratio, one_tensor), one_tensor, ) if group['trust_clip']: trust_ratio = torch.minimum(trust_ratio / group['lr'], one_tensor) grad.add_(p, alpha=weight_decay) grad.mul_(trust_ratio) # apply SGD update https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 if momentum != 0: param_state = self.state[p] if 'momentum_buffer' not in param_state: buf = param_state['momentum_buffer'] = torch.clone(grad).detach() else: buf = param_state['momentum_buffer'] buf.mul_(momentum).add_(grad, alpha=1. - dampening) if nesterov: grad = grad.add(buf, alpha=momentum) else: grad = buf p.add_(grad, alpha=-group['lr']) return loss
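A brief, hedged usage sketch for the LARS record above: the import path is inferred from the record's file path and the constructor keywords from the param-group keys in the code, neither is confirmed by the record itself; the closure pattern follows the docstring.

import torch
from timm.optim.lars import Lars  # import path inferred from the record's path, not confirmed

# Tiny self-contained example: a single LARS step over a random regression batch.
model = torch.nn.Linear(4, 1)
optimizer = Lars(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
x, y = torch.randn(8, 4), torch.randn(8, 1)

def closure():
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    return loss

# step() re-evaluates the model through the closure and returns the loss.
print(optimizer.step(closure))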
95,399
296,416
30
homeassistant/components/hunterdouglas_powerview/cover.py
9
5
async def _async_force_resync(self, *_): self._forced_resync = None
Fix handling of powerview stale state (#70195)
_async_force_resync
2c2b678e80db615e50a7b72c3ec107730cc6f8dd
core
cover.py
8
3
https://github.com/home-assistant/core.git
1
20
0
9
37
Python
{ "docstring": "Force a resync after an update since the hub may have stale state.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
async def _async_force_resync(self, *_): self._forced_resync = None await self._async_force_refresh_state()
7,511
42,251
273
seaborn/palettes.py
90
18
def set_color_codes(palette="deep"): if palette == "reset": colors = [
Convert color palette docstrings to notebooks (#3034) * Convert color palette docstrings to notebooks and rerun all with py310 kernel * Add v0.12.1 release notes to index * Improve failure mode when ipywidgets is not involved * Update palettes docstrings * Remove all other doctest-style examples * Remove doctest-oriented testing infrastructure * Mention in release notes * Skip colormap patch test on matplotlib's where it's not relevant * Use more robust approach to mpl backcompat
set_color_codes
e644793f0ac2b1be178425f20f529121f37f29de
seaborn
palettes.py
13
25
https://github.com/mwaskom/seaborn.git
6
207
0
57
280
Python
{ "docstring": "Change how matplotlib color shorthands are interpreted.\n\n Calling this will change how shorthand codes like \"b\" or \"g\"\n are interpreted by matplotlib in subsequent plots.\n\n Parameters\n ----------\n palette : {deep, muted, pastel, dark, bright, colorblind}\n Named seaborn palette to use as the source of colors.\n\n See Also\n --------\n set : Color codes can be set through the high-level seaborn style\n manager.\n set_palette : Color codes can also be set through the function that\n sets the matplotlib color cycle.\n\n ", "language": "en", "n_whitespaces": 141, "n_words": 78, "vocab_size": 58 }
def set_color_codes(palette="deep"): if palette == "reset": colors = [ (0., 0., 1.), (0., .5, 0.), (1., 0., 0.), (.75, 0., .75), (.75, .75, 0.), (0., .75, .75), (0., 0., 0.) ] elif not isinstance(palette, str): err = "set_color_codes requires a named seaborn palette" raise TypeError(err) elif palette in SEABORN_PALETTES: if not palette.endswith("6"): palette = palette + "6" colors = SEABORN_PALETTES[palette] + [(.1, .1, .1)] else: err = f"Cannot set colors with palette '{palette}'" raise ValueError(err) for code, color in zip("bgrmyck", colors): rgb = mpl.colors.colorConverter.to_rgb(color) mpl.colors.colorConverter.colors[code] = rgb mpl.colors.colorConverter.cache[code] = rgb
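A minimal usage sketch for the palette record above, assuming seaborn and matplotlib are installed; set_color_codes is exposed at seaborn's top level.

import matplotlib.pyplot as plt
import seaborn as sns

# After this call the shorthand code "b" maps to the seaborn "deep" blue
# rather than matplotlib's pure blue.
sns.set_color_codes("deep")
plt.plot([0, 1, 2], [0, 1, 4], color="b")

# Passing "reset" restores matplotlib's original interpretation of the codes.
sns.set_color_codes("reset")
plt.show()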
7,877
43,220
20
tests/models/test_dagrun.py
11
9
def test_mapped_literal_length_increase_at_runtime_adds_additional_tis(dag_maker, session):
Fix mapped task immutability after clear (#23667) We should be able to detect if the structure of mapped task has changed and verify the integrity. This PR ensures this Co-authored-by: Tzu-ping Chung <[email protected]>
test_mapped_literal_length_increase_at_runtime_adds_additional_tis
b692517ce3aafb276e9d23570e9734c30a5f3d1f
airflow
test_dagrun.py
9
39
https://github.com/apache/airflow.git
5
311
0
11
51
Python
{ "docstring": "Test that when the length of a mapped literal increases at runtime, an additional ti is added", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 15 }
def test_mapped_literal_length_increase_at_runtime_adds_additional_tis(dag_maker, session): from airflow.models import Variable Variable.set(key='arg1', value=[1, 2, 3])
116,988
319,729
44
src/documents/tests/test_management_convert_thumbnail.py
16
8
def create_png_thumbnail_file(self, thumb_dir): thumb_file = Path(thumb_dir) / Path(f"{self.doc.pk:07}.png") thumb_file.write_text("this is a dummy p
Fixes existing testing, adds test coverage of new command
create_png_thumbnail_file
08c3d6e84b17da2acfb10250438fe357398e5e0e
paperless-ngx
test_management_convert_thumbnail.py
13
4
https://github.com/paperless-ngx/paperless-ngx.git
1
28
0
15
62
Python
{ "docstring": "\n Creates a dummy PNG thumbnail file in the given directory, based on\n the database Document\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 14 }
def create_png_thumbnail_file(self, thumb_dir): thumb_file = Path(thumb_dir) / Path(f"{self.doc.pk:07}.png") thumb_file.write_text("this is a dummy png file") return thumb_file
45,702
187,141
47
tests/test_api_validate.py
16
10
def test_parse_html(self): a
plugin.api.validate: implement ValidationError - Implement `ValidationError` - Inherit from `ValueError` to preserve backwards compatiblity - Allow collecting multiple errors (AnySchema) - Keep an error stack of parent `ValidationError`s or other exceptions - Format error stack when converting error to string - Raise `ValidationError` instead of `ValueError` - Add error contexts where it makes sense - Add schema names to error instances - Add and update tests
test_parse_html
3d44da082b3ba202b9d0557bfd8ce747a1d7960c
streamlink
test_api_validate.py
11
8
https://github.com/streamlink/streamlink.git
1
44
0
15
79
Python
{ "docstring": "\n ValidationError:\n Unable to parse HTML: can only parse strings (None)\n ", "language": "en", "n_whitespaces": 42, "n_words": 10, "vocab_size": 9 }
def test_parse_html(self): assert validate(parse_html(), '<!DOCTYPE html><body>&quot;perfectly&quot;<a>valid<div>HTML').tag == "html" with self.assertRaises(ValueError) as cm: validate(parse_html(), None) assert_validationerror(cm.exception, )
75,377
258,712
264
sklearn/utils/validation.py
107
16
def _check_feature_names_in(estimator, input_features=None, *, generate_names=True): feature_names_in_ = getattr(estimator, "feature_names_in_", None) n_features_in_ = getattr(estimator, "n_features_in_", None) if input_features is not None: input_features = np.asarray(input_features, dtype=object) if feature_names_in_ is not None and not np.array_equal( feature_names_in_, input_features ): raise ValueError("input_features is not equal to feature_names_in_") if n_features_in_ is not None and len(input_features) != n_features_in_: raise ValueError( "input_features should have length equal to number of " f"features ({n_features_in_}), got {len(input_features)}" ) return input_features if feature_names_in_ is not None: return feature_n
ENH Adds feature_names_out to preprocessing module (#21079) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: 赵丰 (Zhao Feng) <[email protected]> Co-authored-by: Niket Jain <[email protected]> Co-authored-by: Loïc Estève <[email protected]>
_check_feature_names_in
d7feac0ccfe1a7b8a55f2e16f249f77508a91fe1
scikit-learn
validation.py
16
22
https://github.com/scikit-learn/scikit-learn.git
10
141
0
62
244
Python
{ "docstring": "Check `input_features` and generate names if needed.\n\n Commonly used in :term:`get_feature_names_out`.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Input features.\n\n - If `input_features` is `None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then names are generated: `[x0, x1, ..., x(n_features_in_)]`.\n - If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n generate_names : bool, default=True\n Whether to generate names when `input_features` is `None` and\n `estimator.feature_names_in_` is not defined. This is useful for transformers\n that validates `input_features` but do not require them in\n :term:`get_feature_names_out` e.g. `PCA`.\n\n Returns\n -------\n feature_names_in : ndarray of str or `None`\n Feature names in.\n ", "language": "en", "n_whitespaces": 226, "n_words": 110, "vocab_size": 71 }
def _check_feature_names_in(estimator, input_features=None, *, generate_names=True): feature_names_in_ = getattr(estimator, "feature_names_in_", None) n_features_in_ = getattr(estimator, "n_features_in_", None) if input_features is not None: input_features = np.asarray(input_features, dtype=object) if feature_names_in_ is not None and not np.array_equal( feature_names_in_, input_features ): raise ValueError("input_features is not equal to feature_names_in_") if n_features_in_ is not None and len(input_features) != n_features_in_: raise ValueError( "input_features should have length equal to number of " f"features ({n_features_in_}), got {len(input_features)}" ) return input_features if feature_names_in_ is not None: return feature_names_in_ if not generate_names: return # Generates feature names if `n_features_in_` is defined if n_features_in_ is None: raise ValueError("Unable to generate feature names without n_features_in_") return np.asarray([f"x{i}" for i in range(n_features_in_)], dtype=object)
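A short sketch of the naming behaviour described in the docstring above, shown through a public transformer since the helper itself is private; the printed output assumes a scikit-learn version that includes this commit.

import numpy as np
from sklearn.preprocessing import StandardScaler

X = np.arange(12.0).reshape(4, 3)
scaler = StandardScaler().fit(X)

# Fitted on a plain ndarray there are no input feature names, so names are
# generated from n_features_in_ as x0, x1, ...
print(scaler.get_feature_names_out())                 # ['x0' 'x1' 'x2']
# Explicit names of the right length are validated and passed through.
print(scaler.get_feature_names_out(["a", "b", "c"]))  # ['a' 'b' 'c']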
41,810
176,293
41
networkx/algorithms/shortest_paths/weighted.py
22
6
def all_pairs_bellman_ford_path(G, weight="weight"): path = single_source_bellm
DOC: Update documentation to include callables for weight argument (#5307) Update docs to include functions as valid input for weight argument.
all_pairs_bellman_ford_path
b5d41847b8db0c82372faf69cd3a339d11da7ef0
networkx
weighted.py
12
4
https://github.com/networkx/networkx.git
2
33
0
22
54
Python
{ "docstring": "Compute shortest paths between all nodes in a weighted graph.\n\n Parameters\n ----------\n G : NetworkX graph\n\n weight : string or function (default=\"weight\")\n If this is a string, then edge weights will be accessed via the\n edge attribute with this key (that is, the weight of the edge\n joining `u` to `v` will be ``G.edges[u, v][weight]``). If no\n such edge attribute exists, the weight of the edge is assumed to\n be one.\n\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly three\n positional arguments: the two endpoints of an edge and the\n dictionary of edge attributes for that edge. The function must\n return a number.\n\n Returns\n -------\n distance : dictionary\n Dictionary, keyed by source and target, of shortest paths.\n\n Examples\n --------\n >>> G = nx.path_graph(5)\n >>> path = dict(nx.all_pairs_bellman_ford_path(G))\n >>> path[0][4]\n [0, 1, 2, 3, 4]\n\n Notes\n -----\n Edge weight attributes must be numerical.\n Distances are calculated as sums of weighted edges traversed.\n\n See Also\n --------\n floyd_warshall, all_pairs_dijkstra_path\n\n ", "language": "en", "n_whitespaces": 310, "n_words": 170, "vocab_size": 109 }
def all_pairs_bellman_ford_path(G, weight="weight"): path = single_source_bellman_ford_path # TODO This can be trivially parallelized. for n in G: yield (n, path(G, n, weight=weight))
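A small sketch of the callable weight form that the commit above documents; the graph and the cost attribute are arbitrary examples.

import networkx as nx

G = nx.path_graph(5)
nx.set_edge_attributes(G, {e: 1.0 for e in G.edges}, name="cost")

# weight given as a function of (u, v, edge_attrs), per the updated docs.
paths = dict(nx.all_pairs_bellman_ford_path(G, weight=lambda u, v, d: d["cost"]))
print(paths[0][4])  # [0, 1, 2, 3, 4]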
14,863
68,774
20
erpnext/manufacturing/doctype/bom_update_log/test_bom_update_log.py
24
6
def update_cost_in_all_boms_in_test(): log = enqueue_update_cost() # create BOM Update Log while log.status != "Completed": resume_bom_cost_update_jobs() # run cron job until complete log.reload() return log
chore: Less hacky tests, versioning (replace bom) and clearing log data (update cost) - Remove `auto_commit_on_many_writes` in `update_cost_in_level()` as commits happen every N BOMs - Auto commit every 50 BOMs - test: Remove hacky `frappe.flags.in_test` returns - test: Enqueue `now` if in tests (for update cost and replace bom) - Replace BOM: Copy bom object to `_doc_before_save` so that version.py finds a difference between the two - Replace BOM: Add reference to version - Update Cost: Unset `processed_boms` if Log is completed (useless after completion) - test: `update_cost_in_all_boms_in_test` works close to actual prod implementation (only call Cron job manually) - Test: use `enqueue_replace_bom` so that test works closest to production behaviour Co-authored-by: Ankush Menat <[email protected]>
update_cost_in_all_boms_in_test
3fa0a46f39f7024c5d0b235a7725eaa9ad0f3869
erpnext
test_bom_update_log.py
9
6
https://github.com/frappe/erpnext.git
2
27
0
22
54
Python
{ "docstring": "\n\tUtility to run 'Update Cost' job in tests without Cron job until fully complete.\n\t", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
def update_cost_in_all_boms_in_test(): log = enqueue_update_cost() # create BOM Update Log while log.status != "Completed": resume_bom_cost_update_jobs() # run cron job until complete log.reload() return log
45,161
185,757
81
src/textual/widgets/_data_table.py
18
10
def clear(self) -> None: self.row_count = 0 self._clear_caches()
ffixed table refresh on add row
clear
b524fa08eecadc83b0b694278db1c79d90feb9d8
textual
_data_table.py
8
14
https://github.com/Textualize/textual.git
1
54
0
15
94
Python
{ "docstring": "Clear the table.\n\n Args:\n columns (bool, optional): Also clear the columns. Defaults to False.\n ", "language": "en", "n_whitespaces": 39, "n_words": 14, "vocab_size": 13 }
def clear(self) -> None: self.row_count = 0 self._clear_caches() self._y_offsets.clear() self.data.clear() self.rows.clear() self._line_no = 0 self._require_update_dimensions = True self.refresh()
42,686
178,399
207
nuitka/utils/FileOperations.py
43
15
def copyFile(source_path, dest_path): while 1: try: shutil.copyfile(source_path, dest_path) except PermissionError as e: if e.errno != errno.EACCES: raise general.warning("Problem copying file %s:" % e) try: reply
UI: In case of PermissionError, allow uses to retry * Esp. on Windows it happens a lot that running programs cannot be updated by Nuitka, this avoids the cryptic error somewhere ranomly.
copyFile
2c20b90946a8aa5ad4ee39ad365ff1b83f182770
Nuitka
FileOperations.py
17
16
https://github.com/Nuitka/Nuitka.git
7
72
0
37
132
Python
{ "docstring": "Improved version of shutil.copy\n\n    This handles errors with a chance to correct them, e.g. on Windows, files might be\n    locked by a running program or virus checkers.\n    ", "language": "en", "n_whitespaces": 35, "n_words": 26, "vocab_size": 26 }
def copyFile(source_path, dest_path): while 1: try: shutil.copyfile(source_path, dest_path) except PermissionError as e: if e.errno != errno.EACCES: raise general.warning("Problem copying file %s:" % e) try: reply = raw_input("Retry? (YES/no) ") or "yes" except EOFError: reply = "no" if reply.upper() == "YES": continue raise break
76,027
259,994
106
sklearn/ensemble/tests/test_iforest.py
37
14
def test_iforest(global_random_seed): X_train = np.array([[0, 1], [1, 2]]) X_test = np.array([[2, 1], [1, 1]]) grid = ParameterGrid( {"n_estimators": [3], "max_samples": [0.5, 1.0, 3], "bootstrap": [True, False]} ) with ignore_warnings(): for params in grid: IsolationForest(random_state=global_random_seed, **params).fit( X_train ).predi
TST use global_random_seed in sklearn/ensemble/tests/test_iforest.py (#22901) Co-authored-by: jeremie du boisberranger <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: Olivier Grisel <[email protected]>
test_iforest
6ca1f5e4d0d16bc9a7f28582079a15e14f012719
scikit-learn
test_iforest.py
16
11
https://github.com/scikit-learn/scikit-learn.git
2
109
0
32
164
Python
{ "docstring": "Check Isolation Forest for various parameter settings.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
def test_iforest(global_random_seed): X_train = np.array([[0, 1], [1, 2]]) X_test = np.array([[2, 1], [1, 1]]) grid = ParameterGrid( {"n_estimators": [3], "max_samples": [0.5, 1.0, 3], "bootstrap": [True, False]} ) with ignore_warnings(): for params in grid: IsolationForest(random_state=global_random_seed, **params).fit( X_train ).predict(X_test)
55,189
218,189
22
python3.10.4/Lib/importlib/abc.py
7
9
def invalidate_caches(self): _register
add python 3.10.4 for windows
invalidate_caches
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
abc.py
6
1
https://github.com/XX-net/XX-Net.git
1
6
0
7
40
Python
{ "docstring": "An optional method for clearing the finder's cache, if any.\n This method is used by importlib.invalidate_caches().\n ", "language": "en", "n_whitespaces": 30, "n_words": 16, "vocab_size": 15 }
def invalidate_caches(self): _register(MetaPathFinder, machinery.BuiltinImporter, machinery.FrozenImporter, machinery.PathFinder, machinery.WindowsRegistryFinder)
17,223
81,589
131
awx/main/dispatch/reaper.py
49
28
def reap(instance=None, status='failed', excluded_uuids=[]): me = instance if me is None: try: me = Instance.objects.me() except RuntimeError as e: logger.warning(f'Local instance is not registered, not running reaper: {e}') return workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id jobs = UnifiedJob.objects.
Split reaper for running and waiting jobs Avoid running jobs that have already been reapted Co-authored-by: Elijah DeLee <[email protected]> Remove unnecessary extra actions Fix waiting jobs in other cases of reaping
reap
278db2cddebec97ec48011ecae45129be1ac43a4
awx
reaper.py
18
14
https://github.com/ansible/awx.git
4
122
0
40
205
Python
{ "docstring": "\n    Reap all jobs currently running for this instance.\n    ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
def reap(instance=None, status='failed', excluded_uuids=[]): me = instance if me is None: try: me = Instance.objects.me() except RuntimeError as e: logger.warning(f'Local instance is not registered, not running reaper: {e}') return workflow_ctype_id = ContentType.objects.get_for_model(WorkflowJob).id jobs = UnifiedJob.objects.filter( Q(status='running') & (Q(execution_node=me.hostname) | Q(controller_node=me.hostname)) & ~Q(polymorphic_ctype_id=workflow_ctype_id) ).exclude(celery_task_id__in=excluded_uuids) for j in jobs: reap_job(j, status)
7,566
42,481
106
nltk/corpus/reader/wordnet.py
44
12
def closure(self, rel, depth=-1): from nltk.util import acyclic_breadth_first for synset in acyclic_breadth_first(self, rel, depth): if s
Fix some tests in Wordnet-related DocStrings
closure
692adaff901dd9daf29400fdf3385130aefbfb2a
nltk
wordnet.py
10
5
https://github.com/nltk/nltk.git
3
38
0
29
89
Python
{ "docstring": "\n Return the transitive closure of source under the rel\n relationship, breadth-first, discarding cycles:\n\n >>> from nltk.corpus import wordnet as wn\n >>> computer = wn.synset('computer.n.01')\n >>> topic = lambda s:s.topic_domains()\n >>> print(list(computer.closure(topic)))\n [Synset('computer_science.n.01')]\n\n UserWarning: Discarded redundant search for Synset('computer.n.01') at depth 2\n\n\n Include redundant paths (but only once), avoiding duplicate searches\n (from 'animal.n.01' to 'entity.n.01'):\n\n >>> dog = wn.synset('dog.n.01')\n >>> hyp = lambda s:s.hypernyms()\n >>> print(list(dog.closure(hyp)))\n [Synset('canine.n.02'), Synset('domestic_animal.n.01'), Synset('carnivore.n.01'),\\\n Synset('animal.n.01'), Synset('placental.n.01'), Synset('organism.n.01'),\\\n Synset('mammal.n.01'), Synset('living_thing.n.01'), Synset('vertebrate.n.01'),\\\n Synset('whole.n.02'), Synset('chordate.n.01'), Synset('object.n.01'),\\\n Synset('physical_entity.n.01'), Synset('entity.n.01')]\n\n UserWarning: Discarded redundant search for Synset('animal.n.01') at depth 7\n ", "language": "en", "n_whitespaces": 201, "n_words": 88, "vocab_size": 69 }
def closure(self, rel, depth=-1): from nltk.util import acyclic_breadth_first for synset in acyclic_breadth_first(self, rel, depth): if synset != self: yield synset from nltk.util import acyclic_depth_first as acyclic_tree from nltk.util import unweighted_minimum_spanning_tree as mst # Also add this shortcut? # from nltk.util import unweighted_minimum_spanning_digraph as umsd
20,505
101,068
157
plugins/convert/writer/opencv.py
46
13
def _get_save_args(self) -> Tuple[int, ...]: filetype = self
Convert: Add option to output mask separately for draw-transparent
_get_save_args
049314429f71a21e6595e9d27e9e36f6a3479c42
faceswap
opencv.py
11
18
https://github.com/deepfakes/faceswap.git
5
98
0
31
165
Python
{ "docstring": " Obtain the save parameters for the file format.\n\n Returns\n -------\n tuple\n The OpenCV specific arguments for the selected file format\n ", "language": "en", "n_whitespaces": 61, "n_words": 20, "vocab_size": 16 }
def _get_save_args(self) -> Tuple[int, ...]: filetype = self.config["format"] args: Tuple[int, ...] = tuple() if filetype == "jpg" and self.config["jpg_quality"] > 0: args = (cv2.IMWRITE_JPEG_QUALITY, # pylint: disable=no-member self.config["jpg_quality"]) if filetype == "png" and self.config["png_compress_level"] > -1: args = (cv2.IMWRITE_PNG_COMPRESSION, # pylint: disable=no-member self.config["png_compress_level"]) logger.debug(args) return args
22,598
107,133
32
lib/matplotlib/figure.py
7
7
def set_constrained_layout_pads(self, **kwargs): if isinstance(self.get_layout_engine(), ConstrainedLayoutEngine): self.
ENH: implement and use base layout_engine for more flexible layout.
set_constrained_layout_pads
ec4dfbc3c83866f487ff0bc9c87b0d43a1c02b22
matplotlib
figure.py
11
3
https://github.com/matplotlib/matplotlib.git
2
32
0
7
55
Python
{ "docstring": "\n Set padding for ``constrained_layout``.\n\n Tip: The parameters can be passed from a dictionary by using\n ``fig.set_constrained_layout(**pad_dict)``.\n\n See :doc:`/tutorials/intermediate/constrainedlayout_guide`.\n\n Parameters\n ----------\n w_pad : float, default: :rc:`figure.constrained_layout.w_pad`\n Width padding in inches. This is the pad around Axes\n and is meant to make sure there is enough room for fonts to\n look good. Defaults to 3 pts = 0.04167 inches\n\n h_pad : float, default: :rc:`figure.constrained_layout.h_pad`\n Height padding in inches. Defaults to 3 pts.\n\n wspace : float, default: :rc:`figure.constrained_layout.wspace`\n Width padding between subplots, expressed as a fraction of the\n subplot width. The total padding ends up being w_pad + wspace.\n\n hspace : float, default: :rc:`figure.constrained_layout.hspace`\n Height padding between subplots, expressed as a fraction of the\n subplot width. The total padding ends up being h_pad + hspace.\n\n ", "language": "en", "n_whitespaces": 291, "n_words": 122, "vocab_size": 74 }
def set_constrained_layout_pads(self, **kwargs): if isinstance(self.get_layout_engine(), ConstrainedLayoutEngine): self.get_layout_engine().set(**kwargs)
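An illustrative call matching the pad parameters listed in the docstring above; it goes through the layout engine exactly as the method shown does, and the pad values are arbitrary.

import matplotlib.pyplot as plt

fig, axs = plt.subplots(2, 2, layout="constrained")
for ax in axs.flat:
    ax.plot([0, 1], [0, 1])

# Pads are in inches, wspace/hspace are fractions of the subplot size.
fig.get_layout_engine().set(w_pad=4 / 72, h_pad=4 / 72, wspace=0.05, hspace=0.05)
plt.show()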
35,062
151,595
85
freqtrade/freqai/freqai_interface.py
32
6
def track_current_candle(self): if self.dd.current_candle > self.current_candle: self.get_corr_dataframes = True
start tracking the current candle in FreqAI, add robustness to corr_df caching and inference timer, add test for cache corr_df
track_current_candle
255eb71270991fe480cd642ee5ea2ce69964f8a9
freqtrade
freqai_interface.py
10
5
https://github.com/freqtrade/freqtrade.git
2
36
0
28
62
Python
{ "docstring": "\n Checks if the latest candle appended by the datadrawer is\n equivalent to the latest candle seen by FreqAI. If not, it\n asks to refresh the cached corr_dfs, and resets the pair\n counter.\n ", "language": "en", "n_whitespaces": 68, "n_words": 32, "vocab_size": 24 }
def track_current_candle(self): if self.dd.current_candle > self.current_candle: self.get_corr_dataframes = True self.pair_it = 0 self.current_candle = self.dd.current_candle # Following methods which are overridden by user made prediction models. # See freqai/prediction_models/CatboostPredictionModel.py for an example.
13,215
63,228
76
.venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py
26
8
def find(self, req): dist = self.by_key.get(req.key) if dist is not None and dist not in req: # XXX a
upd; format
find
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
__init__.py
9
5
https://github.com/jindongwang/transferlearning.git
3
40
0
22
64
Python
{ "docstring": "Find a distribution matching requirement `req`\n\n If there is an active distribution for the requested project, this\n returns it as long as it meets the version requirement specified by\n `req`. But, if there is an active distribution for the project and it\n does *not* meet the `req` requirement, ``VersionConflict`` is raised.\n If there is no active distribution for the requested project, ``None``\n is returned.\n ", "language": "en", "n_whitespaces": 114, "n_words": 64, "vocab_size": 38 }
def find(self, req): dist = self.by_key.get(req.key) if dist is not None and dist not in req: # XXX add more info raise VersionConflict(dist, req) return dist
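A hedged sketch of the three outcomes described in the docstring above, using the top-level pkg_resources API rather than pip's vendored copy; the requirement string is an arbitrary example.

import pkg_resources

req = pkg_resources.Requirement.parse("setuptools>=40.0")
try:
    # Active and satisfying: the distribution is returned.
    # Not active/installed: None is returned.
    # Active but not satisfying: VersionConflict is raised.
    dist = pkg_resources.working_set.find(req)
    print(dist)
except pkg_resources.VersionConflict as exc:
    print("conflict:", exc.report())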
25,019
113,746
161
nni/mutable/frozen.py
36
13
def current() -> dict | None: try: ContextStack.top(_FROZEN_CONTEXT_KEY) sample: Sample = {} for ctx in ContextStack
Mutable equal, frozen context, new labels (#5247)
current
a67180283b8d273b19f6a3497c6b898ab0c97b7d
nni
frozen.py
15
19
https://github.com/microsoft/nni.git
4
61
0
35
106
Python
{ "docstring": "Retrieve the current frozen context.\n    If multiple layers have been found, they are merged from bottom to top.\n\n    Returns\n    -------\n    The sample in frozen context.\n    If no sample is found, None is returned.\n    ", "language": "en", "n_whitespaces": 75, "n_words": 33, "vocab_size": 28 }
def current() -> dict | None: try: ContextStack.top(_FROZEN_CONTEXT_KEY) sample: Sample = {} for ctx in ContextStack.stack(_FROZEN_CONTEXT_KEY): if not isinstance(ctx, dict): raise TypeError(f'Expect architecture to be a dict, found: {ctx}') sample.update(ctx) return sample except NoContextError: return None
44
70
371
packages/syft/tests/syft/core/tensor/tensor_serde_test.py
230
37
def test_rept_child() -> None: rows = 10_000 cols = 7 rept_row_count = 5 # these times and sizes are based on the above constants # and Madhavas MacBook Pro 2019 expected_rept_mem_size = 4.010650634765625 expected_rept_ser_size = 7.4926300048828125 macbook_pro_2019_ser_time = 0.18791760900000032 macbook_pro_2019_de_time = 0.1726598199999998 sept = make_sept(rows=rows, cols=cols) rept_rows = [sept.copy() for i in range(rept_row_count)] rept = REPT(rows=rept_rows) start = timeit.default_timer() ser = sy.serialize(rept, to_bytes=True) end = timeit.default_timer() time_ser = end - start start = timeit.default_timer() de = sy.deserialize(ser, from_byte
Started DPTensor resource optimization - Added initial REPT and SEPT benchmarking tests - Deleted unused old Tensor classes - Added pympler for memory size tests Co-authored-by: @IshanMi Co-authored-by: @rasswanth-s
test_rept_child
10ae1d589044a6ae4722ead7aedc63fcdc4923b5
PySyft
tensor_serde_test.py
10
41
https://github.com/OpenMined/PySyft.git
2
278
0
132
501
Python
{ "docstring": "We need to benchmark both the size and time to serialize and deserialize REPTs", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 12 }
def test_rept_child() -> None: rows = 10_000 cols = 7 rept_row_count = 5 # these times and sizes are based on the above constants # and Madhavas MacBook Pro 2019 expected_rept_mem_size = 4.010650634765625 expected_rept_ser_size = 7.4926300048828125 macbook_pro_2019_ser_time = 0.18791760900000032 macbook_pro_2019_de_time = 0.1726598199999998 sept = make_sept(rows=rows, cols=cols) rept_rows = [sept.copy() for i in range(rept_row_count)] rept = REPT(rows=rept_rows) start = timeit.default_timer() ser = sy.serialize(rept, to_bytes=True) end = timeit.default_timer() time_ser = end - start start = timeit.default_timer() de = sy.deserialize(ser, from_bytes=True) end = timeit.default_timer() time_de = end - start assert rept == de current_rept_mem_size = size(rept) mem_diff = (current_rept_mem_size / expected_rept_mem_size * 100) - 100 current_rept_bytes_size = size(ser) bytes_diff = (current_rept_bytes_size / expected_rept_ser_size * 100) - 100 ser_time_diff = (time_ser / macbook_pro_2019_ser_time * 100) - 100 de_time_diff = (time_de / macbook_pro_2019_de_time * 100) - 100 print("REPT Stats") print("==========") print("In-memory size of REPT", size(rept)) print("Serialized size of REPT", size(ser)) print(f"Serializing {rept_row_count}x{rows}x{cols} took {time_ser} secs") print(f"Deserializing {rept_row_count}x{rows}x{cols} took {time_de} secs") print("Current Results") print("===============") print(f"In-memory size delta: {mem_diff}%") print(f"Serialized size delta: {bytes_diff}%") print(f"Serializing time delta: {ser_time_diff}%") print(f"Deserializing time delta: {de_time_diff}%") # we want to assert that our calculated values are smaller than the old values with # some tolerance assert (current_rept_mem_size - expected_rept_mem_size) < 1e-3 assert (current_rept_bytes_size - expected_rept_ser_size) < 2e-2 # TODO: make time benchmarks stable (probably can't run in parallel) # assert (time_ser - macbook_pro_2019_ser_time) < 2e-1 # assert (time_de - macbook_pro_2019_de_time) < 2e-1
48,294
197,037
798
sympy/ntheory/ecm.py
319
56
def _ecm_one_factor(n, B1=10000, B2=100000, max_curve=200): n = as_int(n) if B1 % 2 != 0 or B2 % 2 != 0: raise ValueError("The Bounds should be an even integer") sieve.extend(B2) if isprime(n): return n from sympy.functions.elementary.miscellaneous import sqrt from sympy.polys.polytools import gcd curve = 0 D = int(sqrt(B2)) beta = [0]*(D + 1) S = [0]*(D + 1) k = 1 for p in sieve.primerange(1, B1 + 1): k *= pow(p, integer_log(B1, p)[0]) while(curve <= max_curve): curve += 1 #Suyama's Paramatrization sigma = rgen.randint(6, n - 1) u = (sigma*sigma - 5) % n v = (4*sigma) % n diff = v - u u_3 = pow(u, 3, n) try: C = (pow(diff, 3, n)*(3*u + v)*mod_inverse(4*u_3*v, n) - 2) % n except ValueError: #If the mod_inverse(4*u_3*v, n) doesn't exist return gcd(4*u_3*v, n) a24 = (C + 2)*mod_inverse(4, n) % n Q = Point(u_3, pow(v, 3, n), a24, n) Q = Q.mont_ladder(k) g = gcd(Q.z_cord, n) #Stage 1 factor if g != 1 and g != n: return g #Stage 1 failure. Q.z = 0, Try another curve
Refactored import ordering in functions
_ecm_one_factor
e0dc14eca132f37c5f49369eb4051eae37c9b119
sympy
ecm.py
19
58
https://github.com/sympy/sympy.git
15
615
0
170
933
Python
{ "docstring": "Returns one factor of n using\n    Lenstra's 2 Stage Elliptic curve Factorization\n    with Suyama's Parameterization. Here Montgomery\n    arithmetic is used for fast computation of addition\n    and doubling of points in elliptic curve.\n\n    This ECM method considers elliptic curves in Montgomery\n    form (E : b*y**2*z = x**3 + a*x**2*z + x*z**2) and involves\n    elliptic curve operations (mod N), where the elements in\n    Z are reduced (mod N). Since N is not a prime, E over FF(N)\n    is not really an elliptic curve but we can still do point additions\n    and doubling as if FF(N) was a field.\n\n    Stage 1 : The basic algorithm involves taking a random point (P) on an\n    elliptic curve in FF(N). Then compute k*P using the Montgomery ladder algorithm.\n    Let q be an unknown factor of N. Then the order of the curve E, |E(FF(q))|,\n    might be a smooth number that divides k. Then we have k = l * |E(FF(q))|\n    for some l. For any point belonging to the curve E, |E(FF(q))|*P = O,\n    hence k*P = l*|E(FF(q))|*P. Thus kP.z_cord = 0 (mod q), and the unknown\n    factor of N (q) can be recovered by taking gcd(kP.z_cord, N).\n\n    Stage 2 : This is a continuation of Stage 1 if k*P != O. The idea utilizes\n    the fact that even if kP != 0, the value of k might miss just one large\n    prime divisor of |E(FF(q))|. In this case we only need to compute the\n    scalar multiplication by p to get p*k*P = O. Here a second bound B2\n    restricts the size of possible values of p.\n\n    Parameters\n    ==========\n\n    n : Number to be Factored\n    B1 : Stage 1 Bound\n    B2 : Stage 2 Bound\n    max_curve : Maximum number of curves generated\n\n    References\n    ==========\n\n    .. [1] Carl Pomerance and Richard Crandall \"Prime Numbers:\n        A Computational Perspective\" (2nd Ed.), page 344\n    ", "language": "en", "n_whitespaces": 407, "n_words": 303, "vocab_size": 187 }
def _ecm_one_factor(n, B1=10000, B2=100000, max_curve=200): n = as_int(n) if B1 % 2 != 0 or B2 % 2 != 0: raise ValueError("The Bounds should be an even integer") sieve.extend(B2) if isprime(n): return n from sympy.functions.elementary.miscellaneous import sqrt from sympy.polys.polytools import gcd curve = 0 D = int(sqrt(B2)) beta = [0]*(D + 1) S = [0]*(D + 1) k = 1 for p in sieve.primerange(1, B1 + 1): k *= pow(p, integer_log(B1, p)[0]) while(curve <= max_curve): curve += 1 #Suyama's Paramatrization sigma = rgen.randint(6, n - 1) u = (sigma*sigma - 5) % n v = (4*sigma) % n diff = v - u u_3 = pow(u, 3, n) try: C = (pow(diff, 3, n)*(3*u + v)*mod_inverse(4*u_3*v, n) - 2) % n except ValueError: #If the mod_inverse(4*u_3*v, n) doesn't exist return gcd(4*u_3*v, n) a24 = (C + 2)*mod_inverse(4, n) % n Q = Point(u_3, pow(v, 3, n), a24, n) Q = Q.mont_ladder(k) g = gcd(Q.z_cord, n) #Stage 1 factor if g != 1 and g != n: return g #Stage 1 failure. Q.z = 0, Try another curve elif g == n: continue #Stage 2 - Improved Standard Continuation S[1] = Q.double() S[2] = S[1].double() beta[1] = (S[1].x_cord*S[1].z_cord) % n beta[2] = (S[2].x_cord*S[2].z_cord) % n for d in range(3, D + 1): S[d] = S[d - 1].add(S[1], S[d - 2]) beta[d] = (S[d].x_cord*S[d].z_cord) % n g = 1 B = B1 - 1 T = Q.mont_ladder(B - 2*D) R = Q.mont_ladder(B) for r in range(B, B2, 2*D): alpha = (R.x_cord*R.z_cord) % n for q in sieve.primerange(r + 2, r + 2*D + 1): delta = (q - r) // 2 f = (R.x_cord - S[d].x_cord)*(R.z_cord + S[d].z_cord) -\ alpha + beta[delta] g = (g*f) % n #Swap T, R = R, R.add(S[D], T) g = gcd(n, g) #Stage 2 Factor found if g != 1 and g != n: return g #ECM failed, Increase the bounds raise ValueError("Increase the bounds")
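The stage-1 smoothness trick in the docstring above can be made concrete with the simpler Pollard p-1 method, which is a related but distinct algorithm; the bound and the sample modulus below are arbitrary choices for illustration.

from math import gcd

def pollard_p_minus_1(n, B=25, a=2):
    # Build k as the product of all maximal prime powers <= B, mirroring the
    # k assembled from sieve.primerange in stage 1 of the function above.
    k = 1
    for p in range(2, B + 1):
        if all(p % d for d in range(2, int(p ** 0.5) + 1)):  # p is prime
            pe = p
            while pe * p <= B:
                pe *= p
            k *= pe
    # If the order of a modulo an unknown prime factor q divides k, then
    # a**k == 1 (mod q), so the gcd below picks up a multiple of q.
    g = gcd(pow(a, k, n) - 1, n)
    return g if 1 < g < n else None

# 10807 = 101 * 107 and 101 - 1 = 100 is 25-smooth, so 101 is recovered.
print(pollard_p_minus_1(10807))  # 101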
107,219
308,463
386
tests/components/command_line/test_cover.py
78
11
async def test_unique_id(hass): await setup_test_entity( hass, { "unique": { "command_open": "echo open", "command_close": "echo close", "command_stop": "echo stop", "u
Add unique_id configuration variable to command_line integration (#58596)
test_unique_id
d26275011ae4e8ba0a8dcdc2a7ef81b5911d3900
core
test_cover.py
13
32
https://github.com/home-assistant/core.git
1
138
0
38
264
Python
{ "docstring": "Test unique_id option and if it only creates one cover per id.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
async def test_unique_id(hass): await setup_test_entity( hass, { "unique": { "command_open": "echo open", "command_close": "echo close", "command_stop": "echo stop", "unique_id": "unique", }, "not_unique_1": { "command_open": "echo open", "command_close": "echo close", "command_stop": "echo stop", "unique_id": "not-so-unique-anymore", }, "not_unique_2": { "command_open": "echo open", "command_close": "echo close", "command_stop": "echo stop", "unique_id": "not-so-unique-anymore", }, }, ) assert len(hass.states.async_all()) == 2 ent_reg = entity_registry.async_get(hass) assert len(ent_reg.entities) == 2 assert ent_reg.async_get_entity_id("cover", "command_line", "unique") is not None assert ( ent_reg.async_get_entity_id("cover", "command_line", "not-so-unique-anymore") is not None )
@pytest.fixture
88,829
289,693
110
tests/components/mqtt/test_config_flow.py
28
7
def mock_ssl_context(): with patch( "homeassistant.components.mqtt.config_flow.SSLC
Move advanced MQTT options to entry (#79351) * Move advanced broker settings to entry * Add repair issue for deprecated settings * Split CONFIG_SCHEMA * Do not store certificate UI flags in entry * Keep entered password in next dialog * Do not process yaml config in flow * Correct typo
mock_ssl_context
5e7f571f019c0b992b9cb8ffa545c12e8169d395
core
test_config_flow.py
11
13
https://github.com/home-assistant/core.git
1
42
1
20
92
Python
{ "docstring": "Mock the SSL context used to load the cert chain and to load verify locations.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 12 }
def mock_ssl_context(): with patch( "homeassistant.components.mqtt.config_flow.SSLContext" ) as mock_context, patch( "homeassistant.components.mqtt.config_flow.load_pem_private_key" ) as mock_key_check, patch( "homeassistant.components.mqtt.config_flow.load_pem_x509_certificate" ) as mock_cert_check: yield { "context": mock_context, "load_pem_x509_certificate": mock_cert_check, "load_pem_private_key": mock_key_check, } @pytest.fixture
7,671
42,640
274
tests/cli/commands/test_task_command.py
21
13
def test_task_states_for_dag_run_when_dag_run_not_exists(self): with pytest.raises(DagRunNotFound): default_date2 = t
Replaced all days_ago functions with datetime functions (#23237) Co-authored-by: Dev232001 <[email protected]>
test_task_states_for_dag_run_when_dag_run_not_exists
f352ee63a5d09546a7997ba8f2f8702a1ddb4af7
airflow
test_task_command.py
14
15
https://github.com/apache/airflow.git
1
56
0
20
97
Python
{ "docstring": "\n        task_states_for_dag_run should raise an AirflowException when an invalid dag id is passed\n        ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
def test_task_states_for_dag_run_when_dag_run_not_exists(self): with pytest.raises(DagRunNotFound): default_date2 = timezone.datetime(2016, 1, 9) task_command.task_states_for_dag_run( self.parser.parse_args( [ 'tasks', 'states-for-dag-run', 'not_exists_dag', default_date2.isoformat(), '--output', "json", ] ) )
15,993
73,217
29
wagtail/contrib/modeladmin/tests/test_page_modeladmin.py
8
5
def test_title_present(self): response = self.get(4) self.assertConta
Reformat with black
test_title_present
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
test_page_modeladmin.py
8
3
https://github.com/wagtail/wagtail.git
1
24
0
8
42
Python
{ "docstring": "\n The page title should appear three times. Once in the header, and two times\n in the field listing (as the actual title and as the draft title)\n ", "language": "en", "n_whitespaces": 49, "n_words": 27, "vocab_size": 21 }
def test_title_present(self): response = self.get(4) self.assertContains(response, "Christmas", 3)
22,669
107,267
121
lib/matplotlib/axes/_base.py
30
16
def _set_position(self, pos, which='both'): i
Fix typos
_set_position
f7e4349b6c20d127e88a8f750fe1df7462350971
matplotlib
_base.py
12
9
https://github.com/matplotlib/matplotlib.git
5
85
0
23
143
Python
{ "docstring": "\n        Private version of set_position.\n\n        Call this internally to get the same functionality as `set_position`,\n        but not to take the axis out of the constrained_layout hierarchy.\n        ", "language": "en", "n_whitespaces": 54, "n_words": 25, "vocab_size": 20 }
def _set_position(self, pos, which='both'): if not isinstance(pos, mtransforms.BboxBase): pos = mtransforms.Bbox.from_bounds(*pos) for ax in self._twinned_axes.get_siblings(self): if which in ('both', 'active'): ax._position.set(pos) if which in ('both', 'original'): ax._originalPosition.set(pos) self.stale = True