complexity       int64           1 .. 56
n_identifiers    int64           1 .. 114
code             stringlengths   19 .. 12.7k
path             stringlengths   8 .. 134
n_ast_nodes      int64           12 .. 2.35k
ast_errors       stringlengths   0 .. 4.01k
repo             stringlengths   3 .. 28
documentation    dict
n_words          int64           2 .. 866
language         stringclasses   1 value
vocab_size       int64           2 .. 323
commit_id        stringlengths   40 .. 40
file_name        stringlengths   5 .. 79
id               int64           243 .. 338k
nloc             int64           1 .. 228
token_counts     int64           5 .. 1.4k
fun_name         stringlengths   1 .. 77
url              stringlengths   31 .. 60
commit_message   stringlengths   3 .. 15.3k
n_whitespaces    int64           1 .. 3.23k
n_ast_errors     int64           0 .. 20
d_id             int64           74 .. 121k
ast_levels       int64           4 .. 29
2
7
def row_swap(self, i, j):
    for k in range(0, self.cols):
        self[i, k], self[j, k] = self[j, k], self[i, k]
sympy/matrices/repmatrix.py
69
sympy
{ "docstring": "Swap the two given rows of the matrix in-place.\n\n Examples\n ========\n\n >>> from sympy import Matrix\n >>> M = Matrix([[0, 1], [1, 0]])\n >>> M\n Matrix([\n [0, 1],\n [1, 0]])\n >>> M.row_swap(0, 1)\n >>> M\n Matrix([\n [1, 0],\n [0, 1]])\n\n See Also\n ========\n\n row\n col_swap\n ", "language": "en", "n_whitespaces": 171, "n_words": 45, "vocab_size": 31 }
18
Python
14
59d22b6bb7287613d598611027f640d068ca5748
repmatrix.py
196,394
3
49
row_swap
https://github.com/sympy/sympy.git
Moved imports to higher level
43
0
47,894
10
1
17
def setup_method(self):
    self.simple_graph = nx.complete_bipartite_graph(2, 3)
    self.simple_solution = {0: 2, 1: 3, 2: 0, 3: 1}

    edges = [(0, 7), (0, 8), (2, 6), (2, 9), (3, 8), (4, 8), (4, 9), (5, 11)]
    self.top_nodes = set(range(6))
    self.graph = nx.Graph()
    self.graph.add_nodes_from(range(12))
    self.graph.add_edges_from(edges)

    # Example bipartite graph from issue 2127
    G = nx.Graph()
    G.add_nodes_from(
        [
            (1, "C"), (1, "B"), (0, "G"), (1, "F"), (1, "E"), (0, "C"),
            (1, "D"), (1, "I"), (0, "A"), (0, "D"), (0, "F"), (0, "E"),
            (0, "H"), (1, "G"), (1, "A"), (0, "I"), (0, "B"), (1, "H"),
        ]
    )
    G.add_edge((1, "C"), (0, "A"))
    G.add_edge((1, "B"), (0, "A"))
    G.add_edge((0, "G"), (1, "I"))
    G.add_edge((0, "G"), (1, "H"))
    G.add_edge((1, "F"), (0, "A"))
    G.add_edge((1, "F"), (0, "C"))
    G.add_edge((1, "F"), (0, "E"))
    G.add_edge((1, "E"), (0, "A"))
    G.add_edge((1, "E"), (0, "C"))
    G.add_edge((0, "C"), (1, "D"))
    G.add_edge((0, "C"), (1, "I"))
    G.add_edge((0, "C"), (1, "G"))
    G.add_edge((0, "C"), (1, "H"))
    G.add_edge((1, "D"), (0, "A"))
    G.add_edge((1, "I"), (0, "A"))
    G.add_edge((1, "I"), (0, "E"))
    G.add_edge((0, "A"), (1, "G"))
    G.add_edge((0, "A"), (1, "H"))
    G.add_edge((0, "E"), (1, "G"))
    G.add_edge((0, "E"), (1, "H"))
    self.disconnected_graph = G
networkx/algorithms/bipartite/tests/test_matching.py
901
networkx
{ "docstring": "Creates a bipartite graph for use in testing matching algorithms.\n\n The bipartite graph has a maximum cardinality matching that leaves\n vertex 1 and vertex 10 unmatched. The first six numbers are the left\n vertices and the next six numbers are the right vertices.\n\n ", "language": "en", "n_whitespaces": 71, "n_words": 43, "vocab_size": 31 }
175
Python
65
6ef8b9986ad9a8bc79a4a6640a8f9ee285b67a7b
test_matching.py
177,423
52
576
setup_method
https://github.com/networkx/networkx.git
Update pytest (#6165)
698
0
42,374
10
7
16
def dispatch_call(self, frame, arg):
    # XXX 'arg' is no longer used
    if self.botframe is None:
        # First call of dispatch since reset()
        self.botframe = frame.f_back  # (CT) Note that this may also be None!
        return self.trace_dispatch
    if not (self.stop_here(frame) or self.break_anywhere(frame)):
        # No need to trace this function
        return  # None
    # Ignore call events in generator except when stepping.
    if self.stopframe and frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS:
        return self.trace_dispatch
    self.user_call(frame, arg)
    if self.quitting: raise BdbQuit
    return self.trace_dispatch
python3.10.4/Lib/bdb.py
137
XX-Net
{ "docstring": "Invoke user function and return trace function for call event.\n\n If the debugger stops on this function call, invoke\n self.user_call(). Raise BdbQuit if self.quitting is set.\n Return self.trace_dispatch to continue tracing in this scope.\n ", "language": "en", "n_whitespaces": 62, "n_words": 34, "vocab_size": 31 }
76
Python
59
8198943edd73a363c266633e1aa5b2a9e9c9f526
bdb.py
221,115
11
83
dispatch_call
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
205
0
56,216
10
2
22
def subprocess_run_helper(func, *args, timeout, extra_env=None):
    target = func.__name__
    module = func.__module__
    proc = subprocess.run(
        [sys.executable,
         "-c",
         f"from {module} import {target}; {target}()",
         *args],
        env={**os.environ, "SOURCE_DATE_EPOCH": "0", **(extra_env or {})},
        timeout=timeout, check=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True)
    return proc
lib/matplotlib/testing/__init__.py
151
matplotlib
{ "docstring": "\n Run a function in a sub-process.\n\n Parameters\n ----------\n func : function\n The function to be run. It must be in a module that is importable.\n *args : str\n Any additional command line arguments to be passed in\n the first argument to ``subprocess.run``.\n extra_env : dict[str, str]\n Any additional environment variables to be set for the subprocess.\n ", "language": "en", "n_whitespaces": 107, "n_words": 56, "vocab_size": 39 }
35
Python
32
031093e6f05496f55616a1fa2f39e573fea02828
__init__.py
108,485
14
92
subprocess_run_helper
https://github.com/matplotlib/matplotlib.git
Tweak subprocess_run_helper. On general grounds, an API like `subprocess_run_helper(func, *args, timeout, **extra_env)` is problematic because it prevents one from passing an environment variable called "timeout". Instead, pass the extra environment variables as a dict, without unpacking. (Technically this has been released in 3.5.2 as public API, but 1) I'm not really sure it should have been a public API to start with (should we deprecate it and make it private?), and 2) hopefully tweaking that in 3.5.3 with no deprecation is not going to disrupt anyone... I can still put in a changelog entry if that's preferred.)
116
0
23,212
14
4
9
def getManhattanDistance(self):
    ans = 0
    for i in range(self.size):
        for j in range(self.size):
            if self.state[i][j] != 0:
                ans = (
                    ans
                    + abs((self.state[i][j] - 1) % self.size - j)
                    + abs((self.state[i][j] - 1) // self.size - i)
                )
    return ans
Eight_Puzzle_Solver/eight_puzzle.py
146
Python
{ "docstring": "\n Parameters: State\n Returns: Manhattan Distance between Current State and Goal State\n Restrictions: State must be a self.size x self.size Array\n ", "language": "en", "n_whitespaces": 49, "n_words": 20, "vocab_size": 16 }
40
Python
26
f0af0c43340763724f139fa68aa1e5a9ffe458b4
eight_puzzle.py
22,421
11
89
getManhattanDistance
https://github.com/geekcomputers/Python.git
refactor: clean code Signed-off-by: slowy07 <[email protected]>
201
0
4,326
24
2
6
def _on_source_file_changed(self) -> None:
    if self._run_on_save:
        self.request_rerun(self._client_state)
    else:
        self._enqueue_file_change_message()
lib/streamlit/app_session.py
50
streamlit
{ "docstring": "One of our source files changed. Schedule a rerun if appropriate.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
9
Python
9
ee09d5da0986357dccdae9d0fff50c3dab3b40cf
app_session.py
118,779
6
28
_on_source_file_changed
https://github.com/streamlit/streamlit.git
ScriptRunner + AppSession type annotations (#4376) Adds missing type annotations in `script_runner.py` and `app_session.py`. No behavior changes.
52
0
26,419
10
10
27
def __new__(cls, partition, integer=None):
    if integer is not None:
        integer, partition = partition, integer
    if isinstance(partition, (dict, Dict)):
        _ = []
        for k, v in sorted(list(partition.items()), reverse=True):
            if not v:
                continue
            k, v = as_int(k), as_int(v)
            _.extend([k]*v)
        partition = tuple(_)
    else:
        partition = tuple(sorted(map(as_int, partition), reverse=True))
    sum_ok = False
    if integer is None:
        integer = sum(partition)
        sum_ok = True
    else:
        integer = as_int(integer)

    if not sum_ok and sum(partition) != integer:
        raise ValueError("Partition did not add to %s" % integer)
    if any(i < 1 for i in partition):
        raise ValueError("All integer summands must be greater than one")

    obj = Basic.__new__(cls, Integer(integer), Tuple(*partition))
    obj.partition = list(partition)
    obj.integer = integer
    return obj
sympy/combinatorics/partitions.py
337
sympy
{ "docstring": "\n Generates a new IntegerPartition object from a list or dictionary.\n\n Explanation\n ===========\n\n The partition can be given as a list of positive integers or a\n dictionary of (integer, multiplicity) items. If the partition is\n preceded by an integer an error will be raised if the partition\n does not sum to that given integer.\n\n Examples\n ========\n\n >>> from sympy.combinatorics.partitions import IntegerPartition\n >>> a = IntegerPartition([5, 4, 3, 1, 1])\n >>> a\n IntegerPartition(14, (5, 4, 3, 1, 1))\n >>> print(a)\n [5, 4, 3, 1, 1]\n >>> IntegerPartition({1:3, 2:1})\n IntegerPartition(5, (2, 1, 1, 1))\n\n If the value that the partition should sum to is given first, a check\n will be made to see n error will be raised if there is a discrepancy:\n\n >>> IntegerPartition(10, [5, 4, 3, 1])\n Traceback (most recent call last):\n ...\n ValueError: The partition is not valid\n\n ", "language": "en", "n_whitespaces": 307, "n_words": 138, "vocab_size": 80 }
109
Python
69
24f1e7730119fe958cc8e28411f790c9a5ec04eb
partitions.py
200,380
27
210
__new__
https://github.com/sympy/sympy.git
Fix various typos Found via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`
374
0
49,610
15
4
9
def _deprecate_ci(errorbar, ci):
    if ci != "deprecated":
        if ci is None:
            errorbar = None
        elif ci == "sd":
            errorbar = "sd"
        else:
            errorbar = ("ci", ci)
        msg = (
            "\n\nThe `ci` parameter is deprecated. "
            f"Use `errorbar={repr(errorbar)}` for the same effect.\n"
        )
        warnings.warn(msg, FutureWarning, stacklevel=3)

    return errorbar
seaborn/utils.py
117
seaborn
{ "docstring": "\n Warn on usage of ci= and convert to appropriate errorbar= arg.\n\n ci was deprecated when errorbar was added in 0.12. It should not be removed\n completely for some time, but it can be moved out of function definitions\n (and extracted from kwargs) after one cycle.\n\n ", "language": "en", "n_whitespaces": 61, "n_words": 45, "vocab_size": 42 }
47
Python
37
26bf4b3b645edc405ca52b533b8d68273aeba7d1
utils.py
41,875
14
59
_deprecate_ci
https://github.com/mwaskom/seaborn.git
Housekeeping on relational plot parameters (#2855) * Do some housekeeping on lineplot ci deprecation * Remove some unused parameters from scatterplot * Remove incorrect statement from relplot docstring * Update lineplot ci= deprecation test
153
0
7,451
14
3
18
def start(self, tag, attrib={}, **extra):
    self.__flush()
    tag = _escape_cdata(tag)
    self.__data = []
    self.__tags.append(tag)
    self.__write(self.__indentation[:len(self.__tags) - 1])
    self.__write("<%s" % tag)
    for k, v in {**attrib, **extra}.items():
        if v:
            k = _escape_cdata(k)
            v = _quote_escape_attrib(v)
            self.__write(' %s=%s' % (k, v))
    self.__open = 1
    return len(self.__tags) - 1
lib/matplotlib/backends/backend_svg.py
206
matplotlib
{ "docstring": "\n Open a new element. Attributes can be given as keyword\n arguments, or as a string/string dictionary. The method returns\n an opaque identifier that can be passed to the :meth:`close`\n method, to close all open elements up to and including this one.\n\n Parameters\n ----------\n tag\n Element tag.\n attrib\n Attribute dictionary. Alternatively, attributes can be given as\n keyword arguments.\n\n Returns\n -------\n An element identifier.\n ", "language": "en", "n_whitespaces": 182, "n_words": 62, "vocab_size": 50 }
45
Python
37
ec410abbb3a721e31f3aaa61e9e4f941467e35e1
backend_svg.py
108,142
14
126
start
https://github.com/matplotlib/matplotlib.git
Deprecate functions in backends
171
0
23,076
13
2
14
def parse_wheel(wheel_zip, name):
    # type: (ZipFile, str) -> Tuple[str, Message]
    try:
        info_dir = wheel_dist_info_dir(wheel_zip, name)
        metadata = wheel_metadata(wheel_zip, info_dir)
        version = wheel_version(metadata)
    except UnsupportedWheel as e:
        raise UnsupportedWheel("{} has an invalid wheel, {}".format(name, str(e)))

    check_compatibility(version, name)

    return info_dir, metadata
.venv/lib/python3.8/site-packages/pip/_internal/utils/wheel.py
103
transferlearning
{ "docstring": "Extract information from the provided wheel, ensuring it meets basic\n standards.\n\n Returns the name of the .dist-info directory and the parsed WHEEL metadata.\n ", "language": "en", "n_whitespaces": 32, "n_words": 23, "vocab_size": 20 }
39
Python
35
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
wheel.py
61,340
9
62
parse_wheel
https://github.com/jindongwang/transferlearning.git
upd; format
85
0
12,522
14
1
3
def reduce(tensor, reduction="mean"):
src/accelerate/utils.py
20
accelerate
{ "docstring": "\n Recursively reduce the tensors in a nested list/tuple/dictionary of lists of tensors across all processes by the\n mean of a given operation.\n\n Args:\n tensor (nested list/tuple/dictionary of `torch.Tensor`):\n The data to reduce.\n reduction (`str`, *optional*, defaults to `\"mean\"`):\n A reduction method. Can be of \"mean\", \"sum\", or \"none\"\n\n Returns:\n The same data structure as `data` with all the tensors reduced.\n ", "language": "en", "n_whitespaces": 119, "n_words": 60, "vocab_size": 45 }
3
Python
3
5f433673e1bfc7588f8899b1ddf15c85bd630410
utils.py
337,446
3
27
reduce
https://github.com/huggingface/accelerate.git
Introduce reduce operator (#326) Co-authored-by: Sylvain Gugger <[email protected]>
6
0
121,056
6
2
2
def parameterize_with_task_runners(*values):
tests/test_task_runners.py
15
prefect
{ "docstring": "\n Generates a `pytest.mark.parametrize` instance for the `task_runner` indirect\n fixture.\n\n Passes marks from the fixtures to the parameter so we can indicate required services\n on each task runner fixture.\n ", "language": "en", "n_whitespaces": 44, "n_words": 28, "vocab_size": 25 }
2
Python
2
dc0f9feb764c72620a68ca139eb56e43f6e5f068
test_task_runners.py
53,897
8
37
parameterize_with_task_runners
https://github.com/PrefectHQ/prefect.git
Add service marks to task runner tests
5
0
10,949
6
2
14
def test_login_required(self, view_url="/login_required/", login_url=None):
    if login_url is None:
        login_url = settings.LOGIN_URL
    response = self.client.get(view_url)
    self.assertEqual(response.status_code, 302)
    self.assertIn(login_url, response.url)
    self.login()
    response = self.client.get(view_url)
    self.assertEqual(response.status_code, 200)
tests/auth_tests/test_decorators.py
127
django
{ "docstring": "\n login_required works on a simple view wrapped in a login_required\n decorator.\n ", "language": "en", "n_whitespaces": 33, "n_words": 11, "vocab_size": 9 }
24
Python
18
9c19aff7c7561e3a82978a272ecdaad40dda5c00
test_decorators.py
201,206
9
79
test_login_required
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
91
0
49,901
9
1
14
def get_estimator(self) -> BaseEstimator:
    with self.as_directory() as checkpoint_path:
        estimator_path = os.path.join(checkpoint_path, MODEL_KEY)
        with open(estimator_path, "rb") as f:
            return cpickle.load(f)
python/ray/train/sklearn/sklearn_checkpoint.py
83
ray
{ "docstring": "Retrieve the ``Estimator`` stored in this checkpoint.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
19
Python
17
ac1d21027da8a8c002cc7c28b8d1dc89c0d72fcf
sklearn_checkpoint.py
125,333
6
46
get_estimator
https://github.com/ray-project/ray.git
[AIR] Add framework-specific checkpoints (#26777)
70
0
27,838
13
1
21
def condition_score_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
    alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)

    eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
    eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, t, p_mean_var, **model_kwargs)

    out = p_mean_var.copy()
    out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
    out["mean"], _, _ = self.q_posterior_mean_variance(x_start=out["pred_xstart"], x_t=x, t=t)
    return out
modules/image/text_to_image/disco_diffusion_cnclip_vitb16/reverse_diffusion/model/gaussian_diffusion.py
187
PaddleHub
{ "docstring": "\n Compute what the p_mean_variance output would have been, should the\n model's score function be conditioned by cond_fn.\n\n See condition_mean() for details on cond_fn.\n\n Unlike condition_mean(), this instead uses the conditioning strategy\n from Song et al (2020).\n ", "language": "en", "n_whitespaces": 79, "n_words": 36, "vocab_size": 33 }
46
Python
32
f4d6e64cdc132ae868699a0ba442f4ab1d304a14
gaussian_diffusion.py
49,781
8
124
condition_score_with_grad
https://github.com/PaddlePaddle/PaddleHub.git
add disco_diffusion_cnclip_vitb16 module
102
0
9,905
12
2
8
def is_strongly_connected(G):
    if len(G) == 0:
        raise nx.NetworkXPointlessConcept(
        )

    return len(list(strongly_connected_components(G))[0]) == len(G)


@not_implemented_for("undirected")
networkx/algorithms/components/strongly_connected.py
80
@not_implemented_for("undirected")
networkx
{ "docstring": "Test directed graph for strong connectivity.\n\n A directed graph is strongly connected if and only if every vertex in\n the graph is reachable from every other vertex.\n\n Parameters\n ----------\n G : NetworkX Graph\n A directed graph.\n\n Returns\n -------\n connected : bool\n True if the graph is strongly connected, False otherwise.\n\n Examples\n --------\n >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 0), (2, 4), (4, 2)])\n >>> nx.is_strongly_connected(G)\n True\n >>> G.remove_edge(2, 3)\n >>> nx.is_strongly_connected(G)\n False\n\n Raises\n ------\n NetworkXNotImplemented\n If G is undirected.\n\n See Also\n --------\n is_weakly_connected\n is_semiconnected\n is_connected\n is_biconnected\n strongly_connected_components\n\n Notes\n -----\n For directed graphs only.\n Connectivity is undefined for the null graph.", "language": "en", "n_whitespaces": 211, "n_words": 104, "vocab_size": 73 }
14
Python
12
7cad29b3542ad867f1eb5b7b6a9087495f252749
strongly_connected.py
176,593
6
40
is_strongly_connected
https://github.com/networkx/networkx.git
Added examples in connected and strongly connected functions (#5559) * added examples * Update networkx/algorithms/components/connected.py Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Ross Barnowski <[email protected]>
48
1
41,992
13
15
48
def _resolve_project_threshold_config(self) -> SelectType: org_id = self.builder.params.get("organization_id") project_ids = self.builder.params.get("project_id") project_threshold_configs = ( ProjectTransactionThreshold.objects.filter( organization_id=org_id, project_id__in=project_ids, ) .order_by("project_id") .values_list("project_id", "threshold", "metric") ) transaction_threshold_configs = ( ProjectTransactionThresholdOverride.objects.filter( organization_id=org_id, project_id__in=project_ids, ) .order_by("project_id") .values_list("transaction", "project_id", "threshold", "metric") ) num_project_thresholds = project_threshold_configs.count() sentry_sdk.set_tag("project_threshold.count", num_project_thresholds) sentry_sdk.set_tag( "project_threshold.count.grouped", format_grouped_length(num_project_thresholds, [10, 100, 250, 500]), ) num_transaction_thresholds = transaction_threshold_configs.count() sentry_sdk.set_tag("txn_threshold.count", num_transaction_thresholds) sentry_sdk.set_tag( "txn_threshold.count.grouped", format_grouped_length(num_transaction_thresholds, [10, 100, 250, 500]), ) if ( num_project_thresholds + num_transaction_thresholds > constants.MAX_QUERYABLE_TRANSACTION_THRESHOLDS ): raise InvalidSearchQuery( f"Exceeded {constants.MAX_QUERYABLE_TRANSACTION_THRESHOLDS} configured transaction thresholds limit, try with fewer Projects." ) # Arrays need to have toUint64 casting because clickhouse will define the type as the narrowest possible type # that can store listed argument types, which means the comparison will fail because of mismatched types project_thresholds = {} project_threshold_config_keys = [] project_threshold_config_values = [] for project_id, threshold, metric in project_threshold_configs: metric = TRANSACTION_METRICS[metric] if ( threshold == constants.DEFAULT_PROJECT_THRESHOLD and metric == constants.DEFAULT_PROJECT_THRESHOLD_METRIC ): # small optimization, if the configuration is equal to the default, # we can skip it in the final query continue project_thresholds[project_id] = (metric, threshold) project_threshold_config_keys.append(Function("toUInt64", [project_id])) project_threshold_config_values.append((metric, threshold)) project_threshold_override_config_keys = [] project_threshold_override_config_values = [] for transaction, project_id, threshold, metric in transaction_threshold_configs: metric = TRANSACTION_METRICS[metric] if ( project_id in project_thresholds and threshold == project_thresholds[project_id][1] and metric == project_thresholds[project_id][0] ): # small optimization, if the configuration is equal to the project # configs, we can skip it in the final query continue elif ( project_id not in project_thresholds and threshold == constants.DEFAULT_PROJECT_THRESHOLD and metric == constants.DEFAULT_PROJECT_THRESHOLD_METRIC ): # small optimization, if the configuration is equal to the default # and no project configs were set, we can skip it in the final query continue transaction_id = self.resolve_tag_value(transaction) # Don't add to the config if we can't resolve it if transaction_id is None: continue project_threshold_override_config_keys.append( (Function("toUInt64", [project_id]), (Function("toUInt64", [transaction_id]))) ) project_threshold_override_config_values.append((metric, threshold)) project_threshold_config_index: SelectType = Function( "indexOf", [ project_threshold_config_keys, self.builder.column("project_id"), ], constants.PROJECT_THRESHOLD_CONFIG_INDEX_ALIAS, ) project_threshold_override_config_index: SelectType = 
Function( "indexOf", [ project_threshold_override_config_keys, (self.builder.column("project_id"), self.builder.column("transaction")), ], constants.PROJECT_THRESHOLD_OVERRIDE_CONFIG_INDEX_ALIAS, )
src/sentry/search/events/datasets/metrics.py
741
sentry
{ "docstring": "This is mostly duplicated code from the discover dataset version\n TODO: try to make this more DRY with the discover version\n ", "language": "en", "n_whitespaces": 35, "n_words": 21, "vocab_size": 18 }
318
Python
165
e1b25d625b185588fc7c2834dff5ea5bb3a98ce0
metrics.py
93,967
117
513
_resolve_project_threshold_config
https://github.com/getsentry/sentry.git
fix(mep): Use project thresholds for apdex calculation (#37256) - Currently apdex is always based on the satisfaction tags in the transaction.duration metrics. This updates the apdex function so we read the threshold config, and use that to determine which metric we should read the satisfaction tags from instead
1,399
0
19,036
14
1
6
def set_dpi(self, val):
    self._parent.dpi = val
    self.stale = True
lib/matplotlib/figure.py
34
matplotlib
{ "docstring": "\n Set the resolution of parent figure in dots-per-inch.\n \n Parameters\n ----------\n val : float\n ", "language": "en", "n_whitespaces": 57, "n_words": 13, "vocab_size": 13 }
9
Python
8
e12db8dcf12d408cf8cc23e95ea16b99038a058a
figure.py
108,688
3
20
set_dpi
https://github.com/matplotlib/matplotlib.git
Add get/set methods for DPI in SubFigure This fixes the following error: matplotlib\lib\text.py line 1489, dop = self.figure.get_dpi()/72. AttributeError: 'SubFigure' object has no attribute 'get_dpi'. Effect: in v3.5.2 it is not possible to save a figure with a subfigure to a PDF.
30
0
23,305
8
2
13
def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"):
    from distutils.ccompiler import CompileError
    self._check_compiler()
    ok = True
    try:
        self._preprocess(body, headers, include_dirs, lang)
    except CompileError:
        ok = False

    self._clean()
    return ok
python3.10.4/Lib/distutils/command/config.py
100
XX-Net
{ "docstring": "Construct a source file from 'body' (a string containing lines\n of C/C++ code) and 'headers' (a list of header files to include)\n and run it through the preprocessor. Return true if the\n preprocessor succeeded, false if there were any errors.\n ('body' probably isn't of much use, but what the heck.)\n ", "language": "en", "n_whitespaces": 86, "n_words": 50, "vocab_size": 43 }
27
Python
24
8198943edd73a363c266633e1aa5b2a9e9c9f526
config.py
222,727
10
63
try_cpp
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
105
0
56,711
9
1
5
def test_weird_target_2(self): b = a = self.check(b, a)
python3.10.4/Lib/lib2to3/tests/test_fixers.py
35
XX-Net
{ "docstring": "\n try:\n pass\n except Exception, a.foo:\n pass\n try:\n pass\n except Exception as xxx_todo_changeme:\n a.foo = xxx_todo_changeme\n pass", "language": "en", "n_whitespaces": 135, "n_words": 16, "vocab_size": 11 }
8
Python
7
8198943edd73a363c266633e1aa5b2a9e9c9f526
test_fixers.py
218,929
13
19
test_weird_target_2
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
30
0
55,554
7
1
28
def test_summarization(self):
    model = FlaxLongT5ForConditionalGeneration.from_pretrained(self.model_path)
    tok = AutoTokenizer.from_pretrained(self.model_path)

    ARTICLE =

    dct = tok(
        [ARTICLE],
        max_length=1024,
        padding="max_length",
        truncation=True,
        return_tensors="np",
    )

    hypotheses_batch = model.generate(
        **dct,
        num_beams=4,
        length_penalty=2.0,
        max_length=142,
        min_length=56,
        do_sample=False,
        early_stopping=True,
    ).sequences

    decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True, clean_up_tokenization_spaces=False)
    self.assertListEqual(
        self.expected_summary(),
        decoded,
    )
tests/models/longt5/test_modeling_flax_longt5.py
250
transformers
{ "docstring": "coronary artery disease ( cad ) is the emerging cause of morbidity and mortality in developing world . \\n it provides an excellent resolution for visualization of the coronary arteries for catheter - based or operating interventions . \\n\n although the association of this technique with major complications such as mortality is highly uncommon , it is frequently associated with various cardiac and noncardiac complications . computed tomography ( ct ) coronary angiography is\n a promising technique for the evaluation of cad noninvasively . \\n it assesses disease within the coronary artery and provides qualitative and quantitative information about nonobstructive atherosclerotic plaque burden within the vessel\n wall . \\n thus , ct angiography - based disease evaluation may provide clinically more significant information than conventional angiography . the introduction of multi - slice computed tomography ( msct ) technology such as 64-slice , 12\n 8-slice , 256-slice , and now 320-slice msct has produced a high diagnostic accuracy of ct coronary angiography . \\n it has consistently showed to have a very high negative predictive value ( well above 90% ) in ruling out patients with s\n ignificant cad defined as coronary luminal stenosis of > 50% . \\n the american college of cardiology / american heart association recommends that coronary angiography should be performed before valve surgery in men aged > 40 years , women\n aged > 35 years with coronary risk factors and in postmenopausal women . \\n the prevalence of cad in patients undergoing valve replacement is 2040% in developed countries . in the previous studies , \\n the incidence of angiographically p\n roven cad in acquired valvular diseases has been shown to vary widely from 9% to 41% . in aortic stenosis , \\n we aimed to report the diagnostic performance of 128-slice ct coronary angiography in 50 patients undergoing for major noncoron\n ary cardiac surgery referred for diagnostic invasive coronary angiography to assess the extent and severity of coronary stenosis . \\n during january 2013 to december 2014 , we enrolled fifty major noncoronary cardiac surgery patients sche\n duled for invasive coronary angiography who fulfilled the following inclusion criteria of age 40 years , having low or intermediate probability of cad , left ventricular ejection fraction ( lvef ) > 35% , and patient giving informed conse\n nt for undergoing msct and conventional coronary angiography . \\n those having any contraindication for contrast injection , lvef < 35% , high pretest probability of cad , and hemodynamic instability were excluded from the study . \\n pati\n ents with heart rates of > 70 bpm received ( unless they had known overt heart failure or electrocardiogram ( ecg ) atrioventricular conduction abnormalities ) a single oral dose of 100 mg metoprolol 45 min before the scan . \\n patients w\n ith heart rates of > 80 bpm received an additional oral dose of metoprolol if not contraindicated . \\n all patients were scanned with a 128-slice ct scanner ( siemens , somatom definition as ) equipped with a new feature in msct technolog\n y , so - called z - axis flying - focus technology . \\n the central 32 detector rows acquire 0.6-mm slices , and the flying - focus spot switches back and forth between 2 z positions between each reading . 
\\n two slices per detector row a\n re acquired , which results in a higher oversampling rate in the z - axis , thereby reducing artifacts related to the spiral acquisition and improving spatial resolution down to 0.4 mm . \\n a bolus of 6580 ml contrast material ( omnipaque\n ) was injected through an arm vein at a flow rate of 5 ml / s . \\n a bolus tracking technique was used to synchronize the arrival of contrast in the coronary arteries with the initiation of the scan . to monitor the arrival of contrast m\n aterial , \\n axial scans were obtained at the level of the ascending aorta with a delay of 10 s after the start of the contrast injection . \\n the scan was automatically started when a threshold of 150 hounsfield units was reached in a re\n gion of interest positioned in the ascending aorta . \\n images were reconstructed with ecg gating to obtain optimal , motion - free image quality . \\n all scans were performed within 2 weeks of the msct coronary diagnostic angiogram . a s\n ingle observer unaware of the multi - slice ct results identified coronary lesion as a single vessel , double vessel , or triple vessel disease . \\n all lesion , regardless of size , were included for comparison with ct coronary angiograp\n hy . \\n lesions were classified as having nonsignificant disease ( luminal irregularities or < 50% stenosis ) or as having significant stenosis . \\n stenosis was evaluated in two orthogonal views and classified as significant if the mean\n lumen diameter reduction was 50% using a validated quantitative coronary angiography ( qca ) . \\n all scans were analyzed independently by a radiologist and a cardiologist who were unaware of the results of conventional coronary angiograp\n hy . \\n total calcium scores of all patients were calculated with dedicated software and expressed as agatston scores . \\n the agatston score is a commonly used scoring method that calculates the total amount of calcium on the basis of th\n e number , areas , and peak hounsfield units of the detected calcified lesions . \\n all available coronary segments were visually scored for the presence of > 50% considered as significant stenosis . \\n maximum intensity projections were\n used to identify coronary lesions and ( curved ) multiplanar reconstructions to classify lesions as significant or nonsignificant . \\n data were analyzed using statistical system spss version 20 software ( chicago , il , usa ) . \\n the di\n agnostic performance of ct coronary angiography for the detection of significant lesions in coronary arteries with qca as the standard of reference is presented as sensitivity , specificity , positive and negative predictive values , and\n positive and negative likelihood ratios with the corresponding exact 95% of confidence interval ( cis ) . \\n comparison between ct and conventional coronary angiography was performed on the two level vessel by vessel ( no or any disease p\n er vessel ) , and patient by patient ( no or any disease per patient ) . \\n all scans were performed within 2 weeks of the msct coronary diagnostic angiogram . a single observer unaware of the multi - slice ct results identified coronary\n lesion as a single vessel , double vessel , or triple vessel disease . \\n all lesion , regardless of size , were included for comparison with ct coronary angiography . \\n lesions were classified as having nonsignificant disease ( luminal\n irregularities or < 50% stenosis ) or as having significant stenosis . 
\\n stenosis was evaluated in two orthogonal views and classified as significant if the mean lumen diameter reduction was 50% using a validated quantitative coronary an\n giography ( qca ) . \\n all scans were analyzed independently by a radiologist and a cardiologist who were unaware of the results of conventional coronary angiography . \\n total calcium scores of all patients were calculated with dedicated\n software and expressed as agatston scores . \\n the agatston score is a commonly used scoring method that calculates the total amount of calcium on the basis of the number , areas , and peak hounsfield units of the detected calcified lesi\n ons . \\n all available coronary segments were visually scored for the presence of > 50% considered as significant stenosis . \\n maximum intensity projections were used to identify coronary lesions and ( curved ) multiplanar reconstruction\n s to classify lesions as significant or nonsignificant . \\n data were analyzed using statistical system spss version 20 software ( chicago , il , usa ) . \\n the diagnostic performance of ct coronary angiography for the detection of signif\n icant lesions in coronary arteries with qca as the standard of reference is presented as sensitivity , specificity , positive and negative predictive values , and positive and negative likelihood ratios with the corresponding exact 95% of\n confidence interval ( cis ) . \\n comparison between ct and conventional coronary angiography was performed on the two level vessel by vessel ( no or any disease per vessel ) , and patient by patient ( no or any disease per patient ) . \\n\n in this study , 29 ( 58% ) subjects were female , and 21 ( 42% ) were male showing an average age of 50.36 8.39 years . \\n of fifty patients 24 ( 48% ) , 13 ( 26% ) , eight ( 16% ) , and five ( 10% ) underwent mitral valve replacement ,\n double valve replacement ( dvr ) , aortic valve replacement , and other surgeries , respectively . \\n high distribution of cad risk factors such as hypertension ( 24% ) , smoking ( 22% ) , and dyslipidemia ( 18% ) was observed in the stu\n dy group . \\n the mean creatinine level was 0.766 0.17 and average dye used in conventional angiography was 48.5 26.6 whereas for ct angiography it was 72.8 6.32 . \\n average radiation dose in conventional coronary angiography and msct\n coronary angiography was 5.2 msv and 9.2 msv , respectively . \\n the majority of the patients had sinus rhythm ( 68% ) , whereas atrial fibrillation was found in 32% of the subjects . \\n patients included in the study had low to intermed\n iate probability of cad . in this study , three patients had complications after conventional angiography . \\n complications were of local site hematoma , acute kidney injury managed conservatively , and acute heart failure . \\n a patient\n who developed hematoma was obese female patients with body mass index > 30 kg / m . \\n the patient suffered from pseudoaneurysm , had hospitalized for 9 days , which leads to increased morbidity and cost of hospital stay . \\n the diagnos\n tic accuracy of ct coronary angiography was evaluated regarding true positive , true negative values and is presented in table 1 . the overall sensitivity and \\n specificity of ct angiography technique was 100% ( 95% ci : 39.76%100% ) and\n 91.30% ( 95% ci : 79.21%97.58% ) , respectively [ table 2 ] . 
\\n the positive predictive value ( 50% ; 95% ci : 15.70%84.30% ) and negative predictive value ( 100% ; 95% ci : 91.59%100% ) of ct angiography were also fairly high in these\n patients . \\n recent reports from multiple studies demonstrated that recent - generation msct scanners showed promise for noninvasive detection of coronary stenosis however , until now no studies were found regarding the clinical efficacy\n or prognostic value of 128-slice ct coronary angiography versus conventional invasive coronary angiography in the diagnosis of patients planned for major noncoronary surgeries such as dvr , bentall , atrial septal defect closure , etc .\n in our study , we reported 8% cad prevalence in patients planned for major noncoronary cardiac surgery . \\n we performed conventional and msct coronary angiography in all patients and the results showed that ct coronary angiography with i\n nvasive coronary angiography as the reference standard had a considerably high sensitivity ( 100% ) and specificity ( 95.65% ) . \\n the health economic model using invasive coronary angiography as the reference standard showed that at a p\n retest probability of cad of 70% or lower , ct coronary angiography resulted in lower cost per patient with a true positive diagnosis . at a pretest probability of cad of 70% or higher , invasive coronary angiography was associated with a\n lower cost per patient with a true positive diagnosis . in our study population , \\n two patients developed local site complications in the form of hematoma and pseudoaneurysm after conventional angiography . \\n hence , msct coronary ang\n iography will be more favorable in female obese patients with intermediate likelihood of cad . \\n hence , msct coronary angiography will be cost - effective in patients of valvular heart diseases . \\n however , ct angiography suffers from\n a drawback that average amount of dye used in msct coronary angiography were 72.8 6.32 ml which is higher than average amount of dye required for conventional angiography ( 48.6 26.6 ml ) . \\n hence , the use of ct coronary angiography\n could not be used in patients with known renal dysfunction , where reduction of contrast dye load is highly advocated . \\n our results show that 128-slice ct coronary angiography is a reliable technique to detect coronary stenosis in pat\n ients planned for noncoronary cardiac surgery . \\n although there has been important technological progress in the development of ct coronary angiography , its clinical application remains limited . \\n a study wth large numbers of patient\n s is required for the recommendation of only ct coronary angiography for the coronary evaluation in major non - cardiac surgeries . \\n mehta institute of cardiology and research center ( affiliated to bj medical college , ahmedabad , guja\n rat , india ) . \\n u.n . mehta institute of cardiology and research center ( affiliated to bj medical college , ahmedabad , gujarat , india ) . \\n ", "language": "en", "n_whitespaces": 2837, "n_words": 2237, "vocab_size": 651 }
39
Python
33
a72f1c9f5b907f96cbb7de3bbb02a1d431d34071
test_modeling_flax_longt5.py
31,310
79
120
test_summarization
https://github.com/huggingface/transformers.git
Add `LongT5` model (#16792) * Initial commit * Make some fixes * Make PT model full forward pass * Drop TF & Flax implementation, fix copies etc * Add Flax model and update some corresponding stuff * Drop some TF things * Update config and flax local attn * Add encoder_attention_type to config * . * Update docs * Do some cleansing * Fix some issues -> make style; add some docs * Fix position_bias + mask addition + Update tests * Fix repo consistency * Fix model consistency by removing flax operation over attn_mask * [WIP] Add PT TGlobal LongT5 * . * [WIP] Add flax tglobal model * [WIP] Update flax model to use the right attention type in the encoder * Fix flax tglobal model forward pass * Make the use of global_relative_attention_bias * Add test suites for TGlobal model * Fix minor bugs, clean code * Fix pt-flax equivalence though not convinced with correctness * Fix LocalAttn implementation to match the original impl. + update READMEs * Few updates * Update: [Flax] improve large model init and loading #16148 * Add ckpt conversion script accoring to #16853 + handle torch device placement * Minor updates to conversion script. * Typo: AutoModelForSeq2SeqLM -> FlaxAutoModelForSeq2SeqLM * gpu support + dtype fix * Apply some suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> Co-authored-by: Patrick von Platen <[email protected]> * * Remove (de)parallelize stuff * Edit shape comments * Update README.md * make fix-copies * Remove caching logic for local & tglobal attention * Apply another batch of suggestions from code review * Add missing checkpoints * Format converting scripts * Drop (de)parallelize links from longT5 mdx * Fix converting script + revert config file change * Revert "Remove caching logic for local & tglobal attention" This reverts commit 2a619828f6ddc3e65bd9bb1725a12b77fa883a46. * Stash caching logic in Flax model * Make side relative bias used always * Drop caching logic in PT model * Return side bias as it was * Drop all remaining model parallel logic * Remove clamp statements * Move test files to the proper place * Update docs with new version of hf-doc-builder * Fix test imports * Make some minor improvements * Add missing checkpoints to docs * Make TGlobal model compatible with torch.onnx.export * Replace some np.ndarray with jnp.ndarray * Fix TGlobal for ONNX conversion + update docs * fix _make_global_fixed_block_ids and masked neg value * update flax model * style and quality * fix imports * remove load_tf_weights_in_longt5 from init and fix copies * add slow test for TGlobal model * typo fix * Drop obsolete is_parallelizable and one warning * Update __init__ files to fix repo-consistency * fix pipeline test * Fix some device placements * [wip]: Update tests -- need to generate summaries to update expected_summary * Fix quality * Update LongT5 model card * Update (slow) summarization tests * make style * rename checkpoitns * finish * fix flax tests Co-authored-by: phungvanduy <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]> Co-authored-by: Patrick von Platen <[email protected]> Co-authored-by: patil-suraj <[email protected]>
263
0
5,722
10
2
13
def get_daily_sector_prices(start_date, end_date):
    # sector ticker information
    sp500_tickers = {
        "S&P 500 Materials (Sector)": "^SP500-15",
        "S&P 500 Industrials (Sector)": "^SP500-20",
        "S&P 500 Consumer Discretionary (Sector)": "^SP500-25",
        "S&P 500 Consumer Staples (Sector)": "^SP500-30",
        "S&P 500 Health Care (Sector)": "^SP500-35",
        "S&P 500 Financials (Sector)": "^SP500-40",
        "S&P 500 Information Technology (Sector)": "^SP500-45",
        "S&P 500 Telecommunication Services (Sector)": "^SP500-50",
        "S&P 500 Utilities (Sector)": "^SP500-55",
        "S&P 500 Real Estate (Sector)": "^SP500-60",
        "S&P 500 Energy (Sector)": "^GSPE",
    }

    sp500_tickers_data = {}  # to store data

    for (
        sector,
        sector_ticker,
    ) in sp500_tickers.items():  # iterate thru the sectors
        # load the data required from yfinance
        sp500_tickers_data[
            sector
        ] = {  # builds a dictionary entry for the sector with adj close data
            "sector_data": yf.download(
                sector_ticker,
                start=start_date,
                end=end_date,
                progress=False,
            )["Adj Close"]
        }  # stores the data here

    return sp500_tickers_data
openbb_terminal/portfolio/attribution_model.py
204
OpenBBTerminal
{ "docstring": "\n fetches daily sector prices for S&P500 for a fixed time period\n\n Parameters\n ----------\n start_date : str ('yyyy-mm-dd') or datetime.date\n start date for fetching data\n end_date : str ('yyyy-mm-dd') or datetime.date\n end date for fetching data\n\n Returns\n -------\n sp500_tickers_data : Dictionary\n dictionary of dataframes with SPY daily sector prices\n ", "language": "en", "n_whitespaces": 97, "n_words": 48, "vocab_size": 33 }
131
Python
80
aed683f44015cb5aa6cae9c2ce719c956cda7b46
attribution_model.py
286,486
30
107
get_daily_sector_prices
https://github.com/OpenBB-finance/OpenBBTerminal.git
Feature/attribution toolkit (#3156) * add attribution toolkit * add attrib to test script for portfolio * removes yahooquery dependency and early rounding * Update _index.md * update feature to include raw and type flags, graph always shows, table output optional, one type of output at a time * Linting * Update index * Update index 2 * Update tests * changes argument descriptions * Small fix * Formatting Black Co-authored-by: S3908818 <[email protected]> Co-authored-by: Louise Platts (S3908818) <[email protected]> Co-authored-by: Jeroen Bouma <[email protected]> Co-authored-by: James Maslek <[email protected]> Co-authored-by: Louise Amy <[email protected]> Co-authored-by: Jeroen Bouma <[email protected]>
371
0
85,828
14
1
21
def test_equivalence_components_pca_spca(global_random_seed):
    rng = np.random.RandomState(global_random_seed)
    X = rng.randn(50, 4)

    n_components = 2
    pca = PCA(
        n_components=n_components,
        svd_solver="randomized",
        random_state=0,
    ).fit(X)
    spca = SparsePCA(
        n_components=n_components,
        method="lars",
        ridge_alpha=0,
        alpha=0,
        random_state=0,
    ).fit(X)

    assert_allclose(pca.components_, spca.components_)
sklearn/decomposition/tests/test_sparse_pca.py
142
scikit-learn
{ "docstring": "Check the equivalence of the components found by PCA and SparsePCA.\n\n Non-regression test for:\n https://github.com/scikit-learn/scikit-learn/issues/23932\n ", "language": "en", "n_whitespaces": 24, "n_words": 15, "vocab_size": 14 }
30
Python
23
4f315db68bb190f0ac03d594f5b45d8fb4213f6f
test_sparse_pca.py
260,619
17
91
test_equivalence_components_pca_spca
https://github.com/scikit-learn/scikit-learn.git
FIX make SparsePCA components_ deterministic (#23935)
113
0
76,379
12
1
2
def send_event_if_public_demo(func):
haystack/telemetry.py
13
haystack
{ "docstring": "\n Can be used as a decorator to send an event only if HAYSTACK_EXECUTION_CONTEXT is \"public_demo\"\n ", "language": "en", "n_whitespaces": 22, "n_words": 15, "vocab_size": 15 }
2
Python
2
ac5617e757e9ace6f30b7291686d9dbbc339f433
telemetry.py
256,954
4
15
send_event_if_public_demo
https://github.com/deepset-ai/haystack.git
Add basic telemetry features (#2314) * add basic telemetry features * change pipeline_config to _component_config * Update Documentation & Code Style * add super().__init__() calls to error classes * make posthog mock work with python 3.7 * Update Documentation & Code Style * update link to docs web page * log exceptions, send event for raised HaystackErrors, refactor Path(CONFIG_PATH) * add comment on send_event in BaseComponent.init() and fix mypy * mock NonPrivateParameters and fix pylint undefined-variable * Update Documentation & Code Style * check model path contains multiple / * add test for writing to file * add test for en-/disable telemetry * Update Documentation & Code Style * merge file deletion methods and ignore pylint global statement * Update Documentation & Code Style * set env variable in demo to activate telemetry * fix mock of HAYSTACK_TELEMETRY_ENABLED * fix mypy and linter * add CI as env variable to execution contexts * remove threading, add test for custom error event * Update Documentation & Code Style * simplify config/log file deletion * add test for final event being sent * force writing config file in test * make test compatible with python 3.7 * switch to posthog production server * Update Documentation & Code Style Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
5
0
74,971
6
1
12
def test_model_with_fixed_input_dim(self):
    model = test_utils.get_small_mlp(10, 3, 5)

    loss_object = keras.losses.MeanSquaredError()
    optimizer = gradient_descent.SGD()
keras/saving/saving_utils_test.py
57
keras
{ "docstring": "Ensure that the batch_dim is removed when saving.\n\n When serving or retraining, it is important to reset the batch dim.\n This can be an issue inside of tf.function. See b/132783590 for context.\n ", "language": "en", "n_whitespaces": 53, "n_words": 32, "vocab_size": 30 }
13
Python
11
84afc5193d38057e2e2badf9c889ea87d80d8fbf
saving_utils_test.py
276,277
14
118
test_model_with_fixed_input_dim
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
41
0
81,611
9
1
20
def test_installed_without_username(self):
    # Remove username to simulate privacy mode
    del self.user_data_from_bitbucket["principal"]["username"]

    response = self.client.post(self.path, data=self.user_data_from_bitbucket)
    assert response.status_code == 200
    integration = Integration.objects.get(provider=self.provider, external_id=self.client_key)
    assert integration.name == self.user_display_name
    assert integration.metadata == self.user_metadata
tests/sentry/integrations/bitbucket/test_installed.py
122
sentry
{ "docstring": "Test a user (not team) installation where the user has hidden their username from public view", "language": "en", "n_whitespaces": 15, "n_words": 16, "vocab_size": 15 }
31
Python
26
2790a30b7f6a6cffa2cd1aa69c678327a41a0664
test_installed.py
96,012
7
76
test_installed_without_username
https://github.com/getsentry/sentry.git
fix(bitbucket): Fix domain name (#31536) * fix(bitbucket): Fix domain name
87
0
19,263
10
1
4
def call(cls, reduce_function, axis=None):
modin/core/dataframe/algebra/reduce.py
20
modin
{ "docstring": "\n Build Reduce operator that will be performed across rows/columns.\n\n It's used if `func` reduces the dimension of partitions in contrast to `Fold`.\n\n Parameters\n ----------\n reduce_function : callable(pandas.DataFrame) -> pandas.Series\n Source function.\n axis : int, optional\n Axis to apply function along.\n\n Returns\n -------\n callable\n Function that takes query compiler and executes Reduce function.\n ", "language": "en", "n_whitespaces": 156, "n_words": 52, "vocab_size": 47 }
4
Python
4
58bbcc37477866d19c8b092a0e1974a4f0baa586
reduce.py
153,043
3
16
call
https://github.com/modin-project/modin.git
REFACTOR-#2656: Update modin to fit algebra (code only) (#3717) Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Vasily Litvinov <[email protected]> Co-authored-by: Alexey Prutskov <[email protected]> Co-authored-by: Devin Petersohn <[email protected]> Signed-off-by: Rehan Durrani <[email protected]>
11
0
35,227
6
2
9
def test_resource_exhausted_info(self):
    # generate some random data to be captured implicitly in training func.
    from sklearn.datasets import fetch_olivetti_faces

    a_large_array = []
    for i in range(50):
        a_large_array.append(fetch_olivetti_faces())
python/ray/tune/tests/test_tune_restore.py
56
ray
{ "docstring": "This is to test if helpful information is displayed when\n the objects captured in trainable/training function are too\n large and RESOURCES_EXHAUSTED error of gRPC is triggered.", "language": "en", "n_whitespaces": 39, "n_words": 26, "vocab_size": 24 }
26
Python
25
46ed3557ba6b4f4f72c15ef960aba5270ada2a9c
test_tune_restore.py
126,557
11
51
test_resource_exhausted_info
https://github.com/ray-project/ray.git
[tune] Fix test_resource_exhausted_info test (#27426) #27213 broke this test Signed-off-by: Kai Fricke <[email protected]>
72
0
28,198
11
9
26
def check_shape(_shape, **kwargs):
    target_shape = _shape
    for k, v in kwargs.items():
        data_shape = v.shape

        if len(target_shape) != len(data_shape) or any(
                t not in [s, None]
                for t, s in zip(target_shape, data_shape)
        ):
            dim_labels = iter(itertools.chain(
                'MNLIJKLH',
                (f"D{i}" for i in itertools.count())))
            text_shape = ", ".join((str(n)
                                    if n is not None
                                    else next(dim_labels)
                                    for n in target_shape))
            if len(target_shape) == 1:
                text_shape += ","

            raise ValueError(
                f"{k!r} must be {len(target_shape)}D "
                f"with shape ({text_shape}). "
                f"Your input has shape {v.shape}."
            )
lib/matplotlib/_api/__init__.py
244
matplotlib
{ "docstring": "\n For each *key, value* pair in *kwargs*, check that *value* has the shape\n *_shape*, if not, raise an appropriate ValueError.\n\n *None* in the shape is treated as a \"free\" size that can have any length.\n e.g. (None, 2) -> (N, 2)\n\n The values checked must be numpy arrays.\n\n Examples\n --------\n To check for (N, 2) shaped arrays\n\n >>> _api.check_shape((None, 2), arg=arg, other_arg=other_arg)\n ", "language": "en", "n_whitespaces": 93, "n_words": 62, "vocab_size": 54 }
80
Python
62
df3d2ab53722d191bbbc667a5ac2f7cb7cdfee84
__init__.py
110,468
22
134
check_shape
https://github.com/matplotlib/matplotlib.git
Improve argument checking for set_xticks().
390
0
24,175
18
4
17
def get_unicode_from_response(r):
    warnings.warn(
        (
            "In requests 3.0, get_unicode_from_response will be removed. For "
            "more information, please see the discussion on issue #2266. (This"
            " warning should only appear once.)"
        ),
        DeprecationWarning,
    )

    tried_encodings = []

    # Try charset from content-type
    encoding = get_encoding_from_headers(r.headers)

    if encoding:
        try:
            return str(r.content, encoding)
        except UnicodeError:
            tried_encodings.append(encoding)

    # Fall back:
    try:
        return str(r.content, encoding, errors="replace")
    except TypeError:
        return r.content


# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~"
)
pipenv/patched/pip/_vendor/requests/utils.py
150
pipenv
{ "docstring": "Returns the requested content back in unicode.\n\n :param r: Response object to get unicode content from.\n\n Tried:\n\n 1. charset from content-type\n 2. fall back and replace all unicode characters\n\n :rtype: str\n ", "language": "en", "n_whitespaces": 49, "n_words": 31, "vocab_size": 28 }
78
Python
67
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
utils.py
22,137
20
76
get_unicode_from_response
https://github.com/pypa/pipenv.git
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
212
0
4,209
12
6
26
def render(self, filename="SystemStateGraph.gv", view=True):
    # type: (str, bool) -> None
    try:
        from graphviz import Digraph
    except ImportError:
        log_automotive.info("Please install graphviz.")
        return

    ps = Digraph(name="SystemStateGraph",
                 node_attr={"fillcolor": "lightgrey",
                            "style": "filled",
                            "shape": "box"},
                 graph_attr={"concentrate": "true"})
    for n in self.nodes:
        ps.node(str(n))

    for e, f in self.__transition_functions.items():
        try:
            desc = "" if f is None else f[1]["desc"]
        except (AttributeError, KeyError):
            desc = ""
        ps.edge(str(e[0]), str(e[1]), label=desc)

    ps.render(filename, view=view)
scapy/contrib/automotive/scanner/graph.py
260
scapy
{ "docstring": "\n Renders this Graph as PDF, if `graphviz` is installed.\n\n :param filename: A filename for the rendered PDF.\n :param view: If True, rendered file will be opened.\n ", "language": "en", "n_whitespaces": 55, "n_words": 26, "vocab_size": 24 }
63
Python
53
495b21f2867e48286767085c8cf2918e4092e9dc
graph.py
209,588
20
152
render
https://github.com/secdev/scapy.git
Add Automotive Logger for all debug outputs of the automotive layer
328
0
52,742
13
26
53
def preprocess_data(self, ds):
        if not isinstance(ds, dict):
            raise AnsibleAssertionError('ds (%s) should be a dict but was a %s' % (ds, type(ds)))

        # the new, cleaned datastructure, which will have legacy
        # items reduced to a standard structure suitable for the
        # attributes of the task class
        new_ds = AnsibleMapping()
        if isinstance(ds, AnsibleBaseYAMLObject):
            new_ds.ansible_pos = ds.ansible_pos

        # since this affects the task action parsing, we have to resolve in preprocess instead of in typical validator
        default_collection = AnsibleCollectionConfig.default_collection

        collections_list = ds.get('collections')
        if collections_list is None:
            # use the parent value if our ds doesn't define it
            collections_list = self.collections
        else:
            # Validate this untemplated field early on to guarantee we are dealing with a list.
            # This is also done in CollectionSearch._load_collections() but this runs before that call.
            collections_list = self.get_validated_value('collections', self.fattributes.get('collections'), collections_list, None)

        if default_collection and not self._role:  # FIXME: and not a collections role
            if collections_list:
                if default_collection not in collections_list:
                    collections_list.insert(0, default_collection)
            else:
                collections_list = [default_collection]

        if collections_list and 'ansible.builtin' not in collections_list and 'ansible.legacy' not in collections_list:
            collections_list.append('ansible.legacy')

        if collections_list:
            ds['collections'] = collections_list

        # use the args parsing class to determine the action, args,
        # and the delegate_to value from the various possible forms
        # supported as legacy
        args_parser = ModuleArgsParser(task_ds=ds, collection_list=collections_list)
        try:
            (action, args, delegate_to) = args_parser.parse()
        except AnsibleParserError as e:
            # if the raises exception was created with obj=ds args, then it includes the detail
            # so we dont need to add it so we can just re raise.
            if e.obj:
                raise
            # But if it wasn't, we can add the yaml object now to get more detail
            raise AnsibleParserError(to_native(e), obj=ds, orig_exc=e)
        else:
            self.resolved_action = args_parser.resolved_action

        # the command/shell/script modules used to support the `cmd` arg,
        # which corresponds to what we now call _raw_params, so move that
        # value over to _raw_params (assuming it is empty)
        if action in C._ACTION_HAS_CMD:
            if 'cmd' in args:
                if args.get('_raw_params', '') != '':
                    raise AnsibleError("The 'cmd' argument cannot be used when other raw parameters are specified."
                                       " Please put everything in one or the other place.", obj=ds)
                args['_raw_params'] = args.pop('cmd')

        new_ds['action'] = action
        new_ds['args'] = args
        new_ds['delegate_to'] = delegate_to

        # we handle any 'vars' specified in the ds here, as we may
        # be adding things to them below (special handling for includes).
        # When that deprecated feature is removed, this can be too.
        if 'vars' in ds:
            # _load_vars is defined in Base, and is used to load a dictionary
            # or list of dictionaries in a standard way
            new_ds['vars'] = self._load_vars(None, ds.get('vars'))
        else:
            new_ds['vars'] = dict()

        for (k, v) in ds.items():
            if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell':
                # we don't want to re-assign these values, which were determined by the ModuleArgsParser() above
                continue
            elif k.startswith('with_') and k.replace("with_", "") in lookup_loader:
                # transform into loop property
                self._preprocess_with_loop(ds, new_ds, k, v)
            elif C.INVALID_TASK_ATTRIBUTE_FAILED or k in self._valid_attrs:
                new_ds[k] = v
            else:
                display.warning("Ignoring invalid attribute: %s" % k)

        return super(Task, self).preprocess_data(new_ds)
lib/ansible/playbook/task.py
737
ansible
{ "docstring": "\n tasks are especially complex arguments so need pre-processing.\n keep it short.\n ", "language": "en", "n_whitespaces": 33, "n_words": 11, "vocab_size": 11 }
491
Python
278
43153c58310d02223f2cb0964f4255ba1ac4ed53
task.py
267,587
54
421
preprocess_data
https://github.com/ansible/ansible.git
`FieldAttribute`s as descriptors (#73908)
1,282
0
78,966
15
1
13
def test_custom_changelist(self):
        # Insert some data
        post_data = {"name": "First Gadget"}
        response = self.client.post(reverse("admin:admin_views_gadget_add"), post_data)
        self.assertEqual(response.status_code, 302)  # redirect somewhere

        # Hit the page once to get messages out of the queue message list
        response = self.client.get(reverse("admin:admin_views_gadget_changelist"))

        # Data is still not visible on the page
        response = self.client.get(reverse("admin:admin_views_gadget_changelist"))
        self.assertNotContains(response, "First Gadget")


@override_settings(ROOT_URLCONF="admin_views.urls")
tests/admin_views/tests.py
146
@override_settings(ROOT_URLCONF="admin_views.urls")
django
{ "docstring": "\n Validate that a custom ChangeList class can be used (#9749)\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
53
Python
40
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,588
7
72
test_custom_changelist
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
123
1
52,011
11
3
7
def fdiff(self, argindex=1):
        if argindex == 1:
            return Pow(self.args[0], self.args[1])*self.args[1]/self.args[0]
        elif argindex == 2:
            return log(self.args[0])*Pow(*self.args)
        else:
            raise ArgumentIndexError(self, argindex)
sympy/codegen/scipy_nodes.py
120
sympy
{ "docstring": "\n Returns the first derivative of this function.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
20
Python
17
27ff0c7bf7062f5b4b80ad12098e6422af5fcb44
scipy_nodes.py
200,522
7
78
fdiff
https://github.com/sympy/sympy.git
more tests of cosm1, powm1 from scipy.special
81
0
49,688
13
1
6
def isocalendar(self):
        return self._get_values().isocalendar().set_index(self._parent.index)
pandas/core/indexes/accessors.py
44
pandas
{ "docstring": "\n Calculate year, week, and day according to the ISO 8601 standard.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame\n With columns year, week and day.\n\n See Also\n --------\n Timestamp.isocalendar : Function return a 3-tuple containing ISO year,\n week number, and weekday for the given Timestamp object.\n datetime.date.isocalendar : Return a named tuple object with\n three components: year, week and weekday.\n\n Examples\n --------\n >>> ser = pd.to_datetime(pd.Series([\"2010-01-01\", pd.NaT]))\n >>> ser.dt.isocalendar()\n year week day\n 0 2009 53 5\n 1 <NA> <NA> <NA>\n >>> ser.dt.isocalendar().week\n 0 53\n 1 <NA>\n Name: week, dtype: UInt32\n ", "language": "en", "n_whitespaces": 293, "n_words": 88, "vocab_size": 64 }
4
Python
4
5531195f6f0d87817a704b288008809a3c98a304
accessors.py
165,949
2
25
isocalendar
https://github.com/pandas-dev/pandas.git
fix-ci-isocalendar (#46690)
18
0
39,746
11
2
10
def is_solenoidal(field):
    # Field is solenoidal irrespective of frame
    # Take the first frame in the result of the separate method in Vector
    if field == Vector(0):
        return True
    frame = list(field.separate())[0]
    return divergence(field, frame).simplify() is S.Zero
sympy/physics/vector/fieldfunctions.py
75
sympy
{ "docstring": "\n Checks if a field is solenoidal.\n\n Parameters\n ==========\n\n field : Vector\n The field to check for solenoidal property\n\n Examples\n ========\n\n >>> from sympy.physics.vector import ReferenceFrame\n >>> from sympy.physics.vector import is_solenoidal\n >>> R = ReferenceFrame('R')\n >>> is_solenoidal(R[1]*R[2]*R.x + R[0]*R[2]*R.y + R[0]*R[1]*R.z)\n True\n >>> is_solenoidal(R[1] * R.y)\n False\n\n ", "language": "en", "n_whitespaces": 96, "n_words": 46, "vocab_size": 36 }
37
Python
28
9a3ffc6781bd44c47cf49e128ef154389c32876a
fieldfunctions.py
197,436
5
44
is_solenoidal
https://github.com/sympy/sympy.git
Some pep8 cleanup of sympy.physics.vector.
62
0
48,544
11
2
5
def _allow_scroll(self) -> bool:
        return self.allow_horizontal_scroll and self.allow_vertical_scroll
src/textual/widget.py
28
textual
{ "docstring": "Check if both axis may be scrolled.\n\n Returns:\n bool: True if horizontal and vertical scrolling is enabled.\n ", "language": "en", "n_whitespaces": 42, "n_words": 17, "vocab_size": 16 }
8
Python
8
b22436933acc0d7440ec300f971a249bd6105a5b
widget.py
184,630
7
16
_allow_scroll
https://github.com/Textualize/textual.git
lots of docstrings
22
0
44,728
7
1
25
def test_align_labels():
    fig, (ax3, ax1, ax2) = plt.subplots(3, 1, layout="constrained",
                                        figsize=(6.4, 8),
                                        gridspec_kw={"height_ratios": (1, 1, 0.7)})

    ax1.set_ylim(0, 1)
    ax1.set_ylabel("Label")

    ax2.set_ylim(-1.5, 1.5)
    ax2.set_ylabel("Label")

    ax3.set_ylim(0, 1)
    ax3.set_ylabel("Label")

    fig.align_ylabels(axs=(ax3, ax1, ax2))

    fig.draw_without_rendering()
    after_align = [ax1.yaxis.label.get_window_extent(),
                   ax2.yaxis.label.get_window_extent(),
                   ax3.yaxis.label.get_window_extent()]
    # ensure labels are approximately aligned
    np.testing.assert_allclose([after_align[0].x0, after_align[2].x0],
                               after_align[1].x0, rtol=0, atol=1e-05)
    # ensure labels do not go off the edge
    assert after_align[0].x0 >= 1
lib/matplotlib/tests/test_constrainedlayout.py
294
matplotlib
{ "docstring": "\n Tests for a bug in which constrained layout and align_ylabels on\n three unevenly sized subplots, one of whose y tick labels include\n negative numbers, drives the non-negative subplots' y labels off\n the edge of the plot\n ", "language": "en", "n_whitespaces": 52, "n_words": 36, "vocab_size": 31 }
58
Python
51
ec4dfbc3c83866f487ff0bc9c87b0d43a1c02b22
test_constrainedlayout.py
107,162
19
200
test_align_labels
https://github.com/matplotlib/matplotlib.git
ENH: implement and use base layout_engine for more flexible layout.
317
0
22,617
12
1
5
def tag(value, viewname=None):
    return {
        'tag': value,
        'viewname': viewname,
    }


@register.inclusion_tag('builtins/badge.html')
netbox/utilities/templatetags/builtins/tags.py
51
@register.inclusion_tag('builtins/badge.html')
netbox
{ "docstring": "\n Display a tag, optionally linked to a filtered list of objects.\n\n Args:\n value: A Tag instance\n viewname: If provided, the tag will be a hyperlink to the specified view's URL\n ", "language": "en", "n_whitespaces": 54, "n_words": 30, "vocab_size": 26 }
11
Python
11
7c105019d8ae9205051c302e7499b33a455f9176
tags.py
264,451
5
21
tag
https://github.com/netbox-community/netbox.git
Closes #8600: Document built-in template tags & filters
33
1
77,737
8
2
32
def forward(self, x, mask=None):
        B_, N, C = x.shape
        qkv = self.qkv(x).reshape(
            [-1, N, 3, self.num_heads, C // self.num_heads]).transpose(
                [2, 0, 3, 1, 4])
        q, k, v = qkv[0], qkv[1], qkv[2]

        q = q * self.scale
        attn = paddle.mm(q, k.transpose([0, 1, 3, 2]))

        index = self.relative_position_index.flatten()
        relative_position_bias = paddle.index_select(
            self.relative_position_bias_table, index)
        relative_position_bias = relative_position_bias.reshape([
            self.window_size[0] * self.window_size[1],
            self.window_size[0] * self.window_size[1], -1
        ])  # Wh*Ww,Wh*Ww,nH

        relative_position_bias = relative_position_bias.transpose(
            [2, 0, 1])  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            nW = mask.shape[0]
            attn = attn.reshape([-1, nW, self.num_heads, N, N
                                 ]) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.reshape([-1, self.num_heads, N, N])
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        # x = (attn @ v).transpose(1, 2).reshape([B_, N, C])
        x = paddle.mm(attn, v).transpose([0, 2, 1, 3]).reshape([-1, N, C])
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
ppdet/modeling/backbones/swin_transformer.py
510
PaddleDetection
{ "docstring": " Forward function.\n Args:\n x: input features with shape of (num_windows*B, N, C)\n mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None\n ", "language": "en", "n_whitespaces": 60, "n_words": 23, "vocab_size": 20 }
139
Python
81
e6d4d2bc7ba5eb4aa543e3439fa4e24cdd68d028
swin_transformer.py
211,049
31
337
forward
https://github.com/PaddlePaddle/PaddleDetection.git
fix export_model for swin (#6399)
438
0
53,015
13
5
12
def handle_m2m_field(self, obj, field):
        if field.remote_field.through._meta.auto_created:
            self._start_relational_field(field)
            if self.use_natural_foreign_keys and hasattr(
                field.remote_field.model, "natural_key"
            ):
                # If the objects in the m2m have a natural key, use it
django/core/serializers/xml_serializer.py
71
django
{ "docstring": "\n Handle a ManyToManyField. Related objects are only serialized as\n references to the object's PK (i.e. the related *data* is not dumped,\n just the relation).\n ", "language": "en", "n_whitespaces": 53, "n_words": 24, "vocab_size": 22 }
27
Python
25
9c19aff7c7561e3a82978a272ecdaad40dda5c00
xml_serializer.py
204,761
16
98
handle_m2m_field
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
104
0
50,875
12
1
15
def test_get_with_extra_component(self):
        # Generate signature
        signature = generate_signature(self.image.id, "fill-800x600")

        # Get the image
        response = self.client.get(
            reverse(
                "wagtailimages_serve", args=(signature, self.image.id, "fill-800x600")
            )
            + "test.png"
        )

        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.streaming)
        self.assertEqual(response["Content-Type"], "image/png")
wagtail/images/tests/tests.py
132
wagtail
{ "docstring": "\n Test that a filename can be optionally added to the end of the URL.\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 13 }
33
Python
26
d10f15e55806c6944827d801cd9c2d53f5da4186
tests.py
75,351
11
76
test_get_with_extra_component
https://github.com/wagtail/wagtail.git
Reformat with black
151
0
16,398
15
1
2
def with_cleanup(func):
    # type: (Any) -> Any
.venv/lib/python3.8/site-packages/pip/_internal/cli/req_command.py
14
transferlearning
{ "docstring": "Decorator for common logic related to managing temporary\n directories.\n ", "language": "en", "n_whitespaces": 15, "n_words": 9, "vocab_size": 9 }
7
Python
7
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
req_command.py
60,556
4
12
with_cleanup
https://github.com/jindongwang/transferlearning.git
upd; format
13
0
12,207
6
7
11
def convert_path(pathname):
    if os.sep == '/':
        return pathname
    if not pathname:
        return pathname
    if pathname[0] == '/':
        raise ValueError("path '%s' cannot be absolute" % pathname)
    if pathname[-1] == '/':
        raise ValueError("path '%s' cannot end with '/'" % pathname)

    paths = pathname.split('/')
    while os.curdir in paths:
        paths.remove(os.curdir)
    if not paths:
        return os.curdir
    return os.path.join(*paths)
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/util.py
163
transferlearning
{ "docstring": "Return 'pathname' as a name that will work on the native filesystem.\n\n The path is split on '/' and put back together again using the current\n directory separator. Needed because filenames in the setup script are\n always supplied in Unix style, and have to be converted to the local\n convention before we can actually use them in the filesystem. Raises\n ValueError on non-Unix-ish systems if 'pathname' either starts or\n ends with a slash.\n ", "language": "en", "n_whitespaces": 96, "n_words": 73, "vocab_size": 60 }
53
Python
32
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
util.py
62,143
15
93
convert_path
https://github.com/jindongwang/transferlearning.git
upd; format
122
0
12,879
11
3
13
def _wrap_data_with_container(method, data_to_wrap, original_input, estimator):
    output_config = _get_output_config(method, estimator)

    if output_config["dense"] == "default" or not _auto_wrap_is_configured(estimator):
        return data_to_wrap

    # dense_config == "pandas"
    return _wrap_in_pandas_container(
        data_to_wrap=data_to_wrap,
        index=getattr(original_input, "index", None),
        columns=estimator.get_feature_names_out,
    )
sklearn/utils/_set_output.py
97
scikit-learn
{ "docstring": "Wrap output with container based on an estimator's or global config.\n\n Parameters\n ----------\n method : {\"transform\"}\n Estimator's method to get container output for.\n\n data_to_wrap : {ndarray, dataframe}\n Data to wrap with container.\n\n original_input : {ndarray, dataframe}\n Original input of function.\n\n estimator : estimator instance\n Estimator with to get the output configuration from.\n\n Returns\n -------\n output : {ndarray, dataframe}\n If the output config is \"default\" or the estimator is not configured\n for wrapping return `data_to_wrap` unchanged.\n If the output config is \"pandas\", return `data_to_wrap` as a pandas\n DataFrame.\n ", "language": "en", "n_whitespaces": 173, "n_words": 87, "vocab_size": 55 }
30
Python
28
2a6703d9e8d1e54d22dd07f2bfff3c92adecd758
_set_output.py
261,326
9
61
_wrap_data_with_container
https://github.com/scikit-learn/scikit-learn.git
ENH Introduces set_output API for pandas output (#23734) * Introduces set_output API for all transformers * TransformerMixin inherits from _SetOutputMixin * Adds tests * Adds whatsnew * Adds example on using set_output API * Adds developer docs for set_output
76
0
76,754
11
1
3
def is_origin(self) -> bool:
        return self == (0, 0)
src/textual/geometry.py
27
textual
{ "docstring": "Check if the point is at the origin (0, 0).\n\n Returns:\n bool: True if the offset is the origin.\n\n ", "language": "en", "n_whitespaces": 44, "n_words": 19, "vocab_size": 14 }
9
Python
9
c0a631ac492580c2d8a311cdd69385cbc95a7fc0
geometry.py
184,593
8
16
is_origin
https://github.com/Textualize/textual.git
faster screenshots, docstrings
23
0
44,695
7
4
15
def get_text_heights(self, renderer):
        bbox, bbox2 = self.get_ticklabel_extents(renderer)
        # MGDTODO: Need a better way to get the pad
        pad_pixels = self.majorTicks[0].get_pad_pixels()

        above = 0.0
        if bbox2.height:
            above += bbox2.height + pad_pixels
        below = 0.0
        if bbox.height:
            below += bbox.height + pad_pixels

        if self.get_label_position() == 'top':
            above += self.label.get_window_extent(renderer).height + pad_pixels
        else:
            below += self.label.get_window_extent(renderer).height + pad_pixels
        return above, below
lib/matplotlib/axis.py
170
matplotlib
{ "docstring": "\n Return how much space should be reserved for text above and below the\n Axes, as a pair of floats.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 19 }
58
Python
36
f156db08eee54d285ab0fb4e031e48d078ba6aa3
axis.py
107,485
14
107
get_text_heights
https://github.com/matplotlib/matplotlib.git
DOC: More cleanup axes -> Axes
179
0
22,774
14
1
6
def mask(self, row_indices, col_indices):
        return (
            self.force_materialization()
            .list_of_block_partitions[0]
            .mask(row_indices, col_indices)
        )
modin/core/execution/dask/implementations/pandas_on_dask/partitioning/virtual_partition.py
47
modin
{ "docstring": "\n Create (synchronously) a mask that extracts the indices provided.\n\n Parameters\n ----------\n row_indices : list-like, slice or label\n The row labels for the rows to extract.\n col_indices : list-like, slice or label\n The column labels for the columns to extract.\n\n Returns\n -------\n PandasOnDaskDataframeVirtualPartition\n A new ``PandasOnDaskDataframeVirtualPartition`` object,\n materialized.\n ", "language": "en", "n_whitespaces": 155, "n_words": 47, "vocab_size": 35 }
11
Python
11
9bf8d57ca44e22fd69b0abc55793cf60c199ab4d
virtual_partition.py
154,161
6
30
mask
https://github.com/modin-project/modin.git
FIX-#4676: drain sub-virtual-partition call queues. (#4695) Signed-off-by: mvashishtha <[email protected]> Co-authored-by: Alexey Prutskov <[email protected]>
65
0
35,821
12
1
5
def site_config_path(self) -> Path:
        return self._first_item_as_path_if_multipath(self.site_config_dir)
pipenv/patched/notpip/_vendor/platformdirs/unix.py
30
pipenv
{ "docstring": ":return: config path shared by the users. Only return first item, even if ``multipath`` is set to ``True``", "language": "en", "n_whitespaces": 17, "n_words": 18, "vocab_size": 18 }
6
Python
6
f3166e673fe8d40277b804d35d77dcdb760fc3b3
unix.py
20,231
3
17
site_config_path
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
20
0
3,283
8
4
23
def send_sale_toggle_notifications():
    manager = get_plugins_manager()
    sales = get_sales_to_notify_about()

    catalogue_infos = fetch_catalogue_infos(sales)

    if not sales:
        return

    for sale in sales:
        catalogues = catalogue_infos.get(sale.id)
        manager.sale_toggle(sale, catalogues)

    sale_ids = ", ".join([str(sale.id) for sale in sales])
    sales.update(notification_sent_datetime=datetime.now(pytz.UTC))

    task_logger.info("The sale_toggle webhook sent for sales with ids: %s", sale_ids)
saleor/discount/tasks.py
153
saleor
{ "docstring": "Send the notification about starting or ending sales.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
43
Python
33
67492396aa41d068cac82e8fa328f218b5951d13
tasks.py
27,919
12
91
send_sale_toggle_notifications
https://github.com/saleor/saleor.git
New event for starting and ending sales (#10110) * Add sale started and sale ended webhooks * Add started_notification_sent and ended_notification_sent flags to Sale model * Add sale_webhook_schedule * Add send_sale_started_and_sale_ended_notifications discount task * Add tests for discount tasks * Move sale task celery beat schedule to settings * Add tests for sale_webhook_schedule * Add sale_started and sale_ended methods to PluginSample * Update send_sale_started_and_sale_ended_notifications logging * Update SaleUpdate mutation - ensure the notification is sent and the flag is changed if needed * Update SaleCreate mutation - send sale_creatd and sale_ended notifications * Optimize fetch_catalogue_info * Clean up * Apply code review suggestions * Add SALE_TOGGLE webhook * Use sale_toggle webhook instead of sale_started and sale_ended * Delete sale_started and sale_eded wbhooks * Drop notification flags from Sale model * Add missing docstrings and comments * Fix failing tests * Update changelog * Add description for SaleToggle event type * Update discount task and webhook schedule * Set notification_sent_datetime to current date by default * Fix typo in comment
91
0
5,140
12
1
4
def isasyncgenfunction(obj):
    return _has_code_flag(obj, CO_ASYNC_GENERATOR)
python3.10.4/Lib/inspect.py
23
XX-Net
{ "docstring": "Return true if the object is an asynchronous generator function.\n\n Asynchronous generator functions are defined with \"async def\"\n syntax and have \"yield\" expressions in their body.\n ", "language": "en", "n_whitespaces": 35, "n_words": 26, "vocab_size": 25 }
5
Python
5
8198943edd73a363c266633e1aa5b2a9e9c9f526
inspect.py
218,469
2
13
isasyncgenfunction
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
11
0
55,329
7
9
17
def parse_known_args(self, args=None, namespace=None, nohelp=False):
        if args is None:
            # args default to the system args
            args = _sys.argv[1:]

        args = fix_underscores(args)
        # handle the single dash stuff. See _handle_single_dash_addarg for info
        actions = set()
        for action in self._actions:
            actions.update(action.option_strings)
        args = self._handle_single_dash_parsearg(args, actions)
        if nohelp:
            # ignore help
            args = [
                a
                for a in args
                if a != '-h' and a != '--help' and a != '--helpall' and a != '--h'
            ]
        return super().parse_known_args(args, namespace)
parlai/core/params.py
177
ParlAI
{ "docstring": "\n Parse known args to ignore help flag.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
77
Python
48
4291c8a63a3ae9e7107dda0f90fff8da3b31d29b
params.py
195,034
15
107
parse_known_args
https://github.com/facebookresearch/ParlAI.git
python 3.8 parser fix on args_that_override (#4507) * single dash * handle args during parsing
251
0
47,160
15
2
4
def on_kill(self) -> None:
        if self.hook:
            self.hook.cancel_job()
airflow/providers/google/cloud/operators/vertex_ai/custom_job.py
36
airflow
{ "docstring": "\n Callback called when the operator is killed.\n Cancel any running job.\n ", "language": "en", "n_whitespaces": 33, "n_words": 11, "vocab_size": 11 }
7
Python
7
640c0b67631c5f2c8ee866b0726fa7a8a452cd3c
custom_job.py
44,285
7
20
on_kill
https://github.com/apache/airflow.git
Create CustomJob and Datasets operators for Vertex AI service (#20077)
32
0
8,232
10
2
10
def debug_print(self, msg):
        from distutils.debug import DEBUG
        if DEBUG:
            print(msg)
            sys.stdout.flush()


    # -- Option validation methods -------------------------------------
    # (these are very handy in writing the 'finalize_options()' method)
    #
    # NB. the general philosophy here is to ensure that a particular option
    # value meets certain type and value constraints.  If not, we try to
    # force it into conformance (eg. if we expect a list but have a string,
    # split the string on comma and/or whitespace).  If we can't force the
    # option into conformance, raise DistutilsOptionError.  Thus, command
    # classes need do nothing more than (eg.)
    #   self.ensure_string_list('foo')
    # and they can be guaranteed that thereafter, self.foo will be
    # a list of strings.
python3.10.4/Lib/distutils/cmd.py
60
XX-Net
{ "docstring": "Print 'msg' to stdout if the global DEBUG (taken from the\n DISTUTILS_DEBUG environment variable) flag is true.\n ", "language": "en", "n_whitespaces": 31, "n_words": 17, "vocab_size": 16 }
116
Python
86
8198943edd73a363c266633e1aa5b2a9e9c9f526
cmd.py
222,616
5
28
debug_print
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
200
0
56,677
10
1
4
def test_large_params(ray_start_4_cpus):
    array_size = int(1e8)
python/ray/train/tests/test_base_trainer.py
23
ray
{ "docstring": "Tests if large arguments are can be serialized by the Trainer.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
5
Python
5
2b62bba7c4014c8d943b197bf8396df7dd0f82e3
test_base_trainer.py
128,168
6
48
test_large_params
https://github.com/ray-project/ray.git
[AIR] Support large checkpoints and other arguments (#28826) Signed-off-by: Amog Kamsetty [email protected] Previously the arguments passed to the Trainer would be captured in the Trainable context. For arguments that are very large in size, this would prevent the Trainable from being registered due to gRPC resource limits. Instead, we now always use tune.with_parameters to save the Trainer arguments in the object store rather than capturing it in the context.
11
0
28,617
8
1
2
def lataxis(self):
        return self["lataxis"]
packages/python/plotly/plotly/graph_objs/layout/_geo.py
22
plotly.py
{ "docstring": "\n The 'lataxis' property is an instance of Lataxis\n that may be specified as:\n - An instance of :class:`plotly.graph_objs.layout.geo.Lataxis`\n - A dict of string/value properties that will be passed\n to the Lataxis constructor\n\n Supported dict properties:\n\n dtick\n Sets the graticule's longitude/latitude tick\n step.\n gridcolor\n Sets the graticule's stroke color.\n gridwidth\n Sets the graticule's stroke width (in px).\n range\n Sets the range of this axis (in degrees), sets\n the map's clipped coordinates.\n showgrid\n Sets whether or not graticule are shown on the\n map.\n tick0\n Sets the graticule's starting tick\n longitude/latitude.\n\n Returns\n -------\n plotly.graph_objs.layout.geo.Lataxis\n ", "language": "en", "n_whitespaces": 454, "n_words": 91, "vocab_size": 63 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_geo.py
231,525
2
11
lataxis
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
62,969
7
1
7
def make_token(self, user):
        return self._make_token_with_timestamp(
            user,
            self._num_seconds(self._now()),
            self.secret,
        )
django/contrib/auth/tokens.py
49
django
{ "docstring": "\n Return a token that can be used once to do a password reset\n for the given user.\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 16 }
9
Python
9
0dcd549bbe36c060f536ec270d34d9e7d4b8e6c7
tokens.py
203,186
6
31
make_token
https://github.com/django/django.git
Fixed #30360 -- Added support for secret key rotation. Thanks Florian Apolloner for the implementation idea. Co-authored-by: Andreas Pelme <[email protected]> Co-authored-by: Carlton Gibson <[email protected]> Co-authored-by: Vuyisile Ndlovu <[email protected]>
63
0
50,245
11
12
49
def get_process_curses_data(self, p, selected, args):
        ret = [self.curse_new_line()]

        # When a process is selected:
        # * display a special character at the beginning of the line
        # * underline the command name
        ret.append(self.curse_add_line(unicode_message('PROCESS_SELECTOR') if (selected and not args.disable_cursor) else ' ', 'SELECTED'))

        # CPU
        ret.append(self._get_process_curses_cpu(p, selected, args))

        # MEM
        ret.append(self._get_process_curses_mem(p, selected, args))
        ret.append(self._get_process_curses_vms(p, selected, args))
        ret.append(self._get_process_curses_rss(p, selected, args))

        # PID
        if not self.args.programs:
            # Display processes, so the PID should be displayed
            msg = self.layout_stat['pid'].format(p['pid'], width=self.__max_pid_size())
        else:
            # Display programs, so the PID should not be displayed
            # Instead displays the number of children
            msg = self.layout_stat['pid'].format(
                len(p['childrens']) if 'childrens' in p else '_', width=self.__max_pid_size()
            )
        ret.append(self.curse_add_line(msg))

        # USER
        ret.append(self._get_process_curses_username(p, selected, args))

        # TIME+
        ret.append(self._get_process_curses_time(p, selected, args))

        # THREAD
        ret.append(self._get_process_curses_thread(p, selected, args))

        # NICE
        ret.append(self._get_process_curses_nice(p, selected, args))

        # STATUS
        ret.append(self._get_process_curses_status(p, selected, args))

        # IO read/write
        ret.append(self._get_process_curses_io_read(p, selected, args))
        ret.append(self._get_process_curses_io_write(p, selected, args))

        # Command line
        # If no command line for the process is available, fallback to the bare process name instead
        bare_process_name = p['name']
        cmdline = p.get('cmdline', '?')

        try:
            process_decoration = 'PROCESS_SELECTED' if (selected and not args.disable_cursor) else 'PROCESS'
            if cmdline:
                path, cmd, arguments = split_cmdline(bare_process_name, cmdline)
                # Manage end of line in arguments (see #1692)
                arguments.replace('\r\n', ' ')
                arguments.replace('\n', ' ')
                if os.path.isdir(path) and not args.process_short_name:
                    msg = self.layout_stat['command'].format(path) + os.sep
                    ret.append(self.curse_add_line(msg, splittable=True))
                    ret.append(self.curse_add_line(cmd, decoration=process_decoration, splittable=True))
                else:
                    msg = self.layout_stat['command'].format(cmd)
                    ret.append(self.curse_add_line(msg, decoration=process_decoration, splittable=True))
                if arguments:
                    msg = ' ' + self.layout_stat['command'].format(arguments)
                    ret.append(self.curse_add_line(msg, splittable=True))
            else:
                msg = self.layout_stat['name'].format(bare_process_name)
                ret.append(self.curse_add_line(msg, decoration=process_decoration, splittable=True))
        except (TypeError, UnicodeEncodeError) as e:
            # Avoid crash after running fine for several hours #1335
            logger.debug("Can not decode command line '{}' ({})".format(cmdline, e))
            ret.append(self.curse_add_line('', splittable=True))

        return ret
glances/plugins/glances_processlist.py
920
glances
{ "docstring": "Get curses data to display for a process.\n\n - p is the process to display\n - selected is a tag=True if p is the selected process\n ", "language": "en", "n_whitespaces": 47, "n_words": 26, "vocab_size": 16 }
268
Python
147
9614e2bb19c6bdd512fea5dafbed1250da0049d9
glances_processlist.py
70,279
46
560
get_process_curses_data
https://github.com/nicolargo/glances.git
First version but UI should be improved and when user is in program mode, it did not work...
935
0
15,483
17
1
14
async def test_available_template_with_entities(hass):
    await setup.async_setup_component(
        hass,
        "switch",
        {
            "switch": {
                "platform": "template",
                "switches": {
                    "test_template_switch": {
                        **OPTIMISTIC_SWITCH_CONFIG,
                        "value_template": "{{ 1 == 1 }}",
                        "availability_template": "{{ is_state('availability_state.state', 'on') }}",
                    }
                },
            }
        },
    )

    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()

    hass.states.async_set("availability_state.state", STATE_ON)
    await hass.async_block_till_done()

    assert hass.states.get("switch.test_template_switch").state != STATE_UNAVAILABLE

    hass.states.async_set("availability_state.state", STATE_OFF)
    await hass.async_block_till_done()

    assert hass.states.get("switch.test_template_switch").state == STATE_UNAVAILABLE
tests/components/template/test_switch.py
224
core
{ "docstring": "Test availability templates with values from other entities.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
55
Python
34
11cc1feb853bcfd9633ebfc44eae142c10a7f983
test_switch.py
300,441
26
123
test_available_template_with_entities
https://github.com/home-assistant/core.git
Tweak template switch tests (#71738)
293
0
99,301
17
1
18
def test_delete_button_with_next_url(self):
        # page_listing_more_button generator yields only `Delete button` with this permission set
        page_perms = DeleteOnlyPagePerms()
        page = self.root_page
        base_url = reverse("wagtailadmin_pages:delete", args=[page.id])

        next_url = "a/random/url/"
        full_url = base_url + "?" + urlencode({"next": next_url})

        delete_button = next(
            page_listing_more_buttons(page, page_perms, next_url=next_url)
        )

        self.assertEqual(delete_button.url, full_url)
wagtail/admin/tests/test_buttons_hooks.py
124
wagtail
{ "docstring": "\n Ensure that the built in delete button supports a next_url provided.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
43
Python
36
bf65fa94ea5aa17f3c42e5cb5401fb7d34a60b5e
test_buttons_hooks.py
79,300
10
72
test_delete_button_with_next_url
https://github.com/wagtail/wagtail.git
fix issue with edit page header delete button showing an invalid next_url - fixes #9195 - header button on edit page & page listing - unpublish now correctly includes the next url (was missing on page listing previously) - header button on edit page - delete button does not include next url (as this would be the edit page for what was deleted) - adds more robust unit tests for the page listing & page header more hooks, including separating the tests out to separate classes
124
0
16,914
12
6
29
async def get_cluster_status(self, req):
        (legacy_status, formatted_status_string, error) = await asyncio.gather(
            *[
                self._gcs_aio_client.internal_kv_get(
                    key.encode(), namespace=None, timeout=GCS_RPC_TIMEOUT_SECONDS
                )
                for key in [
                    DEBUG_AUTOSCALING_STATUS_LEGACY,
                    DEBUG_AUTOSCALING_STATUS,
                    DEBUG_AUTOSCALING_ERROR,
                ]
            ]
        )

        formatted_status = (
            json.loads(formatted_status_string.decode())
            if formatted_status_string
            else {}
        )
        return dashboard_optional_utils.rest_response(
            success=True,
            message="Got cluster status.",
            autoscaling_status=legacy_status.decode() if legacy_status else None,
            autoscaling_error=error.decode() if error else None,
            cluster_status=formatted_status if formatted_status else None,
        )
dashboard/modules/reporter/reporter_head.py
180
ray
{ "docstring": "Returns status information about the cluster.\n\n Currently contains two fields:\n autoscaling_status (str)-- a status message from the autoscaler.\n autoscaling_error (str)-- an error message from the autoscaler if\n anything has gone wrong during autoscaling.\n\n These fields are both read from the GCS, it's expected that the\n autoscaler writes them there.\n ", "language": "en", "n_whitespaces": 114, "n_words": 49, "vocab_size": 39 }
57
Python
43
dac7bf17d9214dd3b79238caf0c8ec76f40328c6
reporter_head.py
126,860
25
121
get_cluster_status
https://github.com/ray-project/ray.git
[serve] Make serve agent not blocking when GCS is down. (#27526) This PR fixed several issue which block serve agent when GCS is down. We need to make sure serve agent is always alive and can make sure the external requests can be sent to the agent and check the status. - internal kv used in dashboard/agent blocks the agent. We use the async one instead - serve controller use ray.nodes which is a blocking call and blocking forever. change to use gcs client with timeout - agent use serve controller client which is a blocking call with max retries = -1. This blocks until controller is back. To enable Serve HA, we also need to setup: - RAY_gcs_server_request_timeout_seconds=5 - RAY_SERVE_KV_TIMEOUT_S=5 which we should set in KubeRay.
352
0
28,284
15
7
23
def piecewise_exclusive(expr, *, skip_nan=False):
    if not expr.has(Piecewise):
        return expr
    if isinstance(expr, Piecewise):
        cumcond = false
        newargs = []
        for arg in expr.args:
            cancond = And(arg.cond, Not(cumcond)).simplify()
            cumcond = Or(arg.cond, cumcond).simplify()
            newargs.append(
                ExprCondPair(piecewise_exclusive(arg.expr, skip_nan=skip_nan),
                             cancond))
        if not skip_nan and cumcond is not true:
            newargs.append(ExprCondPair(Undefined, Not(cumcond).simplify()))
        return Piecewise(*newargs, evaluate=False)
    return expr.func(*[piecewise_exclusive(arg, skip_nan=skip_nan)
                       for arg in expr.args],
                     evaluate=False)
sympy/functions/elementary/piecewise.py
249
sympy
{ "docstring": "\n Return a :class:`Piecewise` with exclusive conditions, i.e., where exactly\n one condition is True.\n\n SymPy normally represents the condition in an \"if-elif\"-fashion, which\n leads to that more than one condition can be True. This is sometimes not\n wanted when representing the :class:`Piecewise` mathematically.\n\n Note that further manipulation of the resulting :class:`Piecewise`, e.g.\n simplifying it, will most likely make it non-exclusive. Hence, this is\n primarily a function to be used in conjunction with printing the Piecewise\n or if one would like to reorder the expression-condition pairs.\n\n ``piecewise_exclusive`` will also explicitly add a final\n :class:`~sympy.core.numbers.NaN` segment to the :class:`Piecewise`, unless\n all cases are covered. This can be avoided by passing ``skip_nan=True`` as\n a final argument. It can also be used in some situations where SymPy cannot\n determine that all cases are covered.\n\n Examples\n ========\n >>> from sympy import piecewise_exclusive, Symbol, Piecewise, S\n >>> x = Symbol('x', real=True)\n >>> p = Piecewise((0, x < 0), (S.Half, x <= 0), (1, True))\n >>> piecewise_exclusive(p)\n Piecewise((0, x < 0), (1/2, Eq(x, 0)), (1, x > 0))\n >>> piecewise_exclusive(Piecewise((2, x > 1)))\n Piecewise((2, x > 1), (nan, x <= 1))\n >>> piecewise_exclusive(Piecewise((2, x > 1)), skip_nan=True)\n Piecewise((2, x > 1))\n\n ", "language": "en", "n_whitespaces": 272, "n_words": 193, "vocab_size": 124 }
55
Python
40
a226912a87198dac24e5cc9db4b2077422b021f0
piecewise.py
199,141
18
160
piecewise_exclusive
https://github.com/sympy/sympy.git
Add piecewise_canonical function
238
0
49,161
17
4
10
def temperature_unit(self) -> str:
        if (
            self._unit_value
            and self._unit_value.metadata.unit
            and "f" in self._unit_value.metadata.unit.lower()
        ):
            return UnitOfTemperature.FAHRENHEIT
        return UnitOfTemperature.CELSIUS
homeassistant/components/zwave_js/climate.py
75
core
{ "docstring": "Return the unit of measurement used by the platform.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
18
Python
16
9a747bafa398185eb3d4fe041c52acfbb8264372
climate.py
290,231
9
45
temperature_unit
https://github.com/home-assistant/core.git
Use enums instead of deprecated constants (#81591)
90
0
89,349
13
5
32
def _bcoo_todense_batching_rule(batched_args, batch_dims, *, spinfo):
  data, indices = batched_args
  if any(b not in [0, None] for b in batch_dims):
    raise NotImplementedError(f"batch_dims={batch_dims}. Only 0 and None are supported.")
  if batch_dims[0] is None:
    data = data[None, ...]
  if batch_dims[1] is None:
    indices = indices[None, ...]
  new_spinfo = BCOOInfo(
      shape=(max(data.shape[0], indices.shape[0]), *spinfo.shape))
  return bcoo_todense(data, indices, spinfo=new_spinfo), 0

ad.defjvp(bcoo_todense_p, _bcoo_todense_jvp, None)
ad.primitive_transposes[bcoo_todense_p] = _bcoo_todense_transpose
batching.primitive_batchers[bcoo_todense_p] = _bcoo_todense_batching_rule
xla.register_translation(bcoo_todense_p, xla.lower_fun(
    _bcoo_todense_impl, multiple_results=False, new_style=True))

#--------------------------------------------------------------------
# bcoo_fromdense

bcoo_fromdense_p = core.Primitive('bcoo_fromdense')
bcoo_fromdense_p.multiple_results = True

_TRACED_NSE_ERROR = 
jax/experimental/sparse/bcoo.py
270
jax
{ "docstring": "\nThe error arose for the nse argument of bcoo_fromdense. In order for BCOO.fromdense()\nto be used in traced/compiled code, you must pass a concrete value to the nse\n(number of specified elements) argument.\n", "language": "en", "n_whitespaces": 30, "n_words": 33, "vocab_size": 28 }
79
Python
63
2c20d82776fea482aaf52e18ebad4f7fce5c3a81
bcoo.py
119,021
11
114
_bcoo_todense_batching_rule
https://github.com/google/jax.git
[sparse] generalize metadata argument in BCOO primitives
91
0
26,534
14
1
7
def addslashes(value):
    return value.replace("\\", "\\\\").replace('"', '\\"').replace("'", "\\'")


@register.filter(is_safe=True)
@stringfilter
django/template/defaultfilters.py
81
@register.filter(is_safe=True) @stringfilter
django
{ "docstring": "\n Add slashes before quotes. Useful for escaping strings in CSV, for\n example. Less useful for escaping JavaScript; use the ``escapejs``\n filter instead.\n ", "language": "en", "n_whitespaces": 35, "n_words": 22, "vocab_size": 19 }
9
Python
9
9c19aff7c7561e3a82978a272ecdaad40dda5c00
defaultfilters.py
206,236
2
29
addslashes
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
13
1
51,427
12
1
8
def get_provider_plugins() -> t.Dict[str, t.Type[CloudProvider]]:
    return get_cloud_plugins()[0]


@cache
test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py
46
@cache
ansible
{ "docstring": "Return a dictionary of the available cloud provider plugins.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
8
Python
8
3eb0485dd92c88cc92152d3656d94492db44b183
__init__.py
267,824
3
26
get_provider_plugins
https://github.com/ansible/ansible.git
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
13
1
79,105
8
4
35
def test_get_elb_config(self):
        conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
        conn_elb = boto.ec2.elb.connect_to_region(region,
                                                  **boto_conn_parameters)
        zones = [zone.name for zone in conn_ec2.get_all_zones()]
        elb_name = 'TestGetELBConfig'
        load_balancer = conn_elb.create_load_balancer(elb_name,
                                                      zones,
                                                      [(80, 80, 'http')])
        reservations = conn_ec2.run_instances('ami-08389d60', min_count=3)
        all_instance_ids = [instance.id for instance in reservations.instances]
        load_balancer.register_instances(all_instance_ids)

        # DescribeTags does not appear to be included in moto
        # so mock the _get_all_tags function. Ideally we wouldn't
        # need to mock this.
        with patch('salt.modules.boto_elb._get_all_tags',
                   MagicMock(return_value=None)):
            ret = boto_elb.get_elb_config(elb_name, **conn_parameters)
            _expected_keys = ['subnets',
                              'availability_zones',
                              'canonical_hosted_zone_name_id',
                              'tags',
                              'dns_name',
                              'listeners',
                              'backends',
                              'policies',
                              'vpc_id',
                              'scheme',
                              'canonical_hosted_zone_name',
                              'security_groups']
            for key in _expected_keys:
                self.assertIn(key, ret)
tests/unit/modules/test_boto_elb.py
281
salt
{ "docstring": "\n tests that given an valid ids in the form of a list that the boto_elb\n deregister_instances all members of the given list\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 16 }
90
Python
72
07abfa2a7a70b0bfa23ae45172e090fc4b9c180c
test_boto_elb.py
215,645
28
167
test_get_elb_config
https://github.com/saltstack/salt.git
adding test_get_elb_config function
666
0
54,068
12
3
7
def offset_reached(self) -> bool:
        if self._event and self._offset_value:
            return is_offset_reached(
                self._event.start_datetime_local, self._offset_value
            )
        return False
homeassistant/components/google/calendar.py
52
core
{ "docstring": "Return whether or not the event offset was reached.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
15
Python
14
36bb947cdf24cb74c4d4288ca61825226e1de5ff
calendar.py
296,298
7
32
offset_reached
https://github.com/home-assistant/core.git
Fix bug in google calendar offset calculation (#70024) Move the offset reached computation outside of the update method so that it is computed when state updates occur rather than when data refreshes happen (which are throttled and happen at most every 15 minutes). Issue #69892
73
0
95,285
11
1
3
def validate_can_orderby(self) -> None:
        raise NotImplementedError
src/sentry/snuba/metrics/fields/base.py
19
sentry
{ "docstring": "\n Validate that the expression can be used to order a query\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
6
Python
6
4acb1834c41648180bbb41cbe248b50d65e5977d
base.py
86,711
5
10
validate_can_orderby
https://github.com/getsentry/sentry.git
feat(metrics): Adds mqb query transform to MetricsQuery [TET-163] (#37652) So far this PR has only test cases that shows expected output from MQB (input to metrics abstraction layer) and the final output that would be passed to metrics abstraction layer I have printed out queries spit out by MQB and coalesced them into the test cases in this PR, and so should cover all queries made by performance to metrics: - I have only listed a variation or two of the same functions for example `p75(transaction.duration)` but I did not add `p50(transaction.duration)` because the logic would be the same so need to add this to these tests - Only thing missing is the recent `countIf` functions added for performance which I will add later on listed here -> https://github.com/getsentry/sentry/blob/master/src/sentry/search/events/datasets/metrics.py#L179-L276 ### Changes to MQB output:- - Removed tags from select statement, as if they are listed in the `groupBy`, they will be returned by metrics abstraction layer - Having clauses are not supported - Transform functions are not supported - Removed ordering by `bucketed_time` as this behavior is handled post query by metrics abstraction layer - Replaced metric ids/names with MRI as this is the naming contract we can guarantee - Replaced tag values with their tag names because metrics abstraction layer will handle the indexer resolving and reverse resolving - Replaced SnQL function definition with their corresponding derived metrics so for example failure_rate, apdex, user_misery, team_key_transactions, count_web_vitals and histogram functions ### ToDo from me to get this test to pass - [x] `snuba-sdk` needs to support MRI as a column name in `Column` [TET-323] - [x] `MetricField` needs to support `args` and `alias` [TET-320, TET-322] - [x] Add `MetricGroupByField` for `groupBy` columns that accept an `alias` [TET-320] - [x] Aliasing functionality needs to be supported [TET-320] - [x] Add derived metric for `team_key_transaction` [TET-325] - [x] Add derived metric for `count_web_vital_measurements` [TET-161] - [x] Add derived metric for `rate` [TET-129] - [x] `MetricsQuery` accepts MRI rather than public facing names [TET-321] - [x] Support for tuples conditions [TET-319] - [x] Add derived metrics for the 3 `countIf` functions [TET-326] - [x] Transform MQB `Query` object to `MetricsQuery` (This PR) - [x] Figure out addition of Granularity processor [TET-327] - [x] Add Invalid test cases (This PR) - [ ] Discuss granularity differences/query bounds (Will be handled in subsequent PR [TET-452]) [TET-323]: https://getsentry.atlassian.net/browse/TET-323?atlOrigin=eyJpIjoiNWRkNTljNzYxNjVmNDY3MDlhMDU5Y2ZhYzA5YTRkZjUiLCJwIjoiZ2l0aHViLWNvbS1KU1cifQ
20
0
18,156
6
1
16
def set_globals(self):
        logger.debug("Setting global config")
        section = "global"
        self.add_section(title=section,
                         info="Options that apply to all extraction plugins")
        self.add_item(
            section=section,
            title="allow_growth",
            datatype=bool,
            default=False,
            group="settings",
            info="[Nvidia Only]. Enable the Tensorflow GPU `allow_growth` configuration option. "
                 "This option prevents Tensorflow from allocating all of the GPU VRAM at launch "
                 "but can lead to higher VRAM fragmentation and slower performance. Should only "
                 "be enabled if you are having problems running extraction.")
        self.add_item(
            section=section,
            title="aligner_min_scale",
            datatype=float,
            min_max=(0.0, 1.0),
            rounding=2,
            default=0.05,
            group="filters",
            info="Filters out faces below this size. This is a multiplier of the minimum "
                 "dimension of the frame (i.e. 1280x720 = 720). If the original face extract "
                 "box is smaller than the minimum dimension times this multiplier, it is "
                 "considered a false positive and discarded. Faces which are found to be "
                 "unusually smaller than the frame tend to be misaligned images, except in "
                 "extreme long-shots. These can be usually be safely discarded.")
        self.add_item(
            section=section,
            title="aligner_max_scale",
            datatype=float,
            min_max=(0.0, 10.0),
            rounding=2,
            default=2.00,
            group="filters",
            info="Filters out faces above this size. This is a multiplier of the minimum "
                 "dimension of the frame (i.e. 1280x720 = 720). If the original face extract "
                 "box is larger than the minimum dimension times this multiplier, it is "
                 "considered a false positive and discarded. Faces which are found to be "
                 "unusually larger than the frame tend to be misaligned images except in extreme "
                 "close-ups. These can be usually be safely discarded.")
        self.add_item(
            section=section,
            title="aligner_distance",
            datatype=float,
            min_max=(0.0, 25.0),
            rounding=1,
            default=16,
            group="filters",
            info="Filters out faces who's landmarks are above this distance from an 'average' "
                 "face. Values above 16 tend to be fairly safe. Values above 10 will remove more "
                 "false positives, but may also filter out some faces at extreme angles.")
plugins/extract/_config.py
322
faceswap
{ "docstring": "\n Set the global options for extract\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
288
Python
148
a8f22cc019d56cec18ccd8223587d97dc4b37d04
_config.py
101,643
53
206
set_globals
https://github.com/deepfakes/faceswap.git
Extract updates: - Default CPU detector to MTCNN - add basic Aligner false positive filters - Typing: align + plugins - Use specific AlignerBatch class for alignment -
914
0
21,051
10
2
8
def load(cls):
        urls = cls.get_urls()

        # Remove songs without id
        # and create Song objects
        tracks = [Song.from_url(url) for url in urls]

        return cls(tracks)
spotdl/types/saved.py
56
spotify-downloader
{ "docstring": "\n Loads saved tracks from Spotify.\n Will throw an exception if users is not logged in.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 15 }
24
Python
22
fa2ad657482aca9dc628e6d7062b8badf2706bb6
saved.py
30,105
4
32
load
https://github.com/spotDL/spotify-downloader.git
v4 init
66
0
5,314
9
1
4
def print_help(self):
        help_text = 
        print(help_text)
gamestonk_terminal/cryptocurrency/overview/overview_controller.py
27
OpenBBTerminal
{ "docstring": "Print help\nOverview Menu:\n\nCoinGecko:\n cgglobal global crypto market info\n cgnews last news available on CoinGecko\n cgdefi global DeFi market info\n cgstables stablecoins\n cgnft non fungible token market status\n cgnftday non fungible token of the day\n cgexchanges top crypto exchanges\n cgexrates coin exchange rates\n cgplatforms crypto financial platforms\n cgproducts crypto financial products\n cgindexes crypto indexes\n cgderivatives crypto derivatives\n cgcategories crypto categories\n cghold ethereum, bitcoin holdings overview statistics\nCoinPaprika:\n cpglobal global crypto market info\n cpinfo basic info about all coins available on CoinPaprika\n cpmarkets market related info about all coins available on CoinPaprika\n cpexchanges list all exchanges\n cpexmarkets all available markets on given exchange\n cpplatforms list blockchain platforms eg. ethereum, solana, kusama, terra\n cpcontracts all smart contracts for given platform\nCoinbase:\n cbpairs info about available trading pairs on Coinbase\nCryptoPanic:\n news recent crypto news from CryptoPanic aggregator\nWithdrawalFees:\n wf overall withdrawal fees\n ewf overall exchange withdrawal fees\n wfpe crypto withdrawal fees per exchange\nBlockchainCenter:\n altindex displays altcoin season index (if 75% of top 50 coins perform better than btc)\n", "language": "en", "n_whitespaces": 482, "n_words": 168, "vocab_size": 110 }
5
Python
5
18c3a4e5f69de5909fd3f516e54855b938bda51f
overview_controller.py
281,209
39
13
print_help
https://github.com/OpenBB-finance/OpenBBTerminal.git
Feature (crypto): Altcoin season index (#1155) * adding blockchaincenter model * added altindex feature * fix tests name * added autocompletion and fixed chart * fixed help strings and chart issue * refactor for subplot * changed dates to more readable format
27
0
83,615
7
13
40
def get_data():
	sales_order_entry = frappe.db.sql(
		,
		as_dict=1,
	)

	sales_orders = [row.name for row in sales_order_entry]
	mr_records = frappe.get_all(
		"Material Request Item",
		{"sales_order": ("in", sales_orders), "docstatus": 1},
		["parent", "qty", "sales_order", "item_code"],
	)

	bundled_item_map = get_packed_items(sales_orders)

	item_with_product_bundle = get_items_with_product_bundle(
		[row.item_code for row in sales_order_entry]
	)

	materials_request_dict = {}

	for record in mr_records:
		key = (record.sales_order, record.item_code)
		if key not in materials_request_dict:
			materials_request_dict.setdefault(key, {"qty": 0, "material_requests": [record.parent]})

		details = materials_request_dict.get(key)
		details["qty"] += record.qty

		if record.parent not in details.get("material_requests"):
			details["material_requests"].append(record.parent)

	pending_so = []

	for so in sales_order_entry:
		if so.item_code not in item_with_product_bundle:
			material_requests_against_so = materials_request_dict.get((so.name, so.item_code)) or {}
			# check for pending sales order
			if flt(so.total_qty) > flt(material_requests_against_so.get("qty")):
				so_record = {
					"item_code": so.item_code,
					"item_name": so.item_name,
					"description": so.description,
					"sales_order_no": so.name,
					"date": so.transaction_date,
					"material_request": ",".join(material_requests_against_so.get("material_requests", [])),
					"customer": so.customer,
					"territory": so.territory,
					"so_qty": so.total_qty,
					"requested_qty": material_requests_against_so.get("qty"),
					"pending_qty": so.total_qty - flt(material_requests_against_so.get("qty")),
					"company": so.company,
				}
				pending_so.append(so_record)
		else:
			for item in bundled_item_map.get((so.name, so.item_code), []):
				material_requests_against_so = materials_request_dict.get((so.name, item.item_code)) or {}
				if flt(item.qty) > flt(material_requests_against_so.get("qty")):
					so_record = {
						"item_code": item.item_code,
						"item_name": item.item_name,
						"description": item.description,
						"sales_order_no": so.name,
						"date": so.transaction_date,
						"material_request": ",".join(material_requests_against_so.get("material_requests", [])),
						"customer": so.customer,
						"territory": so.territory,
						"so_qty": item.qty,
						"requested_qty": material_requests_against_so.get("qty", 0),
						"pending_qty": item.qty - flt(material_requests_against_so.get("qty", 0)),
						"company": so.company,
					}
					pending_so.append(so_record)

	return pending_so
erpnext/selling/report/pending_so_items_for_purchase_request/pending_so_items_for_purchase_request.py
832
erpnext
{ "docstring": "\n\t\tSELECT\n\t\t\tso_item.item_code,\n\t\t\tso_item.item_name,\n\t\t\tso_item.description,\n\t\t\tso.name,\n\t\t\tso.transaction_date,\n\t\t\tso.customer,\n\t\t\tso.territory,\n\t\t\tsum(so_item.qty) as total_qty,\n\t\t\tso.company\n\t\tFROM `tabSales Order` so, `tabSales Order Item` so_item\n\t\tWHERE\n\t\t\tso.docstatus = 1\n\t\t\tand so.name = so_item.parent\n\t\t\tand so.status not in (\"Closed\",\"Completed\",\"Cancelled\")\n\t\tGROUP BY\n\t\t\tso.name,so_item.item_code\n\t\t", "language": "en", "n_whitespaces": 20, "n_words": 36, "vocab_size": 33 }
189
Python
122
494bd9ef78313436f0424b918f200dab8fc7c20b
pending_so_items_for_purchase_request.py
67,421
82
501
get_data
https://github.com/frappe/erpnext.git
style: format code with black
124
0
14,520
23
1
5
def test_validate_subscription_query_valid_with_fragment():
    result = validate_subscription_query(TEST_VALID_SUBSCRIPTION_QUERY_WITH_FRAGMENT)
    assert result is True


TEST_INVALID_MULTIPLE_QUERY_AND_SUBSCRIPTION = 
saleor/plugins/webhook/tests/subscription_webhooks/test_create_deliveries_for_subscription.py
31
saleor
{ "docstring": "\nquery{\n products(first:100){\n edges{\n node{\n id\n }\n }\n }\n}\nsubscription{\n event{\n ...on ProductUpdated{\n product{\n id\n }\n }\n }\n}", "language": "en", "n_whitespaces": 65, "n_words": 19, "vocab_size": 11 }
11
Python
9
aca6418d6c36956bc1ab530e6ef7e146ec9df90c
test_create_deliveries_for_subscription.py
26,494
3
14
test_validate_subscription_query_valid_with_fragment
https://github.com/saleor/saleor.git
Add Webhook payload via graphql subscriptions (#9394) * Add PoC of webhook subscriptions * add async webhooks subscription payloads feature * remove unneeded file * add translations subscription handling, fixes after review * remove todo * add descriptions * add descriptions, move subsrciption_payloads.py * refactor * fix imports, add changelog * check_document_is_single_subscription refactor Co-authored-by: Maciej Korycinski <[email protected]> Co-authored-by: Marcin Gębala <[email protected]>
16
0
5,023
8
1
8
def sample_with_count(self) -> Tuple[SampleBatchType, int]:
        batch = self.sample()
        return batch, batch.count
rllib/evaluation/rollout_worker.py
43
ray
{ "docstring": "Same as sample() but returns the count as a separate value.\n\n Returns:\n A columnar batch of experiences (e.g., tensors) and the\n size of the collected batch.\n\n Examples:\n >>> import gym\n >>> from ray.rllib.evaluation.rollout_worker import RolloutWorker\n >>> from ray.rllib.algorithms.pg.pg_tf_policy import PGTF1Policy\n >>> worker = RolloutWorker( # doctest: +SKIP\n ... env_creator=lambda _: gym.make(\"CartPole-v0\"), # doctest: +SKIP\n ... policy_spec=PGTFPolicy) # doctest: +SKIP\n >>> print(worker.sample_with_count()) # doctest: +SKIP\n (SampleBatch({\"obs\": [...], \"action\": [...], ...}), 3)\n ", "language": "en", "n_whitespaces": 209, "n_words": 70, "vocab_size": 48 }
11
Python
11
b383d987d161fee39fafe873c0822f4ea6ea02eb
rollout_worker.py
124,753
19
26
sample_with_count
https://github.com/ray-project/ray.git
[RLlib] Fix a bunch of issues related to connectors. (#26510)
32
0
27,674
8
3
13
def overrides(cls, **kwargs):
        default_config = cls()
        config_overrides = {}
        for key, value in kwargs.items():
            if not hasattr(default_config, key):
                raise KeyError(
                    f"Invalid property name {key} for config class {cls.__name__}!"
                )
            # Allow things like "lambda" as well.
            key = cls._translate_special_keys(key, warn_deprecated=True)
            config_overrides[key] = value

        return config_overrides
rllib/algorithms/algorithm_config.py
116
ray
{ "docstring": "Generates and validates a set of config key/value pairs (passed via kwargs).\n\n Validation whether given config keys are valid is done immediately upon\n construction (by comparing against the properties of a default AlgorithmConfig\n object of this class).\n Allows combination with a full AlgorithmConfig object to yield a new\n AlgorithmConfig object.\n\n Used anywhere, we would like to enable the user to only define a few config\n settings that would change with respect to some main config, e.g. in multi-agent\n setups and evaluation configs.\n\n Examples:\n >>> from ray.rllib.algorithms.ppo import PPOConfig\n >>> from ray.rllib.policy.policy import PolicySpec\n >>> config = (\n ... PPOConfig()\n ... .multi_agent(\n ... policies={\n ... \"pol0\": PolicySpec(config=PPOConfig.overrides(lambda_=0.95))\n ... },\n ... )\n ... )\n\n >>> from ray.rllib.algorithms.algorithm_config import AlgorithmConfig\n >>> from ray.rllib.algorithms.pg import PGConfig\n >>> config = (\n ... PGConfig()\n ... .evaluation(\n ... evaluation_num_workers=1,\n ... evaluation_interval=1,\n ... evaluation_config=AlgorithmConfig.overrides(explore=False),\n ... )\n ... )\n\n Returns:\n A dict mapping valid config property-names to values.\n\n Raises:\n KeyError: In case a non-existing property name (kwargs key) is being\n passed in. Valid property names are taken from a default AlgorithmConfig\n object of `cls`.\n ", "language": "en", "n_whitespaces": 599, "n_words": 175, "vocab_size": 112 }
45
Python
39
794cfd9725b4dc113aa50e60428367b15e921514
algorithm_config.py
137,274
11
64
overrides
https://github.com/ray-project/ray.git
[RLlib] `AlgorithmConfig.overrides()` to replace `multiagent->policies->config` and `evaluation_config` dicts. (#30879)
173
0
31,119
14
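A minimal usage sketch for the `overrides` helper in the record above, mirroring the example in its own docstring (illustrative only, not part of the dataset record; assumes `ray[rllib]` is installed):

from ray.rllib.algorithms.ppo import PPOConfig
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig

# `overrides` validates the given keys against a default AlgorithmConfig of the
# class and returns a plain dict of overrides that can be nested in another config.
config = (
    PPOConfig()
    .evaluation(
        evaluation_num_workers=1,
        evaluation_interval=1,
        evaluation_config=AlgorithmConfig.overrides(explore=False),
    )
)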
1
67
def setup_axes3(fig, rect):
    # rotate a bit for better orientation
    tr_rotate = Affine2D().translate(-95, 0)

    # scale degree to radians
    tr_scale = Affine2D().scale(np.pi/180., 1.)

    tr = tr_rotate + tr_scale + PolarAxes.PolarTransform()

    grid_locator1 = angle_helper.LocatorHMS(4)
    tick_formatter1 = angle_helper.FormatterHMS()

    grid_locator2 = MaxNLocator(3)

    # Specify theta limits in degrees
    ra0, ra1 = 8.*15, 14.*15
    # Specify radial limits
    cz0, cz1 = 0, 14000
    grid_helper = floating_axes.GridHelperCurveLinear(
        tr, extremes=(ra0, ra1, cz0, cz1),
        grid_locator1=grid_locator1,
        grid_locator2=grid_locator2,
        tick_formatter1=tick_formatter1,
        tick_formatter2=None)

    ax1 = fig.add_subplot(
        rect, axes_class=floating_axes.FloatingAxes, grid_helper=grid_helper)

    # adjust axis
    ax1.axis["left"].set_axis_direction("bottom")
    ax1.axis["right"].set_axis_direction("top")

    ax1.axis["bottom"].set_visible(False)
    ax1.axis["top"].set_axis_direction("bottom")
    ax1.axis["top"].toggle(ticklabels=True, label=True)
    ax1.axis["top"].major_ticklabels.set_axis_direction("top")
    ax1.axis["top"].label.set_axis_direction("top")

    ax1.axis["left"].label.set_text(r"cz [km$^{-1}$]")
    ax1.axis["top"].label.set_text(r"$\alpha_{1950}$")
    ax1.grid()

    # create a parasite axes whose transData in RA, cz
    aux_ax = ax1.get_aux_axes(tr)

    aux_ax.patch = ax1.patch  # for aux_ax to have a clip path as in ax
    ax1.patch.zorder = 0.9  # but this has a side effect that the patch is
    # drawn twice, and possibly over some other
    # artists. So, we decrease the zorder a bit to
    # prevent this.

    return ax1, aux_ax


##########################################################
fig = plt.figure(figsize=(8, 4))
fig.subplots_adjust(wspace=0.3, left=0.05, right=0.95)

ax1, aux_ax1 = setup_axes1(fig, 131)
aux_ax1.bar([0, 1, 2, 3], [3, 2, 1, 3])

ax2, aux_ax2 = setup_axes2(fig, 132)
theta = np.random.rand(10)*.5*np.pi
radius = np.random.rand(10) + 1.
aux_ax2.scatter(theta, radius)

ax3, aux_ax3 = setup_axes3(fig, 133)

theta = (8 + np.random.rand(10)*(14 - 8))*15.  # in degrees
radius = np.random.rand(10)*14000.
aux_ax3.scatter(theta, radius)

plt.show()
examples/axisartist/demo_floating_axes.py
732
matplotlib
{ "docstring": "\n Sometimes, things like axis_direction need to be adjusted.\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
214
Python
152
f34d0b9fb38b813eef3eb0de0d424860f9b3b102
demo_floating_axes.py
108,976
31
293
setup_axes3
https://github.com/matplotlib/matplotlib.git
Display grid in floating axes example. This is the only full featured example with floating axes, so displaying grids makes it easier to check that grids are indeed working.
347
0
23,409
11
2
17
def _setup_boto_session(self) -> None:
    if self.use_aws_account:
        self._boto_session = boto3session.Session(
            aws_access_key_id=self._provider.get("aws_access_key_id"),
            aws_secret_access_key=self._provider.get("aws_secret_access_key"),
        )
        self._boto_s3_resource = make_s3_resource(self._provider, session=self._boto_session)
    else:
        self._boto_session = boto3session.Session()
        self._boto_s3_resource = make_s3_resource(self._provider, config=Config(signature_version=UNSIGNED), session=self._boto_session)
airbyte-integrations/connectors/source-s3/source_s3/s3file.py
155
airbyte
{ "docstring": "\n Making a new Session at file level rather than stream level as boto3 sessions are NOT thread-safe.\n Currently grabbing last_modified across multiple files asynchronously and may implement more multi-threading in future.\n See https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html (anchor link broken, scroll to bottom)\n ", "language": "en", "n_whitespaces": 68, "n_words": 39, "vocab_size": 38 }
25
Python
18
91eff1dffdb04be968b6ee4ef8d8bbfeb2e882d0
s3file.py
3,612
15
96
_setup_boto_session
https://github.com/airbytehq/airbyte.git
🐛 Source S3: Loading of files' metadata (#8252)
131
0
497
15
1
11
def transpose(x):
    return tf.compat.v1.transpose(x)


@keras_export("keras.backend.gather")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
keras/backend.py
59
@keras_export("keras.backend.gather") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
keras
{ "docstring": "Transposes a tensor and returns it.\n\n Args:\n x: Tensor or variable.\n\n Returns:\n A tensor.\n\n Examples:\n\n >>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]])\n >>> tf.keras.backend.eval(var)\n array([[1., 2., 3.],\n [4., 5., 6.]], dtype=float32)\n >>> var_transposed = tf.keras.backend.transpose(var)\n >>> tf.keras.backend.eval(var_transposed)\n array([[1., 4.],\n [2., 5.],\n [3., 6.]], dtype=float32)\n >>> input = tf.keras.backend.placeholder((2, 3))\n >>> input\n <KerasTensor: shape=(2, 3) dtype=float32 ...>\n >>> input_transposed = tf.keras.backend.transpose(input)\n >>> input_transposed\n <KerasTensor: shape=(3, 2) dtype=float32 ...>\n ", "language": "en", "n_whitespaces": 168, "n_words": 69, "vocab_size": 51 }
7
Python
7
84afc5193d38057e2e2badf9c889ea87d80d8fbf
backend.py
269,611
2
17
transpose
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
10
1
80,230
9
2
9
def test_csrf_cookie_bad_or_missing_token(self):
    cases = [
        (None, None, REASON_CSRF_TOKEN_MISSING),
        (16 * "a", None, "CSRF token from POST has incorrect length."),
        (64 * "*", None, "CSRF token from POST has invalid characters."),
        (64 * "a", None, "CSRF token from POST incorrect."),
        (
            None,
            16 * "a",
            "CSRF token from the 'X-Csrftoken' HTTP header has incorrect length.",
        ),
        (
            None,
            64 * "*",
            "CSRF token from the 'X-Csrftoken' HTTP header has invalid characters.",
        ),
        (
            None,
            64 * "a",
            "CSRF token from the 'X-Csrftoken' HTTP header incorrect.",
        ),
    ]
    for post_token, meta_token, expected in cases:
        with self.subTest(post_token=post_token, meta_token=meta_token):
            self._check_bad_or_missing_token(
                expected,
                post_token=post_token,
                meta_token=meta_token,
            )
tests/csrf_tests/tests.py
184
django
{ "docstring": "\n If a CSRF cookie is present but the token is missing or invalid, the\n middleware rejects the incoming request.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 16 }
100
Python
49
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
202,390
29
119
test_csrf_cookie_bad_or_missing_token
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
471
0
50,101
12
1
4
def get(self) -> Any: raise NotImplementedError()
nni/common/serializer.py
23
nni
{ "docstring": "\n Get the original object. Usually used together with ``trace_copy``.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
6
Python
6
2d8f925b5ac558c45589bd90324efc86a568539e
serializer.py
112,291
5
12
get
https://github.com/microsoft/nni.git
Bug fix of Retiarii hyperparameter mutation (#4751)
20
0
24,626
7
2
8
def find_all_matches(self, sources=None, finder=None):
    # type: (Optional[List[Dict[S, Union[S, bool]]]], Optional[PackageFinder]) -> List[InstallationCandidate]

    from .dependencies import find_all_matches, get_finder

    if not finder:
        _, finder = get_finder(sources=sources)
    return find_all_matches(finder, self.as_ireq())
pipenv/vendor/requirementslib/models/requirements.py
77
pipenv
{ "docstring": "Find all matching candidates for the current requirement.\n\n Consults a finder to find all matching candidates.\n\n :param sources: Pipfile-formatted sources, defaults to None\n :param sources: list[dict], optional\n :param PackageFinder finder: A **PackageFinder** instance from pip's repository implementation\n :return: A list of Installation Candidates\n :rtype: list[ :class:`~pipenv.patched.pip._internal.index.InstallationCandidate` ]\n ", "language": "en", "n_whitespaces": 96, "n_words": 47, "vocab_size": 40 }
27
Python
27
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
requirements.py
22,233
5
46
find_all_matches
https://github.com/pypa/pipenv.git
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
73
0
4,277
11
1
10
def test_basic_add_GET(self):
    response = self.client.get(reverse("admin:admin_views_section_add"))
    self.assertIsInstance(response, TemplateResponse)
    self.assertEqual(response.status_code, 200)
tests/admin_views/tests.py
63
django
{ "docstring": "\n A smoke test to ensure GET on the add_view works.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
9
Python
9
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,827
4
37
test_basic_add_GET
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
37
0
52,119
11
1
2
def test_create_only_default_callable_sets_context(self):
tests/test_fields.py
13
django-rest-framework
{ "docstring": "\n CreateOnlyDefault instances with a callable default should set context\n on the callable if possible\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
2
Python
2
8b2ccccbe53f855fd9ee9a06e7b7997270e26dda
test_fields.py
48,642
9
63
test_create_only_default_callable_sets_context
https://github.com/encode/django-rest-framework.git
Stop calling `set_context`, planned for 3.13 drop (#8589)

Per the deprecation warnings (which have been raised since DRF 3.11),
`set_context()` was planned not to be supported in DRF 3.13. I think we can
safely delete it, in favor of `requires_context`.

From the 3.11 announcement:

> Previous our approach to this was that implementations could include a
> `set_context` method, which would be called prior to validation. However
> this approach had issues with potential race conditions. We have now
> move this approach into a pending deprecation state. It will continue to
> function, but will be escalated to a deprecated state in 3.12, and
> removed entirely in 3.13.

Why keep `RemovedInDRF313Warning` around?
=========================================

It's a bit odd that version 3.13 includes an exception class describing things
which are to be deleted in 3.13, but I've opted to keep the (now unreferenced)
class around, for fear of breaking others' setup.

(For example, if projects have a `filterwarnings` setup meant to intercept
`rest_framework.RemovedInDRF313Warning`, an error will be thrown due to an
unresolvable reference).
9
0
9,552
6
2
29
def test_barcode_splitter_legacy_fallback(self):
    test_file = os.path.join(
        self.BARCODE_SAMPLE_DIR,
        "patch-code-t-middle.pdf",
    )
    tempdir = tempfile.mkdtemp(prefix="paperless-", dir=settings.SCRATCH_DIR)

    pdf_file, separator_page_numbers = barcodes.scan_file_for_separating_barcodes(
        test_file,
    )

    self.assertEqual(test_file, pdf_file)
    self.assertTrue(len(separator_page_numbers) > 0)

    document_list = barcodes.separate_pages(test_file, separator_page_numbers)
    self.assertTrue(document_list)
    for document in document_list:
        barcodes.save_to_dir(document, target_dir=tempdir)

    target_file1 = os.path.join(tempdir, "patch-code-t-middle_document_0.pdf")
    target_file2 = os.path.join(tempdir, "patch-code-t-middle_document_1.pdf")

    self.assertTrue(os.path.isfile(target_file1))
    self.assertTrue(os.path.isfile(target_file2))
src/documents/tests/test_barcodes.py
239
paperless-ngx
{ "docstring": "\n GIVEN:\n - File containing barcode\n - Legacy method of detection is enabled\n WHEN:\n - File is scanned for barcodes\n THEN:\n - Barcodes are properly detected\n ", "language": "en", "n_whitespaces": 98, "n_words": 25, "vocab_size": 20 }
44
Python
37
f8ce6285df44cc580319c370a9d76149012615b1
test_barcodes.py
320,165
19
148
test_barcode_splitter_legacy_fallback
https://github.com/paperless-ngx/paperless-ngx.git
Allows using pdf2image instead of pikepdf if desired
193
0
117,082
10
1
2
def selected(self): return self["selected"]
packages/python/plotly/plotly/graph_objs/_bar.py
22
plotly.py
{ "docstring": "\n The 'selected' property is an instance of Selected\n that may be specified as:\n - An instance of :class:`plotly.graph_objs.bar.Selected`\n - A dict of string/value properties that will be passed\n to the Selected constructor\n\n Supported dict properties:\n\n marker\n :class:`plotly.graph_objects.bar.selected.Marke\n r` instance or dict with compatible properties\n textfont\n :class:`plotly.graph_objects.bar.selected.Textf\n ont` instance or dict with compatible\n properties\n\n Returns\n -------\n plotly.graph_objs.bar.Selected\n ", "language": "en", "n_whitespaces": 264, "n_words": 56, "vocab_size": 39 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_bar.py
226,189
2
11
selected
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
57,862
7
3
39
def test_learning_curve_display_default_usage(pyplot, data):
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)

    train_sizes = [0.3, 0.6, 0.9]
    display = LearningCurveDisplay.from_estimator(
        estimator, X, y, train_sizes=train_sizes
    )

    import matplotlib as mpl

    assert display.errorbar_ is None

    assert isinstance(display.lines_, list)
    for line in display.lines_:
        assert isinstance(line, mpl.lines.Line2D)

    assert isinstance(display.fill_between_, list)
    for fill in display.fill_between_:
        assert isinstance(fill, mpl.collections.PolyCollection)
        assert fill.get_alpha() == 0.5

    assert display.score_name == "Score"
    assert display.ax_.get_xlabel() == "Number of samples in the training set"
    assert display.ax_.get_ylabel() == "Score"

    _, legend_labels = display.ax_.get_legend_handles_labels()
    assert legend_labels == ["Testing metric"]

    train_sizes_abs, train_scores, test_scores = learning_curve(
        estimator, X, y, train_sizes=train_sizes
    )

    assert_array_equal(display.train_sizes, train_sizes_abs)
    assert_allclose(display.train_scores, train_scores)
    assert_allclose(display.test_scores, test_scores)
sklearn/model_selection/tests/test_plot.py
313
scikit-learn
{ "docstring": "Check the default usage of the LearningCurveDisplay class.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 7 }
98
Python
68
758fe0d9c72ba343097003e7992c9239e58bfc63
test_plot.py
261,652
27
211
test_learning_curve_display_default_usage
https://github.com/scikit-learn/scikit-learn.git
FEA add LearningCurveDisplay to show plot learning curve (#24084) Co-authored-by: jeremie du boisberranger <[email protected]> Co-authored-by: Arturo Amor <[email protected]>
199
0
76,917
11
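A minimal sketch of the `LearningCurveDisplay.from_estimator` API exercised by the test in the record above (illustrative only, not part of the dataset record; assumes scikit-learn >= 1.2 with matplotlib installed, and the dataset choice is arbitrary):

from sklearn.datasets import load_iris
from sklearn.model_selection import LearningCurveDisplay
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
# Evaluate at 30%, 60% and 90% of the training set and plot the learning curve;
# the returned display object exposes the Matplotlib Axes it drew on via `.ax_`.
display = LearningCurveDisplay.from_estimator(
    DecisionTreeClassifier(random_state=0), X, y, train_sizes=[0.3, 0.6, 0.9]
)
display.ax_.set_title("Decision tree learning curve")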
7
14
def _get_partition_size_along_axis(self, partition, axis=0):
    if isinstance(partition, self._partition_mgr_cls._partition_class):
        return [
            partition.apply(
                lambda df: len(df) if not axis else len(df.columns)
            )._data
        ]
    elif partition.axis == axis:
        return [
            ptn.apply(lambda df: len(df) if not axis else len(df.columns))._data
            for ptn in partition.list_of_partitions_to_combine
        ]
    return [
        partition.list_of_partitions_to_combine[0]
        .apply(lambda df: len(df) if not axis else (len(df.columns)))
        ._data
    ]
modin/core/execution/dask/implementations/pandas_on_dask/dataframe/dataframe.py
193
modin
{ "docstring": "\n Compute the length along the specified axis of the specified partition.\n\n Parameters\n ----------\n partition : ``PandasOnDaskDataframeVirtualPartition`` or ``PandasOnDaskDataframePartition``\n The partition whose size to compute.\n axis : int, default: 0\n The axis along which to compute size.\n\n Returns\n -------\n list\n A list of lengths along the specified axis that sum to the overall length of the partition\n along the specified axis.\n\n Notes\n -----\n This utility function is used to ensure that computation occurs asynchronously across all partitions\n whether the partitions are virtual or physical partitions.\n ", "language": "en", "n_whitespaces": 220, "n_words": 84, "vocab_size": 54 }
52
Python
33
a7354c9ca76525a265da98f2afe882c53f378840
dataframe.py
153,953
17
125
_get_partition_size_along_axis
https://github.com/modin-project/modin.git
FEAT-#4419: Extend virtual partitioning API to pandas on Dask (#4420) Signed-off-by: Rehan Durrani <[email protected]> Co-authored-by: Mahesh Vashishtha <[email protected]>
243
0
35,721
17
3
17
def _projections(self):
    from sympy.vector.operators import _get_coord_systems

    if isinstance(self, VectorZero):
        return (S.Zero, S.Zero, S.Zero)
    base_vec = next(iter(_get_coord_systems(self))).base_vectors()
    return tuple([self.dot(i) for i in base_vec])
sympy/vector/vector.py
106
sympy
{ "docstring": "\n Returns the components of this vector but the output includes\n also zero values components.\n\n Examples\n ========\n\n >>> from sympy.vector import CoordSys3D, Vector\n >>> C = CoordSys3D('C')\n >>> v1 = 3*C.i + 4*C.j + 5*C.k\n >>> v1._projections\n (3, 4, 5)\n >>> v2 = C.x*C.y*C.z*C.i\n >>> v2._projections\n (C.x*C.y*C.z, 0, 0)\n >>> v3 = Vector.zero\n >>> v3._projections\n (0, 0, 0)\n ", "language": "en", "n_whitespaces": 170, "n_words": 57, "vocab_size": 43 }
22
Python
21
975df9b627556d176039ba3a0f3a2e3a3df9686c
vector.py
196,456
6
68
_projections
https://github.com/sympy/sympy.git
Fixed removals not fully performed earlier
68
0
47,938
14
2
4
def download_and_preprocess_ecosystem_docs():
    import urllib.request
    import requests
doc/source/custom_directives.py
22
ray
{ "docstring": "\n This function downloads markdown readme files for various\n ecosystem libraries, saves them in specified locations and preprocesses\n them before sphinx build starts.\n\n If you have ecosystem libraries that live in a separate repo from Ray,\n adding them here will allow for their docs to be present in Ray docs\n without the need for duplicate files. For more details, see ``doc/README.md``.\n ", "language": "en", "n_whitespaces": 82, "n_words": 60, "vocab_size": 52 }
6
Python
5
756d08cd31b71f3654b8ca732c961e8cd9afe71d
custom_directives.py
147,565
8
33
download_and_preprocess_ecosystem_docs
https://github.com/ray-project/ray.git
[docs] Add support for external markdown (#23505) This PR fixes the issue of diverging documentation between Ray Docs and ecosystem library readmes which live in separate repos (eg. xgboost_ray). This is achieved by adding an extra step before the docs build process starts that downloads the readmes of specified ecosystem libraries from their GitHub repositories. The files are then preprocessed by a very simple parser to allow for differences between GitHub and Docs markdowns. In summary, this makes the markdown files in ecosystem library repositories single sources of truth and removes the need to manually keep the doc pages up to date, all the while allowing for differences between what's rendered on GitHub and in the Docs. See ray-project/xgboost_ray#204 & https://ray--23505.org.readthedocs.build/en/23505/ray-more-libs/xgboost-ray.html for an example. Needs ray-project/xgboost_ray#204 and ray-project/lightgbm_ray#30 to be merged first.
15
0
34,004
6
1
2
def close(self):
    # XXX: Should have a connect too?
    # def connect(self):
    #
salt/transport/base.py
18
salt
{ "docstring": "\n Close the connection.\n \n # Connect to the server / broker.\n # ", "language": "en", "n_whitespaces": 39, "n_words": 11, "vocab_size": 9 }
13
Python
10
ab4803984bce4a4de7cc10910e7310c4babf557e
base.py
215,394
1
6
close
https://github.com/saltstack/salt.git
Start to add base class defs
34
0
53,945
6
6
22
def adjust_legend_subtitles(legend):
    # Legend title not in rcParams until 3.0
    font_size = plt.rcParams.get("legend.title_fontsize", None)
    hpackers = legend.findobj(mpl.offsetbox.VPacker)[0].get_children()
    for hpack in hpackers:
        draw_area, text_area = hpack.get_children()
        handles = draw_area.get_children()
        if not all(artist.get_visible() for artist in handles):
            draw_area.set_width(0)
            for text in text_area.get_children():
                if font_size is not None:
                    text.set_size(font_size)
seaborn/utils.py
165
seaborn
{ "docstring": "\n Make invisible-handle \"subtitles\" entries look more like titles.\n\n Note: This function is not part of the public API and may be changed or removed.\n\n ", "language": "en", "n_whitespaces": 34, "n_words": 24, "vocab_size": 24 }
46
Python
34
6460a21555ba6557e1f6f06f4d677d9c19148169
utils.py
42,077
11
100
adjust_legend_subtitles
https://github.com/mwaskom/seaborn.git
Workaround for matplotlib rc_context issue (#2925) * Workaround for matplotlib rc_context issue Fixes #2914 * Add some additional comments about this workaround
138
0
7,477
15
1
2
def ohlc(self): return self["ohlc"]
packages/python/plotly/plotly/graph_objs/layout/template/_data.py
22
plotly.py
{ "docstring": "\n The 'ohlc' property is a tuple of instances of\n Ohlc that may be specified as:\n - A list or tuple of instances of plotly.graph_objs.layout.template.data.Ohlc\n - A list or tuple of dicts of string/value properties that\n will be passed to the Ohlc constructor\n\n Supported dict properties:\n\n Returns\n -------\n tuple[plotly.graph_objs.layout.template.data.Ohlc]\n ", "language": "en", "n_whitespaces": 131, "n_words": 48, "vocab_size": 33 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_data.py
232,559
2
11
ohlc
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
64,003
7
9
17
def detailed_match_files(patterns, files, all_matches=None):
    all_files = files if isinstance(files, Collection) else list(files)
    return_files = {}
    for pattern in patterns:
        if pattern.include is not None:
            result_files = pattern.match(all_files)
            if pattern.include:
                # Add files and record pattern.
                for result_file in result_files:
                    if result_file in return_files:
                        if all_matches:
                            return_files[result_file].patterns.append(pattern)
                        else:
                            return_files[result_file].patterns[0] = pattern
                    else:
                        return_files[result_file] = MatchDetail([pattern])
            else:
                # Remove files.
                for file in result_files:
                    del return_files[file]
    return return_files
python/ray/_private/thirdparty/pathspec/util.py
190
ray
{ "docstring": "\n Matches the files to the patterns, and returns which patterns matched\n the files.\n\n *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)\n contains the patterns to use.\n\n *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains\n the normalized file paths to be matched against *patterns*.\n\n *all_matches* (:class:`boot` or :data:`None`) is whether to return all\n matches patterns (:data:`True`), or only the last matched pattern\n (:data:`False`). Default is :data:`None` for :data:`False`.\n\n Returns the matched files (:class:`dict`) which maps each matched file\n (:class:`str`) to the patterns that matched in order (:class:`.MatchDetail`).\n ", "language": "en", "n_whitespaces": 116, "n_words": 79, "vocab_size": 52 }
66
Python
45
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
util.py
130,277
19
121
detailed_match_files
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
361
0
29,202
22
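The `detailed_match_files` utility in the record above lives in a vendored copy of the pathspec library; a minimal sketch of the upstream API it builds on (illustrative only, not part of the dataset record; assumes the standalone `pathspec` package is installed):

import pathspec

# Compile gitignore-style patterns and match them against normalized paths.
spec = pathspec.PathSpec.from_lines("gitwildmatch", ["*.pyc", "build/"])
files = ["pkg/mod.py", "pkg/mod.pyc", "build/out.txt"]
print(list(spec.match_files(files)))  # -> matched paths, e.g. ['pkg/mod.pyc', 'build/out.txt']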
1
2
def columnordersrc(self): return self["columnordersrc"]
packages/python/plotly/plotly/graph_objs/_table.py
22
plotly.py
{ "docstring": "\n Sets the source reference on Chart Studio Cloud for\n `columnorder`.\n\n The 'columnordersrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ", "language": "en", "n_whitespaces": 84, "n_words": 27, "vocab_size": 25 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_table.py
228,402
2
11
columnordersrc
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
60,075
7
1
7
def get_ugs() -> pd.DataFrame:
    return get_df(
        "https://finance.yahoo.com/screener/predefined/undervalued_growth_stocks"
    )


@log_start_end(log=logger)
openbb_terminal/stocks/discovery/yahoofinance_model.py
40
@log_start_end(log=logger)
OpenBBTerminal
{ "docstring": "Get stocks with earnings growth rates better than 25% and relatively low PE and PEG ratios.\n [Source: Yahoo Finance]\n\n Returns\n -------\n pd.DataFrame\n Undervalued stocks\n ", "language": "en", "n_whitespaces": 46, "n_words": 24, "vocab_size": 22 }
9
Python
9
bd12c203a0585dab6ca3ff81c3b4500e088b41d6
yahoofinance_model.py
285,143
12
14
get_ugs
https://github.com/OpenBB-finance/OpenBBTerminal.git
Fixed bad yfinance urls (#2282)
24
1
85,185
8
2
9
def ion():
    stack = ExitStack()
    stack.callback(ion if isinteractive() else ioff)
    matplotlib.interactive(True)
    install_repl_displayhook()
    return stack
lib/matplotlib/pyplot.py
59
matplotlib
{ "docstring": "\n Enable interactive mode.\n\n See `.pyplot.isinteractive` for more details.\n\n See Also\n --------\n ioff : Disable interactive mode.\n isinteractive : Whether interactive mode is enabled.\n show : Show all figures (and maybe block).\n pause : Show all figures, and block for a time.\n\n Notes\n -----\n For a temporary change, this can be used as a context manager::\n\n # if interactive mode is off\n # then figures will not be shown on creation\n plt.ioff()\n # This figure will not be shown immediately\n fig = plt.figure()\n\n with plt.ion():\n # interactive mode will be on\n # figures will automatically be shown\n fig2 = plt.figure()\n # ...\n\n To enable optional usage as a context manager, this function returns a\n `~contextlib.ExitStack` object, which is not intended to be stored or\n accessed by the user.\n ", "language": "en", "n_whitespaces": 259, "n_words": 127, "vocab_size": 82 }
14
Python
13
2d918ba09155810194bb4ba136369082ad46c8c8
pyplot.py
109,119
6
33
ion
https://github.com/matplotlib/matplotlib.git
Simplify impl. of functions optionally used as context managers. We can actually just put the "exit" logic into an ExitStack callback. If the return value is never `__enter__`'d via a "with" statement, it is never `__exit__`'d either.
32
0
23,442
10
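A minimal sketch of the context-manager behaviour that the `ExitStack` return value of `ion()` in the record above enables, following its own docstring (illustrative only, not part of the dataset record; assumes an interactive Matplotlib backend):

import matplotlib.pyplot as plt

plt.ioff()              # interactive mode off: new figures are not shown on creation
with plt.ion():         # temporarily re-enable interactive mode
    fig = plt.figure()  # this figure is shown immediately
# leaving the block runs the ExitStack callback, restoring the previous (off) state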
3
9
def format_coord(self, lon, lat):
    lon, lat = np.rad2deg([lon, lat])
    ns = 'N' if lat >= 0.0 else 'S'
    ew = 'E' if lon >= 0.0 else 'W'
    return ('%f\N{DEGREE SIGN}%s, %f\N{DEGREE SIGN}%s'
            % (abs(lat), ns, abs(lon), ew))
examples/misc/custom_projection.py
102
matplotlib
{ "docstring": "\n Override this method to change how the values are displayed in\n the status bar.\n\n In this case, we want them to be displayed in degrees N/S/E/W.\n ", "language": "en", "n_whitespaces": 55, "n_words": 26, "vocab_size": 21 }
37
Python
29
075ff0952896f44d7d0b0b3318f0978ae53f84d7
custom_projection.py
108,004
6
66
format_coord
https://github.com/matplotlib/matplotlib.git
Small style fixes.
87
0
23,009
10
2
9
def call_with_layout(fn, layout, *args, **kwargs):
    if layout:
        with dtensor.run_on(layout):
            result = fn(*args, **kwargs)
            return dtensor.relayout(result, layout)
    return fn(*args, **kwargs)
keras/dtensor/utils.py
86
keras
{ "docstring": "Invoke the function with inputs and relayout the result.\n\n Args:\n fn: the function to invoke.\n layout: if not None, the output of the fn will be relayout with this.\n *args: positional arguments to be called with fn.\n **kwargs: keyword arguments to be called with fn.\n\n Returns:\n The output of fn, with potential relayout with the layout specified.\n ", "language": "en", "n_whitespaces": 75, "n_words": 57, "vocab_size": 35 }
19
Python
16
d56b634f711802ae88c277926b6634465f346275
utils.py
269,092
6
53
call_with_layout
https://github.com/keras-team/keras.git
Remove the @tf.function for the dtensor run_with_layout(). This was creating one tf.function per initializer, and causing function retracing. We only need this currently for Identity initializer, since tf.function will convert the tf.MatrixDiag to tf.constant. PiperOrigin-RevId: 433516308
35
0
79,890
13
3
28
def detection_evaluate(self, dataset, results, topk=20, eval_fn=None):
    if eval_fn is None:
        eval_fn = bbox_map_eval
    else:
        assert callable(eval_fn)

    prog_bar = mmcv.ProgressBar(len(results))
    _mAPs = {}
    for i, (result, ) in enumerate(zip(results)):
        # self.dataset[i] should not call directly
        # because there is a risk of mismatch
        data_info = dataset.prepare_train_img(i)
        mAP = eval_fn(result, data_info['ann_info'])
        _mAPs[i] = mAP
        prog_bar.update()

    # descending select topk image
    _mAPs = list(sorted(_mAPs.items(), key=lambda kv: kv[1]))
    good_mAPs = _mAPs[-topk:]
    bad_mAPs = _mAPs[:topk]

    return good_mAPs, bad_mAPs
tools/analysis_tools/analyze_results.py
219
mmdetection
{ "docstring": "Evaluation for object detection.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n results (list): Object detection results from test\n results pkl file.\n topk (int): Number of the highest topk and\n lowest topk after evaluation index sorting. Default: 20.\n eval_fn (callable, optional): Eval function, Default: None.\n\n Returns:\n tuple: A tuple contains good samples and bad samples.\n good_mAPs (dict[int, float]): A dict contains good\n samples's indices in dataset and model's\n performance on them.\n bad_mAPs (dict[int, float]): A dict contains bad\n samples's indices in dataset and model's\n performance on them.\n ", "language": "en", "n_whitespaces": 297, "n_words": 85, "vocab_size": 58 }
73
Python
58
f3a451abab8fc89810b317ca0a88ee9fd12cb0c2
analyze_results.py
244,298
16
136
detection_evaluate
https://github.com/open-mmlab/mmdetection.git
[Feature] Support panoptic segmentation result analysis (#7922) * support analyze panoptic segmentation result * fix lint * update docstring * update docstring * set print_log=False by default * update * fix bug 8035
238
0
70,313
13