column          dtype          min     max
complexity      int64          1       139
fun_name        stringlengths  1       80
code            stringlengths  101     62.2k
commit_id       stringlengths  40      40
ast_errors      stringlengths  0       3.11k
ast_levels      int64          6       36
file_name       stringlengths  5       79
n_ast_nodes     int64          17      19.2k
commit_message  stringlengths  3       15.3k
d_id            int64          12      121k
n_ast_errors    int64          0       9
n_whitespaces   int64          4       10.8k
token_counts    int64          5       3.06k
vocab_size      int64          4       1.11k
id              int64          20      338k
n_words         int64          4       4.82k
repo            stringlengths  3       22
n_identifiers   int64          2       176
path            stringlengths  7       134
language        stringclasses  1 value
nloc            int64          1       413
documentation   dict
url             stringlengths  31      59
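For orientation, a minimal sketch of how records with this schema might be inspected. This is illustrative only: the local file name "code_data.parquet" and the use of pandas are assumptions, not part of the dataset itself.

# Illustrative sketch only: assumes the rows below are available locally as
# "code_data.parquet" (hypothetical path); pandas is used just for inspection.
import pandas as pd

df = pd.read_parquet("code_data.parquet")

# Each record pairs a function body ("code") with commit metadata and AST statistics.
for _, row in df.head(3).iterrows():
    print(row["repo"], row["path"], row["fun_name"], "complexity:", row["complexity"])
    print(row["code"][:200])  # first 200 characters of the stored source

The rows below follow the column order listed above, one value per field.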
3
spinner
def spinner(text="In progress..."):
    import streamlit.legacy_caching.caching as legacy_caching
    import streamlit.caching as caching
    from streamlit.elements.utils import clean_text
    from streamlit.proto.Spinner_pb2 import Spinner as SpinnerProto

    # @st.cache optionally uses spinner for long-running computations.
    # Normally, streamlit warns the user when they call st functions
    # from within an @st.cache'd function. But we do *not* want to show
    # these warnings for spinner's message, so we create and mutate this
    # message delta within the "suppress_cached_st_function_warning"
    # context.
    with legacy_caching.suppress_cached_st_function_warning():
        with caching.suppress_cached_st_function_warning():
            message = empty()

    try:
        # Set the message 0.1 seconds in the future to avoid annoying
        # flickering if this spinner runs too quickly.
        DELAY_SECS = 0.1
        display_message = True
        display_message_lock = _threading.Lock()
704eab3478cf69847825b23dabf15813a8ac9fa2
def spinner(text="In progress..."):
    """Temporarily displays a message while executing a block of code.

    Parameters
    ----------
    text : str
        A message to display while executing that block

    Example
    -------

    >>> with st.spinner('Wait for it...'):
    >>>     time.sleep(5)
    >>> st.success('Done!')

    """
    import streamlit.legacy_caching.caching as legacy_caching
    import streamlit.caching as caching
    from streamlit.elements.utils import clean_text
    from streamlit.proto.Spinner_pb2 import Spinner as SpinnerProto

    # @st.cache optionally uses spinner for long-running computations.
    # Normally, streamlit warns the user when they call st functions
    # from within an @st.cache'd function. But we do *not* want to show
    # these warnings for spinner's message, so we create and mutate this
    # message delta within the "suppress_cached_st_function_warning"
    # context.
    with legacy_caching.suppress_cached_st_function_warning():
        with caching.suppress_cached_st_function_warning():
            message = empty()

    try:
        # Set the message 0.1 seconds in the future to avoid annoying
        # flickering if this spinner runs too quickly.
        DELAY_SECS = 0.1
        display_message = True
        display_message_lock = _threading.Lock()
11
__init__.py
132
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
26,274
1
202
124
80
118,532
110
streamlit
20
lib/streamlit/__init__.py
Python
22
{ "docstring": "Temporarily displays a message while executing a block of code.\n\n Parameters\n ----------\n text : str\n A message to display while executing that block\n\n Example\n -------\n\n >>> with st.spinner('Wait for it...'):\n >>> time.sleep(5)\n >>> st.success('Done!')\n\n ", "language": "en", "n_whitespaces": 72, "n_words": 34, "vocab_size": 27 }
https://github.com/streamlit/streamlit.git
3
process_queue
def process_queue(self):
    if not self.queue:
        logger.debug(f"No queued changes; aborting")
        return
    logger.debug(f"Processing {len(self.queue)} queued changes")

    # Iterate through the in-memory queue, creating Change instances
    changes = []
    for key, change in self.queue.items():
        logger.debug(f' {key}: {change}')
        object_type, pk = key
        action, data = change

        changes.append(StagedChange(
            branch=self.branch,
            action=action,
            object_type=object_type,
            object_id=pk,
            data=data
        ))

    # Save all Change instances to the database
    StagedChange.objects.bulk_create(changes)


#
# Signal handlers
#
a5308ea28e851a4ddb65a4e7ca2297b641e5891f
13
staging.py
183
Closes #10851: New staging mechanism (#10890) * WIP * Convert checkout() context manager to a class * Misc cleanup * Drop unique constraint from Change model * Extend staging tests * Misc cleanup * Incorporate M2M changes * Don't cancel wipe out creation records when an object is deleted * Rename Change to StagedChange * Add documentation for change staging
78,296
0
281
98
52
266,109
63
netbox
20
netbox/netbox/staging.py
Python
18
{ "docstring": "\n Create Change instances for all actions stored in the queue.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/netbox-community/netbox.git
4
save
def save(self, commit=True):
    if self.errors:
        raise ValueError(
            "The %s could not be %s because the data didn't validate."
            % (
                self.instance._meta.object_name,
                "created" if self.instance._state.adding else "changed",
            )
        )
    if commit:
        # If committing, save the instance and the m2m data immediately.
        self.instance.save()
        self._save_m2m()
    else:
        # If not committing, add a method to the form to allow deferred
        # saving of m2m data.
        self.save_m2m = self._save_m2m
    return self.instance


save.alters_data = True
9c19aff7c7561e3a82978a272ecdaad40dda5c00
15
models.py
130
Refs #33476 -- Reformatted code with Black.
51,322
0
279
71
54
206,002
70
django
13
django/forms/models.py
Python
15
{ "docstring": "\n Save this form's self.instance object if commit=True. Otherwise, add\n a save_m2m() method to the form which can be called after the instance\n is saved manually at a later time. Return the model instance.\n ", "language": "en", "n_whitespaces": 62, "n_words": 33, "vocab_size": 30 }
https://github.com/django/django.git
14
solve
def solve(self, parameters=None, limit=None):
    self.pre_solve(parameters)

    coeff = self.coeff
    var = self.free_symbols

    if 1 in coeff:
        # negate coeff[] because input is of the form: ax + by + c == 0
        # but is used as: ax + by == -c
        c = -coeff[1]
    else:
        c = 0

    result = DiophantineSolutionSet(var, parameters=self.parameters)
    params = result.parameters

    if len(var) == 1:
        q, r = divmod(c, coeff[var[0]])
        if not r:
            result.add((q,))
            return result
        else:
            return result

    A = [coeff[v] for v in var]
    B = []
    if len(var) > 2:
        B.append(igcd(A[-2], A[-1]))
        A[-2] = A[-2] // B[0]
        A[-1] = A[-1] // B[0]
        for i in range(len(A) - 3, 0, -1):
            gcd = igcd(B[0], A[i])
            B[0] = B[0] // gcd
            A[i] = A[i] // gcd
            B.insert(0, gcd)
    B.append(A[-1])

    solutions = []
    for Ai, Bi in zip(A, B):
        tot_x, tot_y = [], []

        for j, arg in enumerate(Add.make_args(c)):
            if arg.is_Integer:
                # example: 5 -> k = 5
                k, p = arg, S.One
                pnew = params[0]
            else:  # arg is a Mul or Symbol
                # example: 3*t_1 -> k = 3
                # example: t_0 -> k = 1
                k, p = arg.as_coeff_Mul()
                pnew = params[params.index(p) + 1]

            sol = sol_x, sol_y = base_solution_linear(k, Ai, Bi, pnew)

            if p is S.One:
                if None in sol:
                    return result
            else:
                # convert a + b*pnew -> a*p + b*pnew
                if isinstance(sol_x, Add):
                    sol_x = sol_x.args[0]*p + sol_x.args[1]
                if isinstance(sol_y, Add):
                    sol_y = sol_y.args[0]*p + sol_y.args[1]

            tot_x.append(sol_x)
            tot_y.append(sol_y)

        solutions.append(Add(*tot_x))
        c = Add(*tot_y)

    solutions.append(c)
    result.add(solutions)
    return result
bd9f607176c58dfba01e27c05c2b7d49ff97c901
19
diophantine.py
727
Improve loop performance in solvers
48,926
0
1,068
454
136
198,419
246
sympy
51
sympy/solvers/diophantine/diophantine.py
Python
148
{ "docstring": "\n base_solution_linear() can solve diophantine equations of the form:\n\n a*x + b*y == c\n\n We break down multivariate linear diophantine equations into a\n series of bivariate linear diophantine equations which can then\n be solved individually by base_solution_linear().\n\n Consider the following:\n\n a_0*x_0 + a_1*x_1 + a_2*x_2 == c\n\n which can be re-written as:\n\n a_0*x_0 + g_0*y_0 == c\n\n where\n\n g_0 == gcd(a_1, a_2)\n\n and\n\n y == (a_1*x_1)/g_0 + (a_2*x_2)/g_0\n\n This leaves us with two binary linear diophantine equations.\n For the first equation:\n\n a == a_0\n b == g_0\n c == c\n\n For the second:\n\n a == a_1/g_0\n b == a_2/g_0\n c == the solution we find for y_0 in the first equation.\n\n The arrays A and B are the arrays of integers used for\n 'a' and 'b' in each of the n-1 bivariate equations we solve.\n \n Consider the trivariate linear equation:\n\n 4*x_0 + 6*x_1 + 3*x_2 == 2\n\n This can be re-written as:\n\n 4*x_0 + 3*y_0 == 2\n\n where\n\n y_0 == 2*x_1 + x_2\n (Note that gcd(3, 6) == 3)\n\n The complete integral solution to this equation is:\n\n x_0 == 2 + 3*t_0\n y_0 == -2 - 4*t_0\n\n where 't_0' is any integer.\n\n Now that we have a solution for 'x_0', find 'x_1' and 'x_2':\n\n 2*x_1 + x_2 == -2 - 4*t_0\n\n We can then solve for '-2' and '-4' independently,\n and combine the results:\n\n 2*x_1a + x_2a == -2\n x_1a == 0 + t_0\n x_2a == -2 - 2*t_0\n\n 2*x_1b + x_2b == -4*t_0\n x_1b == 0*t_0 + t_1\n x_2b == -4*t_0 - 2*t_1\n\n ==>\n\n x_1 == t_0 + t_1\n x_2 == -2 - 6*t_0 - 2*t_1\n\n where 't_0' and 't_1' are any integers.\n\n Note that:\n\n 4*(2 + 3*t_0) + 6*(t_0 + t_1) + 3*(-2 - 6*t_0 - 2*t_1) == 2\n\n for any integral values of 't_0', 't_1'; as required.\n\n This method is generalised for many variables, below.\n\n ", "language": "en", "n_whitespaces": 695, "n_words": 307, "vocab_size": 153 }
https://github.com/sympy/sympy.git
1
disable_bracketed_paste
def disable_bracketed_paste(self) -> None:
    self.console.file.write("\x1b[?2004l")
    self.console.file.flush()
fe151a7f25cfd7f1134ebafbddc7eeade1c18ccb
9
driver.py
50
Support for bracketed paste mode (#567) * Detecting bracketed paste, sending paste events * Bracketed pasting support in TextInput * Restore debugging conditional * Handle pasting of text in text-input, improve scrolling * Fix ordering of handling in parser for bracketed pastes * Docstrings * Add docstrings
44,330
0
27
27
6
183,771
6
textual
6
src/textual/driver.py
Python
5
{ "docstring": "Write the ANSI escape code `ESC[?2004l`, which\n disables bracketed paste mode.", "language": "en", "n_whitespaces": 17, "n_words": 11, "vocab_size": 11 }
https://github.com/Textualize/textual.git
2
get_training_arguments
def get_training_arguments(self) -> transformers.training_args.TrainingArguments:
    with self.as_directory() as checkpoint_path:
        training_args_path = os.path.join(checkpoint_path, TRAINING_ARGS_NAME)
        if os.path.exists(training_args_path):
            with open(training_args_path, "rb") as f:
                training_args = torch.load(f, map_location="cpu")
        else:
            training_args = None
    return training_args
ac1d21027da8a8c002cc7c28b8d1dc89c0d72fcf
16
huggingface_checkpoint.py
126
[AIR] Add framework-specific checkpoints (#26777)
27,836
0
132
72
23
125,331
29
ray
18
python/ray/train/huggingface/huggingface_checkpoint.py
Python
10
{ "docstring": "Retrieve the training arguments stored in this checkpoint.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/ray-project/ray.git
1
all_bursa
def all_bursa():
    path = os.path.join(os.path.dirname(__file__), "data/bursa_open_hours.json")
    bursa = pd.read_json(path)  # , orient="index")
    return bursa
33a041e5bf93ce93ab1a19adbc5ed74c2f1eb337
11
bursa_model.py
60
Trading hours stock feature (#1697)
84,728
0
27
34
12
284,458
14
OpenBBTerminal
9
openbb_terminal/stocks/tradinghours/bursa_model.py
Python
4
{ "docstring": "Get all exchanges from dictionary\n\n Parameters\n __________\n\n Returns\n _______\n pd.DataFrame\n All exchanges\n ", "language": "en", "n_whitespaces": 37, "n_words": 12, "vocab_size": 11 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
3
get_git_changeset
def get_git_changeset():
    # Repository may not be found if __file__ is undefined, e.g. in a frozen
    # module.
    if "__file__" not in globals():
        return None
    repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    git_log = subprocess.run(
        "git log --pretty=format:%ct --quiet -1 HEAD",
        capture_output=True,
        shell=True,
        cwd=repo_dir,
        text=True,
    )
    timestamp = git_log.stdout
    tz = datetime.timezone.utc
    try:
        timestamp = datetime.datetime.fromtimestamp(int(timestamp), tz=tz)
    except ValueError:
        return None
    return timestamp.strftime("%Y%m%d%H%M%S")


version_component_re = _lazy_re_compile(r"(\d+|[a-z]+|\.)")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
13
version.py
188
Refs #33476 -- Reformatted code with Black.
51,708
0
153
107
49
206,790
62
django
27
django/utils/version.py
Python
18
{ "docstring": "Return a numeric identifier of the latest git changeset.\n\n The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.\n This value isn't guaranteed to be unique, but collisions are very unlikely,\n so it's sufficient for generating the development version numbers.\n ", "language": "en", "n_whitespaces": 54, "n_words": 42, "vocab_size": 38 }
https://github.com/django/django.git
1
tournament_matrix
def tournament_matrix(G):
    r
    A = nx.adjacency_matrix(G)
    return A - A.T


@not_implemented_for("undirected")
@not_implemented_for("multigraph")
8a325d26aa7fdd3a72580c4720fa97f971bbefcb
@not_implemented_for("undirected") @not_implemented_for("multigraph")
8
tournament.py
56
Use scipy.sparse array datastructure (#6037) * Use scipy.sparse array datastructure * Add reminder to rm wrapper when scipy adds creation fns. * Rm mention of np matrix from code comment. * Update networkx/algorithms/bipartite/matrix.py Co-authored-by: Stefan van der Walt <[email protected]> Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Stefan van der Walt <[email protected]>
42,336
1
18
21
11
177,316
12
networkx
7
networkx/algorithms/tournament.py
Python
38
{ "docstring": "Returns the tournament matrix for the given tournament graph.\n\n This function requires SciPy.\n\n The *tournament matrix* of a tournament graph with edge set *E* is\n the matrix *T* defined by\n\n .. math::\n\n T_{i j} =\n \\begin{cases}\n +1 & \\text{if } (i, j) \\in E \\\\\n -1 & \\text{if } (j, i) \\in E \\\\\n 0 & \\text{if } i == j.\n \\end{cases}\n\n An equivalent definition is `T = A - A^T`, where *A* is the\n adjacency matrix of the graph `G`.\n\n Parameters\n ----------\n G : NetworkX graph\n A directed graph representing a tournament.\n\n Returns\n -------\n SciPy sparse array\n The tournament matrix of the tournament graph `G`.\n\n Raises\n ------\n ImportError\n If SciPy is not available.\n\n ", "language": "en", "n_whitespaces": 219, "n_words": 114, "vocab_size": 77 }
https://github.com/networkx/networkx.git
13
model_scaling
def model_scaling(layer_setting, arch_setting):
    # scale width
    new_layer_setting = copy.deepcopy(layer_setting)
    for layer_cfg in new_layer_setting:
        for block_cfg in layer_cfg:
            block_cfg[1] = make_divisible(block_cfg[1] * arch_setting[0], 8)

    # scale depth
    split_layer_setting = [new_layer_setting[0]]
    for layer_cfg in new_layer_setting[1:-1]:
        tmp_index = [0]
        for i in range(len(layer_cfg) - 1):
            if layer_cfg[i + 1][1] != layer_cfg[i][1]:
                tmp_index.append(i + 1)
        tmp_index.append(len(layer_cfg))
        for i in range(len(tmp_index) - 1):
            split_layer_setting.append(layer_cfg[tmp_index[i]:tmp_index[i + 1]])
    split_layer_setting.append(new_layer_setting[-1])

    num_of_layers = [len(layer_cfg) for layer_cfg in split_layer_setting[1:-1]]
    new_layers = [
        int(math.ceil(arch_setting[1] * num)) for num in num_of_layers
    ]

    merge_layer_setting = [split_layer_setting[0]]
    for i, layer_cfg in enumerate(split_layer_setting[1:-1]):
        if new_layers[i] <= num_of_layers[i]:
            tmp_layer_cfg = layer_cfg[:new_layers[i]]
        else:
            tmp_layer_cfg = copy.deepcopy(layer_cfg) + [layer_cfg[-1]] * (
                new_layers[i] - num_of_layers[i])
        if tmp_layer_cfg[0][3] == 1 and i != 0:
            merge_layer_setting[-1] += tmp_layer_cfg.copy()
        else:
            merge_layer_setting.append(tmp_layer_cfg.copy())
    merge_layer_setting.append(split_layer_setting[-1])

    return merge_layer_setting


@BACKBONES.register_module()
3f0f2a059743593fd07b629c261b609bd9a767e6
@BACKBONES.register_module()
16
efficientnet.py
510
[Feature] Support efficientnet in mmdetection. (#7514) * Initial implementation * Add missing import * Add MemoryEfficientSwishImplementation. Add docstrings * Add efficientnet2mmdet tool * Add config folder * Flake8 * Flake8 * Flake8 * Fix config * Requested changes * docformatter * Update train config from https://github.com/google/automl/blob/master/efficientdet * Run pre-commit * Fix schedule * Set by_epoch=False in scheduler * Train 80 epochs * Remove duplicated arg * Update README.md * efficient3 efficient0 * efficientNet imports * efficientNet * config edit path for eff3 and dropout for eff0 * efficientnet review2 * fix model_converter location and drop path * fix model converter and efficientnet import * register memoryefficietnswish * eff0, eff3 * fix flake8 yapf isort * same padding in tensorflow and edit drop path rate * fix init of utils * Align mmdet utils with mmcls * Align mmdet.models.utils with mmcls * Use mmcls efficientnet backbone * Update * Update * Update metafile Co-authored-by: David de la Iglesia Castro <[email protected]> Co-authored-by: David de la Iglesia Castro <[email protected]> Co-authored-by: jiangyitong <[email protected]> Co-authored-by: jiangyitong <[email protected]>
70,249
1
415
325
78
244,119
123
mmdetection
26
mmdet/models/backbones/efficientnet.py
Python
33
{ "docstring": "Scaling operation to the layer's parameters according to the\n arch_setting.", "language": "en", "n_whitespaces": 12, "n_words": 10, "vocab_size": 8 }
https://github.com/open-mmlab/mmdetection.git
17
polyfit
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
    _check_arraylike("polyfit", x, y)
    deg = core.concrete_or_error(int, deg, "deg must be int")
    order = deg + 1
    # check arguments
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")

    # set rcond
    if rcond is None:
        rcond = len(x) * finfo(x.dtype).eps
    rcond = core.concrete_or_error(float, rcond, "rcond must be float")

    # set up least squares equation for powers of x
    lhs = vander(x, order)
    rhs = y

    # apply weighting
    if w is not None:
        _check_arraylike("polyfit", w)
        w, = _promote_dtypes_inexact(w)
        if w.ndim != 1:
            raise TypeError("expected a 1-d array for weights")
        if w.shape[0] != y.shape[0]:
            raise TypeError("expected w and y to have the same length")
        lhs *= w[:, np.newaxis]
        if rhs.ndim == 2:
            rhs *= w[:, np.newaxis]
        else:
            rhs *= w

    # scale lhs to improve condition number and solve
    scale = sqrt((lhs*lhs).sum(axis=0))
    lhs /= scale[np.newaxis,:]
    c, resids, rank, s = linalg.lstsq(lhs, rhs, rcond)
    c = (c.T/scale).T  # broadcast scale coefficients

    if full:
        return c, resids, rank, s, rcond
    elif cov:
        Vbase = linalg.inv(dot(lhs.T, lhs))
        Vbase /= outer(scale, scale)
        if cov == "unscaled":
            fac = 1
        else:
            if len(x) <= order:
                raise ValueError("the number of data points must exceed order "
                                 "to scale the covariance matrix")
            fac = resids / (len(x) - order)
            fac = fac[0]  # making np.array() of shape (1,) to int
        if y.ndim == 1:
            return c, Vbase * fac
        else:
            return c, Vbase[:, :, np.newaxis] * fac
    else:
        return c


_POLY_DOC = 


@_wraps(np.poly, lax_description=_POLY_DOC)
@jit
603bb3c5ca288674579211e64fa47c6b2b0fb7a6
@_wraps(np.poly, lax_description=_POLY_DOC) @jit
17
polynomial.py
700
lax_numpy: move poly functions into numpy.polynomial
26,696
1
463
424
155
119,831
293
jax
50
jax/_src/numpy/polynomial.py
Python
54
{ "docstring": "\\\nThis differs from np.poly when an integer array is given.\nnp.poly returns a result with dtype float64 in this case.\njax returns a result with an inexact type, but not necessarily\nfloat64.\n\nThis also differs from np.poly when the input array strictly\ncontains pairs of complex conjugates, e.g. [1j, -1j, 1-1j, 1+1j].\nnp.poly returns an array with a real dtype in such cases.\njax returns an array with a complex dtype in such cases.\n", "language": "en", "n_whitespaces": 66, "n_words": 75, "vocab_size": 44 }
https://github.com/google/jax.git
1
key
def key(self) -> TaskInstanceKey:
    return TaskInstanceKey(self.dag_id, self.task_id, self.run_id, self.try_number, self.map_index)
6fc6edf6af7f676bfa54ff3a2e6e6d2edb938f2e
8
taskinstance.py
47
Make `airflow dags test` be able to execute Mapped Tasks (#21210) * Make `airflow dags test` be able to execute Mapped Tasks In order to do this there were two steps required: - The BackfillJob needs to know about mapped tasks, both to expand them, and in order to update it's TI tracking - The DebugExecutor needed to "unmap" the mapped task to get the real operator back I was testing this with the following dag: ``` from airflow import DAG from airflow.decorators import task from airflow.operators.python import PythonOperator import pendulum @task def make_list(): return list(map(lambda a: f'echo "{a!r}"', [1, 2, {'a': 'b'}])) def consumer(*args): print(repr(args)) with DAG(dag_id='maptest', start_date=pendulum.DateTime(2022, 1, 18)) as dag: PythonOperator(task_id='consumer', python_callable=consumer).map(op_args=make_list()) ``` It can't "unmap" decorated operators successfully yet, so we're using old-school PythonOperator We also just pass the whole value to the operator, not just the current mapping value(s) * Always have a `task_group` property on DAGNodes And since TaskGroup is a DAGNode, we don't need to store parent group directly anymore -- it'll already be stored * Add "integation" tests for running mapped tasks via BackfillJob * Only show "Map Index" in Backfill report when relevant Co-authored-by: Tzu-ping Chung <[email protected]>
8,256
0
24
31
10
44,419
10
airflow
8
airflow/models/taskinstance.py
Python
3
{ "docstring": "Returns a tuple that identifies the task instance uniquely", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/apache/airflow.git
3
_check_guts_toc_mtime
def _check_guts_toc_mtime(attr_name, old_toc, new_toc, last_build):
    for dest_name, src_name, typecode in old_toc:
        if misc.mtime(src_name) > last_build:
            logger.info("Building because %s changed", src_name)
            return True
    return False
f5925fa56f713e78ab5723de2a58195ca346847f
12
utils.py
67
building: cleanup remove pyc argument from _check_guts_toc_mtime The only place where we use `_check_guts_toc_mtime` with `pyc` argument enabled is when checking the `Analysis.pure` TOC, and the source names of those entries already point to source .py files. So shortening the filenames by one character results in checking for non-existant .p files. Even if an entry happened to point to a .pyc file, it is highly unlikely that there would be an adjacent .py file available, because under contemporary python 3 versions, that would hide the .pyc file from the loader.
77,596
0
62
43
23
264,086
24
pyinstaller
12
PyInstaller/building/utils.py
Python
6
{ "docstring": "\n Rebuild is required if mtimes of files listed in old TOC are newer than last_build.\n\n Use this for calculated/analysed values read from cache.\n ", "language": "en", "n_whitespaces": 33, "n_words": 23, "vocab_size": 23 }
https://github.com/pyinstaller/pyinstaller.git
6
fit
def fit(self, X, y=None):
    X = self._validate_data(
        X, accept_sparse=["csr", "csc"], dtype=[np.float64, np.float32]
    )

    n_samples, n_features = X.shape

    if self.n_components == "auto":
        self.n_components_ = johnson_lindenstrauss_min_dim(
            n_samples=n_samples, eps=self.eps
        )

        if self.n_components_ <= 0:
            raise ValueError(
                "eps=%f and n_samples=%d lead to a target dimension of "
                "%d which is invalid" % (self.eps, n_samples, self.n_components_)
            )

        elif self.n_components_ > n_features:
            raise ValueError(
                "eps=%f and n_samples=%d lead to a target dimension of "
                "%d which is larger than the original space with "
                "n_features=%d" % (self.eps, n_samples, self.n_components_, n_features)
            )
    else:
        if self.n_components <= 0:
            raise ValueError(
                "n_components must be greater than 0, got %s" % self.n_components
            )

        elif self.n_components > n_features:
            warnings.warn(
                "The number of components is higher than the number of"
                " features: n_features < n_components (%s < %s)."
                "The dimensionality of the problem will not be reduced."
                % (n_features, self.n_components),
                DataDimensionalityWarning,
            )

        self.n_components_ = self.n_components

    # Generate a projection matrix of size [n_components, n_features]
    self.components_ = self._make_random_matrix(
        self.n_components_, n_features
    ).astype(X.dtype, copy=False)

    # Check contract
    assert self.components_.shape == (self.n_components_, n_features), (
        "An error has occurred the self.components_ matrix has "
        " not the proper shape."
    )

    return self
8b6b519caf3b3b9602958a859b4d3a7eb1d9eadd
16
random_projection.py
357
ENH Preserving dtype for np.float32 in RandomProjection (#22114) Co-authored-by: takoika <> Co-authored-by: Thomas J. Fan <[email protected]>
75,247
0
760
220
109
258,487
185
scikit-learn
25
sklearn/random_projection.py
Python
43
{ "docstring": "Generate a sparse random projection matrix.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Training set: only the shape is used to find optimal random\n matrix dimensions based on the theory referenced in the\n afore mentioned papers.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n BaseRandomProjection class instance.\n ", "language": "en", "n_whitespaces": 171, "n_words": 60, "vocab_size": 53 }
https://github.com/scikit-learn/scikit-learn.git
2
bisectors
def bisectors(self):
    # use lines containing sides so containment check during
    # intersection calculation can be avoided, thus reducing
    # the processing time for calculating the bisectors
    s = [Line(l) for l in self.sides]
    v = self.vertices
    c = self.incenter
    l1 = Segment(v[0], Line(v[0], c).intersection(s[1])[0])
    l2 = Segment(v[1], Line(v[1], c).intersection(s[2])[0])
    l3 = Segment(v[2], Line(v[2], c).intersection(s[0])[0])
    return {v[0]: l1, v[1]: l2, v[2]: l3}
498015021131af4dbb07eb110e5badaba8250c7b
14
polygon.py
213
Updated import locations
47,800
0
139
143
53
196,300
62
sympy
15
sympy/geometry/polygon.py
Python
8
{ "docstring": "The angle bisectors of the triangle.\n\n An angle bisector of a triangle is a straight line through a vertex\n which cuts the corresponding angle in half.\n\n Returns\n =======\n\n bisectors : dict\n Each key is a vertex (Point) and each value is the corresponding\n bisector (Segment).\n\n See Also\n ========\n\n sympy.geometry.point.Point, sympy.geometry.line.Segment\n\n Examples\n ========\n\n >>> from sympy import Point, Triangle, Segment\n >>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)\n >>> t = Triangle(p1, p2, p3)\n >>> from sympy import sqrt\n >>> t.bisectors()[p2] == Segment(Point(1, 0), Point(0, sqrt(2) - 1))\n True\n\n ", "language": "en", "n_whitespaces": 232, "n_words": 91, "vocab_size": 63 }
https://github.com/sympy/sympy.git
7
get_success_response
def get_success_response(self, *args, **params):
    status_code = params.pop("status_code", None)

    if status_code and status_code >= 400:
        raise Exception("status_code must be < 400")

    method = params.pop("method", self.method).lower()

    response = self.get_response(*args, method=method, **params)

    if status_code:
        assert_status_code(response, status_code)
    elif method == "get":
        assert_status_code(response, status.HTTP_200_OK)
    # TODO(mgaeta): Add the other methods.
    # elif method == "post":
    #     assert_status_code(response, status.HTTP_201_CREATED)
    elif method == "put":
        assert_status_code(response, status.HTTP_200_OK)
    elif method == "delete":
        assert_status_code(response, status.HTTP_204_NO_CONTENT)
    else:
        # TODO(mgaeta): Add other methods.
        assert_status_code(response, 200, 300)

    return response
a68089d62f514557ec38e3744593e20af484e5e2
11
cases.py
212
ref(tests): Infer `status_code` from `method` (#34825)
18,674
0
255
126
47
90,548
76
sentry
15
src/sentry/testutils/cases.py
Python
17
{ "docstring": "\n Call `get_response` (see above) and assert the response's status code.\n\n :param params:\n * status_code: (Optional) Assert that the response's status code is\n a specific code. Omit to assert any successful status_code.\n :returns Response object\n ", "language": "en", "n_whitespaces": 85, "n_words": 34, "vocab_size": 29 }
https://github.com/getsentry/sentry.git
1
starmap
def starmap(self, func, iterable, chunksize=None):
    return self._map_async(
        func, iterable, chunksize=chunksize, unpack_args=True
    ).get()
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
10
pool.py
52
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,954
0
44
35
10
133,189
12
ray
8
python/ray/util/multiprocessing/pool.py
Python
4
{ "docstring": "Same as `map`, but unpacks each element of the iterable as the\n arguments to func like: [func(*args) for args in iterable].\n ", "language": "en", "n_whitespaces": 35, "n_words": 21, "vocab_size": 19 }
https://github.com/ray-project/ray.git
1
_get_char_x
def _get_char_x(self, linelength):
    return linelength + self.image_pad + self.line_number_width
f3166e673fe8d40277b804d35d77dcdb760fc3b3
8
img.py
30
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,344
0
23
18
8
20,359
9
pipenv
5
pipenv/patched/notpip/_vendor/pygments/formatters/img.py
Python
2
{ "docstring": "\n Get the X coordinate of a character position.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
https://github.com/pypa/pipenv.git
1
test_evaluate_word_analogies
def test_evaluate_word_analogies(self):
    model = word2vec.Word2Vec(LeeCorpus())
    score, sections = model.wv.evaluate_word_analogies(datapath('questions-words.txt'))
    score_cosmul, sections_cosmul = model.wv.evaluate_word_analogies(
        datapath('questions-words.txt'),
        similarity_function='most_similar_cosmul'
    )
    self.assertEqual(score, score_cosmul)
    self.assertEqual(sections, sections_cosmul)
    self.assertGreaterEqual(score, 0.0)
    self.assertLessEqual(score, 1.0)
    self.assertGreater(len(sections), 0)
    # Check that dict contains the right keys
    first_section = sections[0]
    self.assertIn('section', first_section)
    self.assertIn('correct', first_section)
    self.assertIn('incorrect', first_section)
ac3bbcdf87b263f79d5e19cce173e6c709a15f9d
11
test_word2vec.py
206
streamlining most_similar_cosmul and evaluate_word_analogies (#2656) * streamlining most_similar_cosmul * Fix PR requested changes and add unit test * fix merge artifacts Co-authored-by: n3hrox <[email protected]> Co-authored-by: Michael Penkov <[email protected]>
1,666
0
170
127
38
9,740
43
gensim
21
gensim/test/test_word2vec.py
Python
16
{ "docstring": "Test that evaluating analogies on KeyedVectors give sane results", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/RaRe-Technologies/gensim.git
2
get_default_locale
def get_default_locale(self):
    parent = self.get_parent()
    if parent is not None:
        return (
            parent.specific_class.objects.defer()
            .select_related("locale")
            .get(id=parent.id)
            .locale
        )
    return super().get_default_locale()
d10f15e55806c6944827d801cd9c2d53f5da4186
17
__init__.py
94
Reformat with black
16,124
0
129
55
17
73,813
19
wagtail
12
wagtail/core/models/__init__.py
Python
10
{ "docstring": "\n Finds the default locale to use for this page.\n\n This will be called just before the initial save.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 17 }
https://github.com/wagtail/wagtail.git
4
_wait_for_data
async def _wait_for_data(self, func_name):
    # StreamReader uses a future to link the protocol feed_data() method
    # to a read coroutine. Running two read coroutines at the same time
    # would have an unexpected behaviour. It would not possible to know
    # which coroutine would get the next data.
    if self._waiter is not None:
        raise RuntimeError(
            f'{func_name}() called while another coroutine is '
            f'already waiting for incoming data')

    assert not self._eof, '_wait_for_data after EOF'

    # Waiting for data while paused will make deadlock, so prevent it.
    # This is essential for readexactly(n) for case when n > self._limit.
    if self._paused:
        self._paused = False
        self._transport.resume_reading()

    self._waiter = self._loop.create_future()
    try:
        await self._waiter
    finally:
        self._waiter = None
8198943edd73a363c266633e1aa5b2a9e9c9f526
12
streams.py
133
add python 3.10.4 for windows
56,110
0
289
72
85
220,750
113
XX-Net
11
python3.10.4/Lib/asyncio/streams.py
Python
14
{ "docstring": "Wait until feed_data() or feed_eof() is called.\n\n If stream was paused, automatically resume it.\n ", "language": "en", "n_whitespaces": 28, "n_words": 14, "vocab_size": 14 }
https://github.com/XX-net/XX-Net.git
5
chi2
def chi2(X, y):
    # XXX: we might want to do some of the following in logspace instead for
    # numerical stability.
    X = check_array(X, accept_sparse="csr")
    if np.any((X.data if issparse(X) else X) < 0):
        raise ValueError("Input X must be non-negative.")

    # Use a sparse representation for Y by default to reduce memory usage when
    # y has many unique classes.
    Y = LabelBinarizer(sparse_output=True).fit_transform(y)
    if Y.shape[1] == 1:
        Y = Y.toarray()
        Y = np.append(1 - Y, Y, axis=1)

    observed = safe_sparse_dot(Y.T, X)  # n_classes * n_features

    if issparse(observed):
        # convert back to a dense array before calling _chisquare
        # XXX: could _chisquare be reimplement to accept sparse matrices for
        # cases where both n_classes and n_features are large (and X is
        # sparse)?
        observed = observed.toarray()

    feature_count = X.sum(axis=0).reshape(1, -1)
    class_prob = Y.mean(axis=0).reshape(1, -1)
    expected = np.dot(class_prob.T, feature_count)

    return _chisquare(observed, expected)
432778464cbffc8ca675c1df786c31f8c23fc62c
12
_univariate_selection.py
275
[MRG] chi2: reduce memory footprint (#21837) * added sparse_output=True to LabelBinarizer in chi2 * added changelog entry * Update sklearn/feature_selection/_univariate_selection.py Co-authored-by: Olivier Grisel <[email protected]> * Update sklearn/feature_selection/_univariate_selection.py Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Wagner, Louis <[email protected]> Co-authored-by: Olivier Grisel <[email protected]>
75,276
0
241
168
99
258,531
139
scikit-learn
29
sklearn/feature_selection/_univariate_selection.py
Python
15
{ "docstring": "Compute chi-squared stats between each non-negative feature and class.\n\n This score can be used to select the n_features features with the\n highest values for the test chi-squared statistic from X, which must\n contain only non-negative features such as booleans or frequencies\n (e.g., term counts in document classification), relative to the classes.\n\n Recall that the chi-square test measures dependence between stochastic\n variables, so using this function \"weeds out\" the features that are the\n most likely to be independent of class and therefore irrelevant for\n classification.\n\n Read more in the :ref:`User Guide <univariate_feature_selection>`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Sample vectors.\n\n y : array-like of shape (n_samples,)\n Target vector (class labels).\n\n Returns\n -------\n chi2 : ndarray of shape (n_features,)\n Chi2 statistics for each feature.\n\n p_values : ndarray of shape (n_features,)\n P-values for each feature.\n\n Notes\n -----\n Complexity of this algorithm is O(n_classes * n_features).\n\n See Also\n --------\n f_classif : ANOVA F-value between label/feature for classification tasks.\n f_regression : F-value between label/feature for regression tasks.\n ", "language": "en", "n_whitespaces": 270, "n_words": 167, "vocab_size": 119 }
https://github.com/scikit-learn/scikit-learn.git
3
equals
def equals(self, word1, word2):
    if self.reduce(word1*word2**-1) == self.identity:
        return True
    elif self._rewriting_system.is_confluent:
        return False
    return None
65be461082dda54c8748922f9c29a19af1279fe1
11
fp_groups.py
64
Remove abbreviations in documentation
48,440
0
66
40
14
197,297
16
sympy
8
sympy/combinatorics/fp_groups.py
Python
6
{ "docstring": "\n Compare `word1` and `word2` for equality in the group\n using the group's rewriting system. If the system is\n confluent, the returned answer is necessarily correct.\n (If it is not, `False` could be returned in some cases\n where in fact `word1 == word2`)\n\n ", "language": "en", "n_whitespaces": 85, "n_words": 42, "vocab_size": 34 }
https://github.com/sympy/sympy.git
4
now
def now(parser, token):
    bits = token.split_contents()
    asvar = None
    if len(bits) == 4 and bits[-2] == "as":
        asvar = bits[-1]
        bits = bits[:-2]
    if len(bits) != 2:
        raise TemplateSyntaxError("'now' statement takes one argument")
    format_string = bits[1][1:-1]
    return NowNode(format_string, asvar)


@register.tag
9c19aff7c7561e3a82978a272ecdaad40dda5c00
@register.tag
11
defaulttags.py
141
Refs #33476 -- Reformatted code with Black.
51,441
1
81
81
31
206,250
40
django
12
django/template/defaulttags.py
Python
10
{ "docstring": "\n Display the date, formatted according to the given string.\n\n Use the same format as PHP's ``date()`` function; see https://php.net/date\n for all the possible values.\n\n Sample usage::\n\n It is {% now \"jS F Y H:i\" %}\n ", "language": "en", "n_whitespaces": 58, "n_words": 35, "vocab_size": 32 }
https://github.com/django/django.git
1
exradii
def exradii(self):
    side = self.sides
    a = side[0].length
    b = side[1].length
    c = side[2].length
    s = (a+b+c)/2
    area = self.area
    exradii = {self.sides[0]: simplify(area/(s-a)),
               self.sides[1]: simplify(area/(s-b)),
               self.sides[2]: simplify(area/(s-c))}

    return exradii
498015021131af4dbb07eb110e5badaba8250c7b
13
polygon.py
169
Updated import locations
47,810
0
129
110
23
196,310
30
sympy
11
sympy/geometry/polygon.py
Python
11
{ "docstring": "The radius of excircles of a triangle.\n\n An excircle of the triangle is a circle lying outside the triangle,\n tangent to one of its sides and tangent to the extensions of the\n other two.\n\n Returns\n =======\n\n exradii : dict\n\n See Also\n ========\n\n sympy.geometry.polygon.Triangle.inradius\n\n Examples\n ========\n\n The exradius touches the side of the triangle to which it is keyed, e.g.\n the exradius touching side 2 is:\n\n >>> from sympy import Point, Triangle\n >>> p1, p2, p3 = Point(0, 0), Point(6, 0), Point(0, 2)\n >>> t = Triangle(p1, p2, p3)\n >>> t.exradii[t.sides[2]]\n -2 + sqrt(10)\n\n References\n ==========\n\n [1] http://mathworld.wolfram.com/Exradius.html\n [2] http://mathworld.wolfram.com/Excircles.html\n\n ", "language": "en", "n_whitespaces": 260, "n_words": 99, "vocab_size": 71 }
https://github.com/sympy/sympy.git
2
col_swap
def col_swap(self, i, j):
    for k in range(0, self.rows):
        self[k, i], self[k, j] = self[k, j], self[k, i]
59d22b6bb7287613d598611027f640d068ca5748
10
repmatrix.py
69
Moved imports to higher level
47,898
0
43
49
15
196,398
18
sympy
7
sympy/matrices/repmatrix.py
Python
3
{ "docstring": "Swap the two given columns of the matrix in-place.\n\n Examples\n ========\n\n >>> from sympy import Matrix\n >>> M = Matrix([[1, 0], [1, 0]])\n >>> M\n Matrix([\n [1, 0],\n [1, 0]])\n >>> M.col_swap(0, 1)\n >>> M\n Matrix([\n [0, 1],\n [0, 1]])\n\n See Also\n ========\n\n col\n row_swap\n ", "language": "en", "n_whitespaces": 171, "n_words": 45, "vocab_size": 31 }
https://github.com/sympy/sympy.git
2
renderable
def renderable(self) -> RenderableType:
    renderable = self.get_renderable()
    return Screen(renderable) if self._alt_screen else renderable
f3166e673fe8d40277b804d35d77dcdb760fc3b3
8
live.py
44
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,534
0
34
26
12
20,769
13
pipenv
6
pipenv/patched/notpip/_vendor/rich/live.py
Python
8
{ "docstring": "Get the renderable that is being displayed\n\n Returns:\n RenderableType: Displayed renderable.\n ", "language": "en", "n_whitespaces": 36, "n_words": 11, "vocab_size": 11 }
https://github.com/pypa/pipenv.git
3
get_base_snippet_action_menu_items
def get_base_snippet_action_menu_items(model):
    menu_items = [
        SaveMenuItem(order=0),
        DeleteMenuItem(order=10),
    ]

    for hook in hooks.get_hooks("register_snippet_action_menu_item"):
        action_menu_item = hook(model)
        if action_menu_item:
            menu_items.append(action_menu_item)

    return menu_items
d10f15e55806c6944827d801cd9c2d53f5da4186
11
action_menu.py
85
Reformat with black
16,441
0
74
51
18
75,927
20
wagtail
11
wagtail/snippets/action_menu.py
Python
10
{ "docstring": "\n Retrieve the global list of menu items for the snippet action menu,\n which may then be customised on a per-request basis\n ", "language": "en", "n_whitespaces": 31, "n_words": 21, "vocab_size": 20 }
https://github.com/wagtail/wagtail.git
1
has_key
def has_key(self, key, version=None):
    return (
        self.get(key, self._missing_key, version=version) is not self._missing_key
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
base.py
51
Refs #33476 -- Reformatted code with Black.
50,721
0
45
34
13
204,387
13
django
6
django/core/cache/backends/base.py
Python
4
{ "docstring": "\n Return True if the key is in the cache and has not expired.\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 12 }
https://github.com/django/django.git
4
_read_all_pages
def _read_all_pages(self, endpoint):
    internal_data = []

    while True:
        resp = self._session.get(endpoint)
        if resp.status_code == 200:
            internal_data += resp.json()
            if "next" in resp.links:
                endpoint = resp.links["next"]["url"]
            else:
                logger.debug("Exiting pagination loop")
                break
        else:
            logger.warning(f"Request to {endpoint} return HTTP {resp.status_code}")
            break

    return internal_data
0fdd3d56f43c8442a0c9ecd3cad07a88137ff7de
15
cleanup-tags.py
149
Changes the cleanup images workflow so it uses a OAuth token with the correct scope (GITHUB_TOKEN is not enough). Also prevents running if the token is not defined and generally does commenting/cleanups"
117,014
0
233
78
32
319,887
40
paperless-ngx
13
.github/scripts/cleanup-tags.py
Python
15
{ "docstring": "\n Internal function to read all pages of an endpoint, utilizing the\n next.url until exhausted\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 14 }
https://github.com/paperless-ngx/paperless-ngx.git
3
_build_template
def _build_template(name, template, files, config, nav):
    # Run `pre_template` plugin events.
    template = config['plugins'].run_event(
        'pre_template', template, template_name=name, config=config
    )

    if utils.is_error_template(name):
        # Force absolute URLs in the nav of error pages and account for the
        # possibility that the docs root might be different than the server root.
        # See https://github.com/mkdocs/mkdocs/issues/77.
        # However, if site_url is not set, assume the docs root and server root
        # are the same. See https://github.com/mkdocs/mkdocs/issues/1598.
        base_url = urlsplit(config['site_url'] or '/').path
    else:
        base_url = utils.get_relative_url('.', name)

    context = get_context(nav, files, config, base_url=base_url)

    # Run `template_context` plugin events.
    context = config['plugins'].run_event(
        'template_context', context, template_name=name, config=config
    )

    output = template.render(context)

    # Run `post_template` plugin events.
    output = config['plugins'].run_event('post_template', output, template_name=name, config=config)

    return output
dca7cbb43fcd6ea7c677c98ba585395b070d387b
14
build.py
221
Format code with `black -l100 --skip-string-normalization`
57,244
0
221
134
73
224,209
116
mkdocs
18
mkdocs/commands/build.py
Python
15
{ "docstring": "\n Return rendered output for given template as a string.\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
https://github.com/mkdocs/mkdocs.git
2
get_build_datetime
def get_build_datetime() -> datetime:
    source_date_epoch = os.environ.get('SOURCE_DATE_EPOCH')
    if source_date_epoch is None:
        return datetime.now(timezone.utc)

    return datetime.fromtimestamp(int(source_date_epoch), timezone.utc)
df3739d51903ab56771ac071a05b5aa9cdf9e129
10
__init__.py
76
Add a lot more type annotations, fix new type warnings (#2970) (including some behavior changes, assumed to be no-op) This is based on auto-generated annotations from "monkeytype".
57,432
0
35
45
14
224,944
16
mkdocs
11
mkdocs/utils/__init__.py
Python
11
{ "docstring": "\n Returns an aware datetime object.\n\n Support SOURCE_DATE_EPOCH environment variable for reproducible builds.\n See https://reproducible-builds.org/specs/source-date-epoch/\n ", "language": "en", "n_whitespaces": 27, "n_words": 14, "vocab_size": 14 }
https://github.com/mkdocs/mkdocs.git
2
test_read_video_from_file_audio_resampling
def test_read_video_from_file_audio_resampling(self, test_video, samples):
    # video related
    width, height, min_dimension, max_dimension = 0, 0, 0, 0
    video_start_pts, video_end_pts = 0, -1
    video_timebase_num, video_timebase_den = 0, 1
    # audio related
    channels = 0
    audio_start_pts, audio_end_pts = 0, -1
    audio_timebase_num, audio_timebase_den = 0, 1

    full_path = os.path.join(VIDEO_DIR, test_video)

    tv_result = torch.ops.video_reader.read_video_from_file(
        full_path,
        SEEK_FRAME_MARGIN,
        0,  # getPtsOnly
        1,  # readVideoStream
        width,
        height,
        min_dimension,
        max_dimension,
        video_start_pts,
        video_end_pts,
        video_timebase_num,
        video_timebase_den,
        1,  # readAudioStream
        samples,
        channels,
        audio_start_pts,
        audio_end_pts,
        audio_timebase_num,
        audio_timebase_den,
    )
    (
        vframes,
        vframe_pts,
        vtimebase,
        vfps,
        vduration,
        aframes,
        aframe_pts,
        atimebase,
        asample_rate,
        aduration,
    ) = tv_result
    if aframes.numel() > 0:
        assert samples == asample_rate.item()
        assert 1 == aframes.size(1)
        # when audio stream is found
        duration = float(aframe_pts[-1]) * float(atimebase[0]) / float(atimebase[1])
        assert aframes.size(0) == approx(int(duration * asample_rate.item()), abs=0.1 * asample_rate.item())
c50d48845f7b1ca86d6a3b7f37a59be0ae11e36b
15
test_video_reader.py
327
Improve test_video_reader (#5498) * Improve test_video_reader * Fix linter error
46,871
0
605
228
80
192,301
123
vision
46
test/test_video_reader.py
Python
46
{ "docstring": "\n Test the case when decoder starts with a video file to decode frames, and\n audio waveform are resampled\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 18 }
https://github.com/pytorch/vision.git
6
get_data
def get_data(filters):
    data = []

    if erpnext.get_region() == "India":
        employee_pan_dict = frappe._dict(
            frappe.db.sql()
        )

    component_types = frappe.db.sql(
    )

    component_types = [comp_type[0] for comp_type in component_types]

    if not len(component_types):
        return []

    conditions = get_conditions(filters)

    entry = frappe.db.sql(
        % (conditions, ", ".join(["%s"] * len(component_types))),
        tuple(component_types),
        as_dict=1,
    )

    for d in entry:
        employee = {
            "employee": d.employee,
            "employee_name": d.employee_name,
            "it_comp": d.salary_component,
            "posting_date": d.posting_date,
            # "pan_number": employee_pan_dict.get(d.employee),
            "it_amount": d.amount,
            "gross_pay": d.gross_pay,
        }

        if erpnext.get_region() == "India":
            employee["pan_number"] = employee_pan_dict.get(d.employee)

        data.append(employee)

    return data
494bd9ef78313436f0424b918f200dab8fc7c20b
15
income_tax_deductions.py
310
style: format code with black
14,386
0
47
184
57
66,950
78
erpnext
28
erpnext/payroll/report/income_tax_deductions/income_tax_deductions.py
Python
40
{ "docstring": " select employee, pan_number from `tabEmployee` select name from `tabSalary Component`\n\t\twhere is_income_tax_component = 1 select sal.employee, sal.employee_name, sal.posting_date, ded.salary_component, ded.amount,sal.gross_pay\n\t\tfrom `tabSalary Slip` sal, `tabSalary Detail` ded\n\t\twhere sal.name = ded.parent\n\t\tand ded.parentfield = 'deductions'\n\t\tand ded.parenttype = 'Salary Slip'\n\t\tand sal.docstatus = 1 %s\n\t\tand ded.salary_component in (%s)\n\t", "language": "en", "n_whitespaces": 43, "n_words": 49, "vocab_size": 34 }
https://github.com/frappe/erpnext.git
2
smart_str
def smart_str(s, encoding="utf-8", strings_only=False, errors="strict"):
    if isinstance(s, Promise):
        # The input is the result of a gettext_lazy() call.
        return s
    return force_str(s, encoding, strings_only, errors)


_PROTECTED_TYPES = (
    type(None),
    int,
    float,
    Decimal,
    datetime.datetime,
    datetime.date,
    datetime.time,
)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
7
encoding.py
97
Refs #33476 -- Reformatted code with Black.
51,599
0
78
39
35
206,640
36
django
16
django/utils/encoding.py
Python
4
{ "docstring": "\n Return a string representing 's'. Treat bytestrings using the 'encoding'\n codec.\n\n If strings_only is True, don't convert (some) non-string-like objects.\n ", "language": "en", "n_whitespaces": 33, "n_words": 20, "vocab_size": 20 }
https://github.com/django/django.git
2
mayDisableConsoleWindow
def mayDisableConsoleWindow():
    # TODO: What about MSYS2?
    return isWin32Windows() or isMacOS()
613c31d98f20bdd9a4e5884c99826a06a3328438
8
Options.py
27
Standalone: Added support for requiring modes * For wx on macOS, console must be disabled, avoid the trap. * For the PySide2, on macOS the --onefile must be used when the application bundle is built or else signing has issues. * Recommend to use new option --disable-console for PySide2, PySide6 and wx on non-macOS
42,834
0
20
13
11
178,818
11
Nuitka
3
nuitka/Options.py
Python
2
{ "docstring": ":returns: bool derived from platform support of disabling the console,", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/Nuitka/Nuitka.git
1
test_as_ignores_mau
def test_as_ignores_mau(self):
    # Create and sync so that the MAU counts get updated
    token1 = self.create_user("kermit1")
    self.do_sync_for_user(token1)
    token2 = self.create_user("kermit2")
    self.do_sync_for_user(token2)

    # check we're testing what we think we are: there should be two active users
    self.assertEqual(self.get_success(self.store.get_monthly_active_count()), 2)

    # We've created and activated two users, we shouldn't be able to
    # register new users
    with self.assertRaises(SynapseError) as cm:
        self.create_user("kermit3")

    e = cm.exception
    self.assertEqual(e.code, 403)
    self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)

    # Cheekily add an application service that we use to register a new user
    # with.
    as_token = "foobartoken"
    self.store.services_cache.append(
        ApplicationService(
            token=as_token,
            id="SomeASID",
            sender="@as_sender:test",
            namespaces={"users": [{"regex": "@as_*", "exclusive": True}]},
        )
    )

    self.create_user("as_kermit4", token=as_token, appservice=True)
7bc08f320147a1d80371eb13258328c88073fad0
16
test_mau.py
272
Remove remaining bits of groups code. (#12936) * Update worker docs to remove group endpoints. * Removes an unused parameter to `ApplicationService`. * Break dependency between media repo and groups. * Avoid copying `m.room.related_groups` state events during room upgrades.
72,300
0
333
163
79
248,480
100
synapse
28
tests/test_mau.py
Python
22
{ "docstring": "Test that application services can still create users when the MAU\n limit has been reached. This only works when application service\n user ip tracking is disabled.\n ", "language": "en", "n_whitespaces": 47, "n_words": 26, "vocab_size": 24 }
https://github.com/matrix-org/synapse.git
2
get_region_to_control_producer
def get_region_to_control_producer() -> KafkaProducer:
    global _publisher
    if _publisher is None:
        config = settings.KAFKA_TOPICS.get(settings.KAFKA_REGION_TO_CONTROL)
        _publisher = KafkaProducer(
            kafka_config.get_kafka_producer_cluster_options(config["cluster"])
        )
941184cd24186324fd9f7f304b7f713041834726
14
producer.py
69
chore(hybrid-cloud): AuditLogEntry is a control silo model now (#39890) In the control silo, creating an audit log entry writes to the db directly, whilst in region silo mode creating an audit log entry will instead push to a new kafka producer that consumes into the control silo asynchronously.
18,181
0
59
48
15
86,878
18
sentry
10
src/sentry/region_to_control/producer.py
Python
14
{ "docstring": "\n Creates, if necessary, an arroyo.KafkaProducer client configured for region to control communication and returns\n it, caching it for future calls. Installs an exit handler to close the worker thread processes.\n ", "language": "en", "n_whitespaces": 41, "n_words": 30, "vocab_size": 27 }
https://github.com/getsentry/sentry.git
1
get_subplot
def get_subplot(self, row, col, secondary_y=False):
    from plotly._subplots import _get_grid_subplot

    return _get_grid_subplot(self, row, col, secondary_y)


# Child property operations
# -------------------------
5dc67fa7a7314cab97d4c96a30fdf4c5661c9039
7
basedatatypes.py
47
fix subplot imports
68,972
0
47
31
17
240,862
20
plotly.py
8
packages/python/plotly/plotly/basedatatypes.py
Python
3
{ "docstring": "\n Return an object representing the subplot at the specified row\n and column. May only be used on Figures created using\n plotly.tools.make_subplots\n\n Parameters\n ----------\n row: int\n 1-based index of subplot row\n col: int\n 1-based index of subplot column\n secondary_y: bool\n If True, select the subplot that consists of the x-axis and the\n secondary y-axis at the specified row/col. Only valid if the\n subplot at row/col is an 2D cartesian subplot that was created\n with a secondary y-axis. See the docstring for the specs argument\n to make_subplots for more info on creating a subplot with a\n secondary y-axis.\n Returns\n -------\n subplot\n * None: if subplot is empty\n * plotly.graph_objs.layout.Scene: if subplot type is 'scene'\n * plotly.graph_objs.layout.Polar: if subplot type is 'polar'\n * plotly.graph_objs.layout.Ternary: if subplot type is 'ternary'\n * plotly.graph_objs.layout.Mapbox: if subplot type is 'ternary'\n * SubplotDomain namedtuple with `x` and `y` fields:\n if subplot type is 'domain'.\n - x: length 2 list of the subplot start and stop width\n - y: length 2 list of the subplot start and stop height\n * SubplotXY namedtuple with `xaxis` and `yaxis` fields:\n if subplot type is 'xy'.\n - xaxis: plotly.graph_objs.layout.XAxis instance for subplot\n - yaxis: plotly.graph_objs.layout.YAxis instance for subplot\n ", "language": "en", "n_whitespaces": 533, "n_words": 195, "vocab_size": 99 }
https://github.com/plotly/plotly.py.git
2
_linear_eq_to_dict
def _linear_eq_to_dict(eqs, syms):
    coeffs = []
    ind = []
    symset = set(syms)
    for i, e in enumerate(eqs):
        c, d = _lin_eq2dict(e, symset)
        coeffs.append(d)
        ind.append(c)
    return coeffs, ind
e0aaa724190c49f2725bb7880eddd13ce4fef4b7
10
linsolve.py
95
more efficient coefficient extraction
49,169
0
66
58
22
199,152
27
sympy
14
sympy/polys/matrices/linsolve.py
Python
9
{ "docstring": "Convert a system Expr/Eq equations into dict form, returning\n the coefficient dictionaries and a list of syms-independent terms\n from each expression in ``eqs```.\n\n Examples\n ========\n\n >>> from sympy.polys.matrices.linsolve import _linear_eq_to_dict\n >>> from sympy.abc import x\n >>> _linear_eq_to_dict([2*x + 3], {x})\n ([{x: 2}], [3])\n ", "language": "en", "n_whitespaces": 70, "n_words": 43, "vocab_size": 37 }
https://github.com/sympy/sympy.git
3
vector_reset
def vector_reset(self):
    self.cur_obs = [e.reset() for e in self.envs]
    self._timesteps = [0 for _ in range(self.num_envs)]
    return self.cur_obs
b52a81b3de6f4b7015c6694049d094f2964e1c96
11
model_vector_env.py
69
[RLlib] Preparation for gymnasium/gym0.26 upgrade: Deprecate `horizon` and `soft_horizon` settings. (#30583)
31,023
0
46
43
14
136,916
18
ray
10
rllib/env/wrappers/model_vector_env.py
Python
4
{ "docstring": "Override parent to store actual env obs for upcoming predictions.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/ray-project/ray.git
1
cache_frame
def cache_frame(self, filename, image) -> None:
    frame_no = int(re.search(self.re_search, filename).group())
    self.cache[frame_no] = image
    logger.trace("Added to cache. Frame no: %s", frame_no)
    logger.trace("Current cache: %s", sorted(self.cache.keys()))
60291d49c4da1cd260fbc0b04aa6a312eedfefbb
13
_base.py
105
ffmpeg writer: Create new filename if output pre-exists
20,072
0
59
64
22
100,610
24
faceswap
15
plugins/convert/writer/_base.py
Python
17
{ "docstring": " Add the incoming converted frame to the cache ready for writing out.\n\n Used for ffmpeg and gif writers to ensure that the frames are written out in the correct\n order.\n\n Parameters\n ----------\n filename: str\n The filename of the incoming frame, where the frame index can be extracted from\n image: class:`numpy.ndarray`\n The converted frame corresponding to the given filename\n ", "language": "en", "n_whitespaces": 130, "n_words": 58, "vocab_size": 43 }
https://github.com/deepfakes/faceswap.git
1
allow_regional
def allow_regional(fn):
    def caller(*args, **kwargs):
        overrides = frappe.get_hooks("regional_overrides", {}).get(get_region())
        function_path = f"{inspect.getmodule(fn).__name__}.{fn.__name__}"

        if not overrides or function_path not in overrides:
            return fn(*args, **kwargs)

        # Priority given to last installed app
        return frappe.get_attr(overrides[function_path][-1])(*args, **kwargs)

    return caller
b68a99675d12a1ffbda538ee07a2020ba66fb3cc
13
__init__.py
152
fix: allow `regional_overrides` hook to be set in subsequent apps
13,605
0
26
10
27
64,335
35
erpnext
15
erpnext/__init__.py
Python
3
{ "docstring": "Decorator to make a function regionally overridable\n\n\tExample:\n\[email protected]_regional\n\tdef myfunction():\n\t pass", "language": "en", "n_whitespaces": 9, "n_words": 12, "vocab_size": 12 }
https://github.com/frappe/erpnext.git
3
getchannel
def getchannel(self, channel):
    self.load()

    if isinstance(channel, str):
        try:
            channel = self.getbands().index(channel)
        except ValueError as e:
            msg = f'The image has no channel "{channel}"'
            raise ValueError(msg) from e

    return self._new(self.im.getband(channel))
2ae55ccbdad9c842929fb238ea1eb81d1f999024
14
Image.py
112
Improve exception traceback readability
70,094
0
124
65
27
243,725
29
Pillow
14
src/PIL/Image.py
Python
9
{ "docstring": "\n Returns an image containing a single channel of the source image.\n\n :param channel: What channel to return. Could be index\n (0 for \"R\" channel of \"RGB\") or channel name\n (\"A\" for alpha channel of \"RGBA\").\n :returns: An image in \"L\" mode.\n\n .. versionadded:: 4.3.0\n ", "language": "en", "n_whitespaces": 98, "n_words": 44, "vocab_size": 36 }
https://github.com/python-pillow/Pillow.git
12
_update_legacy_config
def _update_legacy_config(self) -> bool:
    logger.debug("Checking for legacy state file update")
    priors = ["dssim_loss", "mask_type", "mask_type", "l2_reg_term"]
    new_items = ["loss_function", "learn_mask", "mask_type", "loss_function_2"]
    updated = False
    for old, new in zip(priors, new_items):
        if old not in self._config:
            logger.debug("Legacy item '%s' not in config. Skipping update", old)
            continue

        # dssim_loss > loss_function
        if old == "dssim_loss":
            self._config[new] = "ssim" if self._config[old] else "mae"
            del self._config[old]
            updated = True
            logger.info("Updated config from legacy dssim format. New config loss "
                        "function: '%s'", self._config[new])
            continue

        # Add learn mask option and set to True if model has "penalized_mask_loss" specified
        if old == "mask_type" and new == "learn_mask" and new not in self._config:
            self._config[new] = self._config["mask_type"] is not None
            updated = True
            logger.info("Added new 'learn_mask' config item for this model. Value set to: %s",
                        self._config[new])
            continue

        # Replace removed masks with most similar equivalent
        if old == "mask_type" and new == "mask_type" and self._config[old] in ("facehull", "dfl_full"):
            old_mask = self._config[old]
            self._config[new] = "components"
            updated = True
            logger.info("Updated 'mask_type' from '%s' to '%s' for this model",
                        old_mask, self._config[new])

        # Replace l2_reg_term with the correct loss_2_function and update the value of
        # loss_2_weight
        if old == "l2_reg_term":
            self._config[new] = "mse"
            self._config["loss_weight_2"] = self._config[old]
            del self._config[old]
            updated = True
            logger.info("Updated config from legacy 'l2_reg_term' to 'loss_function_2'")

    logger.debug("State file updated for legacy config: %s", updated)
    return updated
94c3dcff7ebd02a5a5758f33a3eb2bfc66282117
13
model.py
471
Training updates - Add multiple selected loss functions - Unlock loss as a model configuration - Phaze-A remove encoder scaling max xap
20,323
0
846
272
116
100,872
217
faceswap
14
plugins/train/model/_base/model.py
Python
60
{ "docstring": " Legacy updates for new config additions.\n\n When new config items are added to the Faceswap code, existing model state files need to be\n updated to handle these new items.\n\n Current existing legacy update items:\n\n * loss - If old `dssim_loss` is ``true`` set new `loss_function` to `ssim` otherwise\n set it to `mae`. Remove old `dssim_loss` item\n\n * l2_reg_term - If this exists, set loss_function_2 to ``mse`` and loss_weight_2 to\n the value held in the old ``l2_reg_term`` item\n\n * masks - If `learn_mask` does not exist then it is set to ``True`` if `mask_type` is\n not ``None`` otherwise it is set to ``False``.\n\n * masks type - Replace removed masks 'dfl_full' and 'facehull' with `components` mask\n\n Returns\n -------\n bool\n ``True`` if legacy items exist and state file has been updated, otherwise ``False``\n ", "language": "en", "n_whitespaces": 269, "n_words": 131, "vocab_size": 82 }
https://github.com/deepfakes/faceswap.git
6
get_memos
def get_memos(self) -> Dict[bytes32, List[bytes]]:
    memos: Dict[bytes32, List[bytes]] = {}
    for coin_spend in self.coin_spends:
        result = Program.from_bytes(bytes(coin_spend.puzzle_reveal)).run(
            Program.from_bytes(bytes(coin_spend.solution))
        )
        for condition in result.as_python():
            if condition[0] == ConditionOpcode.CREATE_COIN and len(condition) >= 4:
                # If only 3 elements (opcode + 2 args), there is no memo, this is ph, amount
                coin_added = Coin(coin_spend.coin.name(), bytes32(condition[1]), int_from_bytes(condition[2]))
                if type(condition[3]) != list:
                    # If it's not a list, it's not the correct format
                    continue
                memos[coin_added.name()] = condition[3]
    return memos

# Note that `coin_spends` used to have the bad name `coin_solutions`.
# Some API still expects this name. For now, we accept both names.
#
# TODO: continue this deprecation. Eventually, all code below here should be removed.
# 1. set `exclude_modern_keys` to `False` (and manually set to `True` where necessary)
# 2. set `include_legacy_keys` to `False` (and manually set to `False` where necessary)
# 3. remove all references to `include_legacy_keys=True`
# 4. remove all code below this point
89f15f591cc3cc3e8ae40e95ffc802f7f2561ece
17
spend_bundle.py
235
Merge standalone wallet into main (#9793) * wallet changes from pac * cat changes * pool tests * pooling tests passing * offers * lint * mempool_mode * black * linting * workflow files * flake8 * more cleanup * renamed * remove obsolete test, don't cast announcement * memos are not only bytes32 * trade renames * fix rpcs, block_record * wallet rpc, recompile settlement clvm * key derivation * clvm tests * lgtm issues and wallet peers * stash * rename * mypy linting * flake8 * bad initializer * flaky tests * Make CAT wallets only create on verified hints (#9651) * fix clvm tests * return to log lvl warn * check puzzle unhardened * public key, not bytes. api caching change * precommit changes * remove unused import * mypy ci file, tests * ensure balance before creating a tx * Remove CAT logic from full node test (#9741) * Add confirmations and sleeps for wallet (#9742) * use pool executor * rever merge mistakes/cleanup * Fix trade test flakiness (#9751) * remove precommit * older version of black * lint only in super linter * Make announcements in RPC be objects instead of bytes (#9752) * Make announcements in RPC be objects instead of bytes * Lint * misc hint'ish cleanup (#9753) * misc hint'ish cleanup * unremove some ci bits * Use main cached_bls.py * Fix bad merge in main_pac (#9774) * Fix bad merge at 71da0487b9cd5564453ec24b76f1ac773c272b75 * Remove unused ignores * more unused ignores * Fix bad merge at 3b143e705057d6c14e2fb3e00078aceff0552d7e * One more byte32.from_hexstr * Remove obsolete test * remove commented out * remove duplicate payment object * remove long sync * remove unused test, noise * memos type * bytes32 * make it clear it's a single state at a time * copy over asset ids from pacr * file endl linter * Update chia/server/ws_connection.py Co-authored-by: dustinface <[email protected]> Co-authored-by: Matt Hauff <[email protected]> Co-authored-by: Kyle Altendorf <[email protected]> Co-authored-by: dustinface <[email protected]>
21,558
0
394
146
109
102,634
153
chia-blockchain
27
chia/types/spend_bundle.py
Python
18
{ "docstring": "\n Retrieves the memos for additions in this spend_bundle, which are formatted as a list in the 3rd parameter of\n CREATE_COIN. If there are no memos, the addition coin_id is not included. If they are not formatted as a list\n of bytes, they are not included. This is expensive to call, it should not be used in full node code.\n ", "language": "en", "n_whitespaces": 88, "n_words": 59, "vocab_size": 40 }
https://github.com/Chia-Network/chia-blockchain.git
3
test_learning_curve_display_default_usage
def test_learning_curve_display_default_usage(pyplot, data):
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)

    train_sizes = [0.3, 0.6, 0.9]
    display = LearningCurveDisplay.from_estimator(
        estimator, X, y, train_sizes=train_sizes
    )

    import matplotlib as mpl

    assert display.errorbar_ is None

    assert isinstance(display.lines_, list)
    for line in display.lines_:
        assert isinstance(line, mpl.lines.Line2D)

    assert isinstance(display.fill_between_, list)
    for fill in display.fill_between_:
        assert isinstance(fill, mpl.collections.PolyCollection)
        assert fill.get_alpha() == 0.5

    assert display.score_name == "Score"
    assert display.ax_.get_xlabel() == "Number of samples in the training set"
    assert display.ax_.get_ylabel() == "Score"

    _, legend_labels = display.ax_.get_legend_handles_labels()
    assert legend_labels == ["Testing metric"]

    train_sizes_abs, train_scores, test_scores = learning_curve(
        estimator, X, y, train_sizes=train_sizes
    )

    assert_array_equal(display.train_sizes, train_sizes_abs)
    assert_allclose(display.train_scores, train_scores)
    assert_allclose(display.test_scores, test_scores)
758fe0d9c72ba343097003e7992c9239e58bfc63
11
test_plot.py
313
FEA add LearningCurveDisplay to show plot learning curve (#24084) Co-authored-by: jeremie du boisberranger <[email protected]> Co-authored-by: Arturo Amor <[email protected]>
76,917
0
199
211
68
261,652
98
scikit-learn
39
sklearn/model_selection/tests/test_plot.py
Python
27
{ "docstring": "Check the default usage of the LearningCurveDisplay class.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 7 }
https://github.com/scikit-learn/scikit-learn.git
1
test_climate_find_valid_targets
async def test_climate_find_valid_targets():
    valid_targets = [10, 16, 17, 18, 19, 20]

    assert _find_valid_target_temp(7, valid_targets) == 10
    assert _find_valid_target_temp(10, valid_targets) == 10
    assert _find_valid_target_temp(11, valid_targets) == 16
    assert _find_valid_target_temp(15, valid_targets) == 16
    assert _find_valid_target_temp(16, valid_targets) == 16
    assert _find_valid_target_temp(18.5, valid_targets) == 19
    assert _find_valid_target_temp(20, valid_targets) == 20
    assert _find_valid_target_temp(25, valid_targets) == 20
5ee2f4f438f8acb119308738639169138b15662c
8
test_climate.py
135
Sensibo Set temperature improvement (#72992)
102,059
0
81
94
26
303,231
51
core
3
tests/components/sensibo/test_climate.py
Python
10
{ "docstring": "Test function to return temperature from valid targets.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
1
test_send_push_single_worker
def test_send_push_single_worker(self):
    http_client_mock = Mock(spec_set=["post_json_get_json"])
    http_client_mock.post_json_get_json.side_effect = (
        lambda *_, **__: defer.succeed({})
    )

    self.make_worker_hs(
        "synapse.app.generic_worker",
        {"worker_name": "pusher1", "pusher_instances": ["pusher1"]},
        proxied_blacklisted_http_client=http_client_mock,
    )

    event_id = self._create_pusher_and_send_msg("user")

    # Advance time a bit, so the pusher will register something has happened
    self.pump()

    http_client_mock.post_json_get_json.assert_called_once()
    self.assertEqual(
        http_client_mock.post_json_get_json.call_args[0][0],
        "https://push.example.com/_matrix/push/v1/notify",
    )
    self.assertEqual(
        event_id,
        http_client_mock.post_json_get_json.call_args[0][1]["notification"][
            "event_id"
        ],
    )
854a6884d81c95297bf93badcddc00a4cab93418
13
test_pusher_shard.py
213
Modernize unit tests configuration settings for workers. (#14568) Use the newer foo_instances configuration instead of the deprecated flags to enable specific features (e.g. start_pushers).
73,196
0
261
125
43
249,919
49
synapse
19
tests/replication/test_pusher_shard.py
Python
23
{ "docstring": "Test that registration works when using a pusher worker.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/matrix-org/synapse.git
1
test_load_existing_stream
def test_load_existing_stream(self) -> None:
    self._insert_rows("foobar1", "first", 3)
    self._insert_rows("foobar2", "second", 3)
    self._insert_rows("foobar2", "second", 1, update_stream_table=False)

    first_id_gen = self._create_id_generator("first", writers=["first", "second"])
    second_id_gen = self._create_id_generator("second", writers=["first", "second"])

    # The first ID gen will notice that it can advance its token to 7 as it
    # has no in progress writes...
    self.assertEqual(first_id_gen.get_positions(), {"first": 7, "second": 6})
    self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 7)
    self.assertEqual(first_id_gen.get_current_token_for_writer("second"), 6)
    self.assertEqual(first_id_gen.get_persisted_upto_position(), 7)

    # ... but the second ID gen doesn't know that.
    self.assertEqual(second_id_gen.get_positions(), {"first": 3, "second": 7})
    self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 3)
    self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 7)
    self.assertEqual(first_id_gen.get_persisted_upto_position(), 7)
9d21ecf7ceab55bc19c4457b8b07401b0b1623a7
11
test_id_generators.py
330
Add type hints to tests files. (#12256)
71,927
0
198
190
61
247,794
79
synapse
12
tests/storage/test_id_generators.py
Python
17
{ "docstring": "Test creating ID gens with multiple tables that have rows from after\n the position in `stream_positions` table.\n ", "language": "en", "n_whitespaces": 31, "n_words": 17, "vocab_size": 17 }
https://github.com/matrix-org/synapse.git
4
test_notification_preferences_panel_reduced_for_non_moderators
def test_notification_preferences_panel_reduced_for_non_moderators(self):
    response = self.client.get(reverse("wagtailadmin_account"))

    # Find notifications panel through context
    notifications_panel = None
    for panelset in response.context["panels_by_tab"].values():
        for panel in panelset:
            if panel.name == "notifications":
                notifications_panel = panel
                break

    notifications_form = notifications_panel.get_form()
    self.assertIn("approved_notifications", notifications_form.fields.keys())
    self.assertIn("rejected_notifications", notifications_form.fields.keys())
    self.assertNotIn("submitted_notifications", notifications_form.fields.keys())
    self.assertIn(
        "updated_comments_notifications", notifications_form.fields.keys()
    )
d10f15e55806c6944827d801cd9c2d53f5da4186
12
test_account_management.py
200
Reformat with black
15,750
0
195
115
33
71,800
43
wagtail
18
wagtail/admin/tests/test_account_management.py
Python
15
{ "docstring": "\n This tests that a user without publish permissions is not shown the\n notification preference for 'submitted' items\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 17 }
https://github.com/wagtail/wagtail.git
2
as_dict
def as_dict(self) -> Dict[Text, Any]:
    serializable_graph_schema: Dict[Text, Dict[Text, Any]] = {"nodes": {}}
    for node_name, node in self.nodes.items():
        serializable = dataclasses.asdict(node)

        # Classes are not JSON serializable (surprise)
        serializable["uses"] = f"{node.uses.__module__}.{node.uses.__name__}"

        serializable_graph_schema["nodes"][node_name] = serializable

    return serializable_graph_schema
9fc462da870f69f9976be3bc081675844b9f64c2
12
graph.py
137
fix type annotation in rasa.engine
38,299
0
107
72
28
159,507
35
rasa
16
rasa/engine/graph.py
Python
12
{ "docstring": "Returns graph schema in a serializable format.\n\n Returns:\n The graph schema in a format which can be dumped as JSON or other formats.\n ", "language": "en", "n_whitespaces": 48, "n_words": 23, "vocab_size": 19 }
https://github.com/RasaHQ/rasa.git
15
page_identity
def page_identity(self, response, request_json=None):
    request_path = response.request.path_url
    if request_path == '/migrations_notran/':
        raise exc.IsMigrating('You have been redirected to the migration-in-progress page.')
    request_method = response.request.method.lower()

    self.last_elapsed = response.elapsed

    if isinstance(request_json, dict) and 'ds' in request_json:
        ds = request_json.ds
    else:
        ds = None

    data = self.extract_data(response)
    exc_str = "%s (%s) received" % (http.responses[response.status_code], response.status_code)

    exception = exception_from_status_code(response.status_code)
    if exception:
        raise exception(exc_str, data)

    if response.status_code in (http.OK, http.CREATED, http.ACCEPTED):
        # Not all JSON responses include a URL. Grab it from the request
        # object, if needed.
        if 'url' in data:
            endpoint = data['url']
        else:
            endpoint = request_path

        data = objectify_response_json(response)

        if request_method in ('get', 'patch', 'put'):
            # Update existing resource and return it
            if are_same_endpoint(self.endpoint, request_path):
                self.json = data
                self.r = response
                return self

        registered_type = get_registered_page(request_path, request_method)
        return registered_type(self.connection, endpoint=endpoint, json=data, last_elapsed=response.elapsed, r=response, ds=ds)

    elif response.status_code == http.FORBIDDEN:
        if is_license_invalid(response):
            raise exc.LicenseInvalid(exc_str, data)
        elif is_license_exceeded(response):
            raise exc.LicenseExceeded(exc_str, data)
        else:
            raise exc.Forbidden(exc_str, data)

    elif response.status_code == http.BAD_REQUEST:
        if is_license_invalid(response):
            raise exc.LicenseInvalid(exc_str, data)
        if is_duplicate_error(response):
            raise exc.Duplicate(exc_str, data)
        else:
            raise exc.BadRequest(exc_str, data)
    else:
        raise exc.Unknown(exc_str, data)
68a44529b6b77d2d43d7099b654560bfd8bbf518
13
page.py
536
Register pages for the Instance peers and install bundle endpoints This includes exposing a new interface for Page objects, Page.bytes, to return the full bytestring contents of the response.
17,285
0
694
337
104
81,965
171
awx
47
awxkit/awxkit/api/pages/page.py
Python
44
{ "docstring": "Takes a `requests.Response` and\n returns a new __item_class__ instance if the request method is not a get, or returns\n a __class__ instance if the request path is different than the caller's `endpoint`.\n ", "language": "en", "n_whitespaces": 56, "n_words": 32, "vocab_size": 22 }
https://github.com/ansible/awx.git
2
call_categories
def call_categories(self, other_args):
    parser = argparse.ArgumentParser(
        prog="categories",
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description=,
    )
    parser.add_argument(
        "-l",
        "--limit",
        dest="limit",
        type=check_positive,
        help="display N number of records",
        default=15,
    )
    parser.add_argument(
        "-s",
        "--sortby",
        dest="sortby",
        type=str,
        help="Sort by given column. Default: market_cap_desc",
        default=pycoingecko_model.SORT_VALUES[0],
        choices=pycoingecko_model.SORT_VALUES,
    )
    parser.add_argument(
        "--pie",
        action="store_true",
        help="Flag to show pie chart",
        dest="pie",
        default=False,
    )
    ns_parser = self.parse_known_args_and_warn(
        parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED
    )
    if ns_parser:
        pycoingecko_view.display_categories(
            limit=ns_parser.limit,
            export=ns_parser.export,
            sortby=ns_parser.sortby,
            pie=ns_parser.pie,
        )

    # TODO: solve sort (similar to losers from discovery)
09f753da1c2a2f03c41fe6a3ca2eb79f6ea58995
11
overview_controller.py
254
More Fixes to Crypto + key sort (#3244) * fix #3095 - autocomplete and command working + key sort * fix #3056 * fix [Bug] bugs #3048 * fix [Bug] bug #3017 * sort -> sortby, not ascend, tests * fix my goof ups Co-authored-by: james <[email protected]>
85,786
0
494
161
63
286,399
72
OpenBBTerminal
31
openbb_terminal/cryptocurrency/overview/overview_controller.py
Python
43
{ "docstring": "Process top_categories commandShows top cryptocurrency categories by market capitalization. It includes categories like:\n stablecoins, defi, solana ecosystem, polkadot ecosystem and many others.\n You can sort by {}, using --sortby parameter", "language": "en", "n_whitespaces": 51, "n_words": 30, "vocab_size": 28 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
3
listify_tensors
def listify_tensors(x):
    if tf.is_tensor(x):
        x = x.numpy()
    if isinstance(x, np.ndarray):
        x = x.tolist()
    return x
84afc5193d38057e2e2badf9c889ea87d80d8fbf
10
preprocessing_utils.py
68
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,118
0
41
40
11
273,362
15
keras
9
keras/layers/preprocessing/preprocessing_utils.py
Python
6
{ "docstring": "Convert any tensors or numpy arrays to lists for config serialization.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/keras-team/keras.git
1
test_key_query_cancellation
def test_key_query_cancellation(self) -> None:
    self.register_user("alice", "wonderland")
    alice_token = self.login("alice", "wonderland")

    bob = self.register_user("bob", "uncle")

    channel = make_request_with_cancellation_test(
        "test_key_query_cancellation",
        self.reactor,
        self.site,
        "POST",
        "/_matrix/client/r0/keys/query",
        {
            "device_keys": {
                # Empty list means we request keys for all bob's devices
                bob: [],
            },
        },
        token=alice_token,
    )

    self.assertEqual(200, channel.code, msg=channel.result["body"])
    self.assertIn(bob, channel.json_body["device_keys"])
d3d9ca156e323fe194b1bcb1af1628f65a2f3c1c
13
test_keys.py
177
Cancel the processing of key query requests when they time out. (#13680)
72,939
0
259
104
42
249,472
47
synapse
17
tests/rest/client/test_keys.py
Python
23
{ "docstring": "\n Tests that /keys/query is cancellable and does not swallow the\n CancelledError.\n ", "language": "en", "n_whitespaces": 33, "n_words": 11, "vocab_size": 11 }
https://github.com/matrix-org/synapse.git
4
trigsimp
def trigsimp(expr, inverse=False, **opts):
    from sympy.simplify.fu import fu

    expr = sympify(expr)

    _eval_trigsimp = getattr(expr, '_eval_trigsimp', None)
    if _eval_trigsimp is not None:
        return _eval_trigsimp(**opts)

    old = opts.pop('old', False)
    if not old:
        opts.pop('deep', None)
        opts.pop('recursive', None)
        method = opts.pop('method', 'matching')
    else:
        method = 'old'
5fc97f8ef40cbc9363c7f7e0ff25f12c45a2203e
11
trigsimp.py
160
implemented inverse option for trigsimp
47,355
0
101
202
32
195,664
42
sympy
13
sympy/simplify/trigsimp.py
Python
26
{ "docstring": "Returns a reduced expression by using known trig identities.\n\n Parameters\n ==========\n\n inverse : bool, optional\n If ``inverse=True``, it will be assumed that a composition of inverse\n functions, such as sin and asin, can be cancelled in any order.\n For example, ``asin(sin(x))`` will yield ``x`` without checking whether\n x belongs to the set where this relation is true. The default is False.\n Default : True\n\n method : string, optional\n Specifies the method to use. Valid choices are:\n\n - ``'matching'``, default\n - ``'groebner'``\n - ``'combined'``\n - ``'fu'``\n - ``'old'``\n\n If ``'matching'``, simplify the expression recursively by targeting\n common patterns. If ``'groebner'``, apply an experimental groebner\n basis algorithm. In this case further options are forwarded to\n ``trigsimp_groebner``, please refer to\n its docstring. If ``'combined'``, it first runs the groebner basis\n algorithm with small default parameters, then runs the ``'matching'``\n algorithm. If ``'fu'``, run the collection of trigonometric\n transformations described by Fu, et al. (see the\n :py:func:`~sympy.simplify.fu.fu` docstring). If ``'old'``, the original\n SymPy trig simplification function is run.\n opts :\n Optional keyword arguments passed to the method. See each method's\n function docstring for details.\n\n Examples\n ========\n\n >>> from sympy import trigsimp, sin, cos, log\n >>> from sympy.abc import x\n >>> e = 2*sin(x)**2 + 2*cos(x)**2\n >>> trigsimp(e)\n 2\n\n Simplification occurs wherever trigonometric functions are located.\n\n >>> trigsimp(log(e))\n log(2)\n\n Using ``method='groebner'`` (or ``method='combined'``) might lead to\n greater simplification.\n\n The old trigsimp routine can be accessed as with method ``method='old'``.\n\n >>> from sympy import coth, tanh\n >>> t = 3*tanh(x)**7 - 2/coth(x)**7\n >>> trigsimp(t, method='old') == t\n True\n >>> trigsimp(t)\n tanh(x)**7\n\n ", "language": "en", "n_whitespaces": 491, "n_words": 255, "vocab_size": 181 }
https://github.com/sympy/sympy.git
1
db_supports_json
def db_supports_json(self):
    return not conf.get("database", "sql_alchemy_conn").startswith("mssql")
d8889da29ccfcbecd2c89b9e8e278c480767d678
11
sqlalchemy.py
42
Move the database configuration to a new section (#22284) Co-authored-by: gitstart-airflow <[email protected]> Co-authored-by: GitStart <[email protected]> Co-authored-by: Egbosi Kelechi <[email protected]>
9,072
0
20
21
6
47,330
6
airflow
5
airflow/utils/sqlalchemy.py
Python
2
{ "docstring": "Checks if the database supports JSON (i.e. is NOT MSSQL)", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/apache/airflow.git
8
coord_map_from_to
def coord_map_from_to(top_from, top_to):
    # We need to find a common ancestor of top_from and top_to.
    # We'll assume that all ancestors are equivalent here (otherwise the graph
    # is an inconsistent state (which we could improve this to check for)).
    # For now use a brute-force algorithm.
cc4d0564756ca067516f71718a3d135996525909
6
coord_map.py
19
Balanced joint maximum mean discrepancy for deep transfer learning
12,025
0
62
177
42
60,232
47
transferlearning
3
code/deep/BJMMD/caffe/python/caffe/coord_map.py
Python
28
{ "docstring": "\n Determine the coordinate mapping betweeen a top (from) and a top (to).\n Walk the graph to find a common ancestor while composing the coord maps for\n from and to until they meet. As a last step the from map is inverted.\n ", "language": "en", "n_whitespaces": 54, "n_words": 41, "vocab_size": 31 }
https://github.com/jindongwang/transferlearning.git
4
plot_feature_importance
def plot_feature_importance(model, feature_names, pair, train_dir, count_max=50) -> None:
    try:
        import plotly.graph_objects as go
        from plotly.subplots import make_subplots
    except ImportError:
        logger.exception("Module plotly not found \n Please install using `pip3 install plotly`")
        exit(1)

    from freqtrade.plot.plotting import store_plot_file

    # Gather feature importance from model
    if "catboost.core" in str(model.__class__):
        feature_importance = model.get_feature_importance()
    elif "lightgbm.sklearn" in str(model.__class__):
        feature_importance = model.feature_importances_
    else:
        raise NotImplementedError(f"Cannot extract feature importance for {model.__class__}")

    # Data preparation
    fi_df = pd.DataFrame({
        "feature_names": np.array(feature_names),
        "feature_importance": np.array(feature_importance)
    })
    fi_df_top = fi_df.nlargest(count_max, "feature_importance")[::-1]
    fi_df_worst = fi_df.nsmallest(count_max, "feature_importance")[::-1]

    # Plotting
86aa875bc9d5edeba04f908fe45b011e52045c83
13
utils.py
261
plot features as html instead of png
34,972
0
189
229
67
151,197
84
freqtrade
34
freqtrade/freqai/utils.py
Python
37
{ "docstring": "\n Plot Best and Worst Features by importance for CatBoost model.\n Called once per sub-train.\n Usage: plot_feature_importance(\n model=model,\n feature_names=dk.training_features_list,\n pair=pair,\n train_dir=dk.data_path)\n ", "language": "en", "n_whitespaces": 89, "n_words": 20, "vocab_size": 20 }
https://github.com/freqtrade/freqtrade.git
5
get_objects
async def get_objects(self) -> dict:
    replies = await asyncio.gather(
        *[
            self._client.get_object_info(node_id, timeout=DEFAULT_RPC_TIMEOUT)
            for node_id in self._client.get_all_registered_raylet_ids()
        ]
    )

    worker_stats = []
    for reply in replies:
        for core_worker_stat in reply.core_workers_stats:
            # NOTE: Set preserving_proto_field_name=False here because
            # `construct_memory_table` requires a dictionary that has
            # modified protobuf name
            # (e.g., workerId instead of worker_id) as a key.
            worker_stats.append(
                self._message_to_dict(
                    message=core_worker_stat,
                    fields_to_decode=["object_id"],
                    preserving_proto_field_name=False,
                )
            )

    result = {}
    memory_table = memory_utils.construct_memory_table(worker_stats)
    for entry in memory_table.table:
        data = entry.as_dict()
        # `construct_memory_table` returns object_ref field which is indeed
        # object_id. We do transformation here.
        # TODO(sang): Refactor `construct_memory_table`.
        data["object_id"] = data["object_ref"]
        del data["object_ref"]
        data = filter_fields(data, ObjectState)
        result[data["object_id"]] = data
    return result
30ab5458a7e4ba2351d5e1beef8c8797b5946493
16
state_aggregator.py
234
[State Observability] Tasks and Objects API (#23912) This PR implements ray list tasks and ray list objects APIs. NOTE: You can ignore the merge conflict for now. It is because the first PR was reverted. There's a fix PR open now.
31,404
0
518
140
80
138,395
107
ray
31
dashboard/state_aggregator.py
Python
32
{ "docstring": "List all object information from the cluster.\n\n Returns:\n {object_id -> object_data_in_dict}\n object_data_in_dict's schema is in ObjectState\n ", "language": "en", "n_whitespaces": 52, "n_words": 16, "vocab_size": 16 }
https://github.com/ray-project/ray.git
4
exec_
def exec_(_code_, _globs_=None, _locs_=None):
    if _globs_ is None:
        frame = sys._getframe(1)
        _globs_ = frame.f_globals
        if _locs_ is None:
            _locs_ = frame.f_locals
        del frame
    elif _locs_ is None:
        _locs_ = _globs_
    exec()

exec_()

if sys.version_info[:2] > (3,):
    exec_()
else:
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
11
six.py
136
upd; format
13,487
0
140
56
22
63,729
38
transferlearning
11
.venv/lib/python3.8/site-packages/pip/_vendor/six.py
Python
10
{ "docstring": "Execute code in a namespace.exec _code_ in _globs_, _locs_def reraise(tp, value, tb=None):\n try:\n raise tp, value, tb\n finally:\n tb = None\ndef raise_from(value, from_value):\n try:\n raise value from from_value\n finally:\n value = None\n", "language": "en", "n_whitespaces": 71, "n_words": 33, "vocab_size": 24 }
https://github.com/jindongwang/transferlearning.git
1
test_create_single_object_with_values
def test_create_single_object_with_values(self):
    data = {
        'name': 'Site 3',
        'slug': 'site-3',
        'custom_fields': {
            'text_field': 'bar',
            'longtext_field': 'blah blah blah',
            'number_field': 456,
            'boolean_field': True,
            'date_field': '2020-01-02',
            'url_field': 'http://example.com/2',
            'json_field': '{"foo": 1, "bar": 2}',
            'choice_field': 'Bar',
            'object_field': VLAN.objects.get(vid=2).pk,
        },
    }
    url = reverse('dcim-api:site-list')
    self.add_permissions('dcim.add_site')

    response = self.client.post(url, data, format='json', **self.header)
    self.assertHttpStatus(response, status.HTTP_201_CREATED)

    # Validate response data
    response_cf = response.data['custom_fields']
    data_cf = data['custom_fields']
    self.assertEqual(response_cf['text_field'], data_cf['text_field'])
    self.assertEqual(response_cf['longtext_field'], data_cf['longtext_field'])
    self.assertEqual(response_cf['number_field'], data_cf['number_field'])
    self.assertEqual(response_cf['boolean_field'], data_cf['boolean_field'])
    self.assertEqual(response_cf['date_field'], data_cf['date_field'])
    self.assertEqual(response_cf['url_field'], data_cf['url_field'])
    self.assertEqual(response_cf['json_field'], data_cf['json_field'])
    self.assertEqual(response_cf['choice_field'], data_cf['choice_field'])
    self.assertEqual(response_cf['object_field']['id'], data_cf['object_field'])

    # Validate database data
    site = Site.objects.get(pk=response.data['id'])
    self.assertEqual(site.custom_field_data['text_field'], data_cf['text_field'])
    self.assertEqual(site.custom_field_data['longtext_field'], data_cf['longtext_field'])
    self.assertEqual(site.custom_field_data['number_field'], data_cf['number_field'])
    self.assertEqual(site.custom_field_data['boolean_field'], data_cf['boolean_field'])
    self.assertEqual(str(site.custom_field_data['date_field']), data_cf['date_field'])
    self.assertEqual(site.custom_field_data['url_field'], data_cf['url_field'])
    self.assertEqual(site.custom_field_data['json_field'], data_cf['json_field'])
    self.assertEqual(site.custom_field_data['choice_field'], data_cf['choice_field'])
    self.assertEqual(site.custom_field_data['object_field'], data_cf['object_field'])
fa1e28e860c4bdb3e585a968bd248a2ac666e1f6
14
test_customfields.py
735
Initial work on #7006
77,615
0
491
420
82
264,128
102
netbox
26
netbox/extras/tests/test_customfields.py
Python
41
{ "docstring": "\n Create a single new site with a value for each type of custom field.\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 13 }
https://github.com/netbox-community/netbox.git
7
shift
def shift(self, periods=1, freq=None, axis=0, fill_value=None, meta=no_default):
    if meta is no_default:
        with raise_on_meta_error("groupby.shift()", udf=False):
            meta_kwargs = _extract_meta(
                {
                    "periods": periods,
                    "freq": freq,
                    "axis": axis,
                    "fill_value": fill_value,
                },
                nonempty=True,
            )
            meta = self._meta_nonempty.shift(**meta_kwargs)

        msg = (
            "`meta` is not specified, inferred from partial data. "
            "Please provide `meta` if the result is unexpected.\n"
            " Before: .shift(1)\n"
            " After: .shift(1, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n"
            " or: .shift(1, meta=('x', 'f8')) for series result"
        )
        warnings.warn(msg, stacklevel=2)

    meta = make_meta(meta, parent_meta=self._meta.obj)

    # Validate self.by
    if isinstance(self.by, list) and any(
        isinstance(item, Series) for item in self.by
    ):
        raise NotImplementedError(
            "groupby-shift with a multiple Series is currently not supported"
        )
    df = self.obj
    should_shuffle = not (df.known_divisions and df._contains_index_name(self.by))

    if should_shuffle:
        df2, by = self._shuffle(meta)
    else:
        df2 = df
        by = self.by

    # Perform embarrassingly parallel groupby-shift
    result = map_partitions(
        _groupby_slice_shift,
        df2,
        by,
        self._slice,
        periods=periods,
        freq=freq,
        axis=axis,
        fill_value=fill_value,
        token="groupby-shift",
        group_keys=self.group_keys,
        meta=meta,
        **self.observed,
        **self.dropna,
    )

    return result
336aac39ee8a616ac2645e532392123ae1bfddd1
15
groupby.py
391
Add groupby shift method (#8522) Implements the shift `method` following the `transform` and `apply` methods.
36,420
0
807
246
115
155,537
153
dask
43
dask/dataframe/groupby.py
Python
51
{ "docstring": "Parallel version of pandas GroupBy.shift\n\n This mimics the pandas version except for the following:\n\n If the grouper does not align with the index then this causes a full\n shuffle. The order of rows within each group may not be preserved.\n\n Parameters\n ----------\n periods : Delayed, Scalar or int, default 1\n Number of periods to shift.\n freq : Delayed, Scalar or str, optional\n Frequency string.\n axis : axis to shift, default 0\n Shift direction.\n fill_value : Scalar, Delayed or object, optional\n The scalar value to use for newly introduced missing values.\n $META\n\n Returns\n -------\n shifted : Series or DataFrame shifted within each group.\n\n Examples\n --------\n >>> import dask\n >>> ddf = dask.datasets.timeseries(freq=\"1H\")\n >>> result = ddf.groupby(\"name\").shift(1, meta={\"id\": int, \"x\": float, \"y\": float})\n ", "language": "en", "n_whitespaces": 299, "n_words": 121, "vocab_size": 89 }
https://github.com/dask/dask.git
2
nice_decrease
def nice_decrease(self, pid):
    p = psutil.Process(pid)
    try:
        p.nice(p.nice() - 1)
        logger.info('Set nice level of process {} to {} (higher the priority)'.format(pid, p.nice()))
    except psutil.AccessDenied:
        logger.warning(
            'Can not decrease (higher the priority) the nice level of process {} (access denied)'.format(pid)
        )
917f01a8306055b21437deac35333dddd1210e39
13
processes.py
109
Update formater in the Makefile with flake8 and autopep8/autoflake
15,339
0
127
63
31
70,108
40
glances
12
glances/processes.py
Python
9
{ "docstring": "Decrease nice level\n On UNIX this is a number which usually goes from -20 to 20.\n The higher the nice value, the lower the priority of the process.", "language": "en", "n_whitespaces": 41, "n_words": 28, "vocab_size": 24 }
https://github.com/nicolargo/glances.git
4
_get_device_count
def _get_device_count(self):
    if self._is_plaidml:
        self._device_count = self._plaid.device_count
    elif IS_MACOS:
        self._device_count = metal.get_device_count()
    else:
        try:
            self._device_count = pynvml.nvmlDeviceGetCount()
        except pynvml.NVMLError:
            self._device_count = 0
    self._log("debug", "GPU Device count: {}".format(self._device_count))
444762114c1b1ad2e72c871e825373bd74880aba
13
gpu_stats.py
121
Initial somewhat working version
19,777
0
136
70
21
100,267
27
faceswap
14
lib/gpu_stats.py
Python
11
{ "docstring": " Detect the number of GPUs attached to the system and allocate to\n :attr:`_device_count`. ", "language": "en", "n_whitespaces": 21, "n_words": 13, "vocab_size": 11 }
https://github.com/deepfakes/faceswap.git
2
_handle_analyzed_df_message
def _handle_analyzed_df_message(self, type, data):
    key, value = data["key"], data["value"]
    pair, timeframe, candle_type = key

    # Skip any pairs that we don't have in the pairlist?
    # leader_pairlist = self._freqtrade.pairlists._whitelist
    # if pair not in leader_pairlist:
    #     return

    dataframe = json_to_dataframe(value)

    if self._config.get('external_signal', {}).get('remove_signals_analyzed_df', False):
        dataframe = remove_entry_exit_signals(dataframe)

    logger.debug(f"Handling analyzed dataframe for {pair}")
    logger.debug(dataframe.tail())

    # Add the dataframe to the dataprovider
    dataprovider = self._freqtrade.dataprovider
    dataprovider.add_external_df(pair, timeframe, dataframe, candle_type)
2b5f0678772bea0abaf4abe93efc55de43ea3e0e
10
rpc.py
169
Refactoring, minor improvements, data provider improvements
34,859
0
180
98
49
150,869
67
freqtrade
20
freqtrade/rpc/rpc.py
Python
10
{ "docstring": "\n Handles the analyzed dataframes from the Leaders\n\n :param type: The data_type of the data\n :param data: The data\n ", "language": "en", "n_whitespaces": 47, "n_words": 18, "vocab_size": 13 }
https://github.com/freqtrade/freqtrade.git
7
pdf
def pdf(self, x, *args, **kwds):
    # override base class version to correct
    # location for S1 parameterization
    if self._parameterization() == "S0":
        return super().pdf(x, *args, **kwds)
    elif self._parameterization() == "S1":
        (alpha, beta), delta, gamma = self._parse_args(*args, **kwds)
        if np.all(np.reshape(alpha, (1, -1))[0, :] != 1):
            return super().pdf(x, *args, **kwds)
        else:
            # correct location for this parameterisation
            x = np.reshape(x, (1, -1))[0, :]
            x, alpha, beta = np.broadcast_arrays(x, alpha, beta)

            data_in = np.dstack((x, alpha, beta))[0]
            data_out = np.empty(shape=(len(data_in), 1))
            # group data in unique arrays of alpha, beta pairs
            uniq_param_pairs = np.unique(data_in[:, 1:], axis=0)
            for pair in uniq_param_pairs:
                _alpha, _beta = pair
                _delta = (
                    delta + 2 * _beta * gamma * np.log(gamma) / np.pi
                    if _alpha == 1.0
                    else delta
                )
                data_mask = np.all(data_in[:, 1:] == pair, axis=-1)
                _x = data_in[data_mask, 0]
                data_out[data_mask] = (
                    super()
                    .pdf(_x, _alpha, _beta, loc=_delta, scale=gamma)
                    .reshape(len(_x), 1)
                )
            output = data_out.T[0]
            if output.shape == (1,):
                return output[0]
            return output
3a3727c022a361a0bc8a519ebc60e7de8124a5d9
21
__init__.py
498
DOC: stats: add levy_stable pdf/cdf/rvs docstring
69,756
0
703
330
101
242,037
154
scipy
37
scipy/stats/_levy_stable/__init__.py
Python
31
{ "docstring": "Probability density function of the Levy-stable distribution\n\n Parameters\n ----------\n x : array_like\n quantiles\n alpha, beta : array_like\n The shape parameters of the distribution. See the `levy_stable`\n object docstring for more information.\n loc : array_like, optional\n location parameter (default=0)\n scale : array_like, optional\n scale parameter (default=1)\n\n Returns\n -------\n pdf : ndarray\n Probability density function evaluated at x\n ", "language": "en", "n_whitespaces": 192, "n_words": 56, "vocab_size": 40 }
https://github.com/scipy/scipy.git
2
project_columns
def project_columns(self, columns):
    if columns == self.columns:
        return self
    func = copy.deepcopy(self)
    func._columns = columns
    return func
b946406a30cd12cd6989df3440011a734441a200
8
core.py
53
Add from_map function to Dask-DataFrame (#8911)
36,647
0
63
32
13
156,466
17
dask
7
dask/dataframe/io/orc/core.py
Python
6
{ "docstring": "Return a new ORCFunctionWrapper object with\n a sub-column projection.\n ", "language": "en", "n_whitespaces": 23, "n_words": 9, "vocab_size": 8 }
https://github.com/dask/dask.git
1
replace_embedding
def replace_embedding(embedding, masks):
    # currently we donnot support replace the embedding layer
    # because we donnot have the corressponding pruner
    return embedding
97d067e614243f06ed1f8e2d389512977fff8828
6
compress_modules.py
20
Speedup enhancement (#4925)
24,867
0
34
10
17
113,257
22
nni
3
nni/compression/pytorch/speedup/compress_modules.py
Python
2
{ "docstring": "\n Replace the embedding layer according the infered masks.\n We replace the embedding layer according the weight masks,\n ", "language": "en", "n_whitespaces": 27, "n_words": 17, "vocab_size": 11 }
https://github.com/microsoft/nni.git
5
get_edit_handler
def get_edit_handler(cls):
    if hasattr(cls, "edit_handler"):
        edit_handler = cls.edit_handler
    else:
        # construct a TabbedInterface made up of content_panels, promote_panels
        # and settings_panels, skipping any which are empty
        tabs = []

        if cls.content_panels:
            tabs.append(ObjectList(cls.content_panels, heading=gettext_lazy("Content")))
        if cls.promote_panels:
            tabs.append(ObjectList(cls.promote_panels, heading=gettext_lazy("Promote")))
        if cls.settings_panels:
            tabs.append(
                ObjectList(
                    cls.settings_panels,
                    heading=gettext_lazy("Settings"),
                    classname="settings",
                )
            )

        edit_handler = TabbedInterface(tabs, base_form_class=cls.base_form_class)

    return edit_handler.bind_to_model(cls)


Page.get_edit_handler = get_edit_handler


@receiver(setting_changed)
470d39e1fe86084f729997f7c4e13f551e7e8c73
@receiver(setting_changed)
18
panels.py
216
Split out bind_to(model) into a separate bind_to_model method
16,572
1
253
118
47
76,705
56
wagtail
19
wagtail/admin/panels.py
Python
19
{ "docstring": "\n Get the panel to use in the Wagtail admin when editing this page type.\n ", "language": "en", "n_whitespaces": 21, "n_words": 14, "vocab_size": 13 }
https://github.com/wagtail/wagtail.git
4
descendants_at_distance
def descendants_at_distance(G, source, distance):
    if source not in G:
        raise nx.NetworkXError(f"The node {source} is not in the graph.")

    bfs_generator = nx.bfs_layers(G, source)
    for i, layer in enumerate(bfs_generator):
        if i == distance:
            return set(layer)
    return set()
4a019f04d0e304ecd2f28b15d854e1282e03461d
11
breadth_first_search.py
96
Adds ```nx.bfs_layers``` method (#5879) * reformatted the files * reformatted the files * added final changes * changed descendants_at_distance * fixed comment in bfs_layers * fixed comment in bfs_layers
42,274
0
75
58
30
177,116
35
networkx
12
networkx/algorithms/traversal/breadth_first_search.py
Python
8
{ "docstring": "Returns all nodes at a fixed `distance` from `source` in `G`.\n\n Parameters\n ----------\n G : NetworkX graph\n A graph\n source : node in `G`\n distance : the distance of the wanted nodes from `source`\n\n Returns\n -------\n set()\n The descendants of `source` in `G` at the given `distance` from `source`\n\n Examples\n --------\n >>> G = nx.path_graph(5)\n >>> nx.descendants_at_distance(G, 2, 2)\n {0, 4}\n >>> H = nx.DiGraph()\n >>> H.add_edges_from([(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)])\n >>> nx.descendants_at_distance(H, 0, 2)\n {3, 4, 5, 6}\n >>> nx.descendants_at_distance(H, 5, 0)\n {5}\n >>> nx.descendants_at_distance(H, 5, 1)\n set()\n ", "language": "en", "n_whitespaces": 176, "n_words": 96, "vocab_size": 61 }
https://github.com/networkx/networkx.git
1
test_unknown_device
def test_unknown_device(self) -> None:
    url = "/_synapse/admin/v2/users/%s/devices/unknown_device" % urllib.parse.quote(
        self.other_user
    )

    channel = self.make_request(
        "GET",
        url,
        access_token=self.admin_user_tok,
    )

    self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
    self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])

    channel = self.make_request(
        "PUT",
        url,
        access_token=self.admin_user_tok,
    )

    self.assertEqual(200, channel.code, msg=channel.json_body)

    channel = self.make_request(
        "DELETE",
        url,
        access_token=self.admin_user_tok,
    )

    # Delete unknown device returns status 200
    self.assertEqual(200, channel.code, msg=channel.json_body)
c97042f7eef3748e17c90e48a4122389a89c4735
10
test_device.py
215
Use literals in place of `HTTPStatus` constants in tests (#13469)
72,587
0
258
138
31
249,080
50
synapse
18
tests/rest/admin/test_device.py
Python
26
{ "docstring": "\n Tests that a lookup for a device that does not exist returns either HTTPStatus.NOT_FOUND or 200.\n ", "language": "en", "n_whitespaces": 31, "n_words": 16, "vocab_size": 14 }
https://github.com/matrix-org/synapse.git
1
test_get_cache_path
def test_get_cache_path(setup):
    assert get_cache_path() == Path(setup.directory, ".spotdl", ".spotipy")
fa2ad657482aca9dc628e6d7062b8badf2706bb6
9
test_config.py
39
v4 init
5,356
0
14
21
8
30,157
8
spotify-downloader
5
tests/utils/test_config.py
Python
2
{ "docstring": "\n Tests if the path to the cache file is correct.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 9 }
https://github.com/spotDL/spotify-downloader.git
2
_get_cluster_uid
def _get_cluster_uid(self) -> str:
    # Default to an environment variable
    env_cluster_uid = os.environ.get("PREFECT_KUBERNETES_CLUSTER_UID")
    if env_cluster_uid:
        return env_cluster_uid

    # Read the UID from the cluster namespace
    with self.get_client() as client:
        namespace = client.read_namespace("kube-system")
    cluster_uid = namespace.metadata.uid

    return cluster_uid
9ab65f6480a31ba022d9846fdfbfca1d17da8164
11
kubernetes.py
91
Add `PREFECT_KUBERNETES_CLUSTER_UID` to allow bypass of `kube-system` namespace read (#7864) Co-authored-by: Peyton <[email protected]>
11,992
0
115
49
29
60,149
37
prefect
14
src/prefect/infrastructure/kubernetes.py
Python
21
{ "docstring": "\n Gets a unique id for the current cluster being used.\n\n There is no real unique identifier for a cluster. However, the `kube-system`\n namespace is immutable and has a persistence UID that we use instead.\n\n PREFECT_KUBERNETES_CLUSTER_UID can be set in cases where the `kube-system`\n namespace cannot be read e.g. when a cluster role cannot be created. If set,\n this variable will be used and we will not attempt to read the `kube-system`\n namespace.\n\n See https://github.com/kubernetes/kubernetes/issues/44954\n ", "language": "en", "n_whitespaces": 138, "n_words": 74, "vocab_size": 53 }
https://github.com/PrefectHQ/prefect.git
1
get_default_mesh
def get_default_mesh(self):
    return self._default_mesh


LayoutMap.get.__doc__ = LayoutMap.__getitem__.__doc__


@keras_export("keras.dtensor.experimental.layout_map_scope", v1=[])
@contextlib.contextmanager
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@keras_export("keras.dtensor.experimental.layout_map_scope", v1=[]) @contextlib.contextmanager
8
layout_map.py
60
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,492
1
21
10
10
270,593
10
keras
11
keras/dtensor/layout_map.py
Python
2
{ "docstring": "Return the default `Mesh` set at instance creation.\n\n The `Mesh` can be used to create default replicated `Layout` when there\n isn't a match of the input string query.\n ", "language": "en", "n_whitespaces": 49, "n_words": 28, "vocab_size": 25 }
https://github.com/keras-team/keras.git
4
state
def state(self) -> MediaPlayerState:
    if self._tv.on and (self._tv.powerstate == "On" or self._tv.powerstate is None):
        return MediaPlayerState.ON
    return MediaPlayerState.OFF
52b5e1779f1ed6e5005dc0bdff4137040d7216fb
11
media_player.py
68
Use new media player enums [p] (#78058)
105,754
0
50
41
17
306,974
18
core
8
homeassistant/components/philips_js/media_player.py
Python
5
{ "docstring": "Get the device state. An exception means OFF state.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
https://github.com/home-assistant/core.git
1
to_json
def to_json(self, **kwargs):
    config = self.get_config()
    tokenizer_config = {
        "class_name": self.__class__.__name__,
        "config": config,
    }
    return json.dumps(tokenizer_config, **kwargs)


@keras_export("keras.preprocessing.text.tokenizer_from_json")
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@keras_export("keras.preprocessing.text.tokenizer_from_json")
10
text.py
82
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,467
1
74
42
17
275,788
18
keras
11
keras/preprocessing/text.py
Python
7
{ "docstring": "Returns a JSON string containing the tokenizer configuration.\n\n To load a tokenizer from a JSON string, use\n `keras.preprocessing.text.tokenizer_from_json(json_string)`.\n\n Args:\n **kwargs: Additional keyword arguments\n to be passed to `json.dumps()`.\n\n Returns:\n A JSON string containing the tokenizer configuration.\n ", "language": "en", "n_whitespaces": 108, "n_words": 36, "vocab_size": 25 }
https://github.com/keras-team/keras.git
1
test_delete_view
def test_delete_view(self):
    delete_dict = {"post": "yes"}
    delete_url = reverse("admin:admin_views_article_delete", args=(self.a1.pk,))

    # add user should not be able to delete articles
    self.client.force_login(self.adduser)
    response = self.client.get(delete_url)
    self.assertEqual(response.status_code, 403)
    post = self.client.post(delete_url, delete_dict)
    self.assertEqual(post.status_code, 403)
    self.assertEqual(Article.objects.count(), 3)
    self.client.logout()

    # view user should not be able to delete articles
    self.client.force_login(self.viewuser)
    response = self.client.get(delete_url)
    self.assertEqual(response.status_code, 403)
    post = self.client.post(delete_url, delete_dict)
    self.assertEqual(post.status_code, 403)
    self.assertEqual(Article.objects.count(), 3)
    self.client.logout()

    # Delete user can delete
    self.client.force_login(self.deleteuser)
    response = self.client.get(
        reverse("admin:admin_views_section_delete", args=(self.s1.pk,))
    )
    self.assertContains(response, "<h2>Summary</h2>")
    self.assertContains(response, "<li>Articles: 3</li>")
    # test response contains link to related Article
    self.assertContains(response, "admin_views/article/%s/" % self.a1.pk)

    response = self.client.get(delete_url)
    self.assertContains(response, "admin_views/article/%s/" % self.a1.pk)
    self.assertContains(response, "<h2>Summary</h2>")
    self.assertContains(response, "<li>Articles: 1</li>")
    post = self.client.post(delete_url, delete_dict)
    self.assertRedirects(post, self.index_url)
    self.assertEqual(Article.objects.count(), 2)
    self.assertEqual(len(mail.outbox), 1)
    self.assertEqual(mail.outbox[0].subject, "Greetings from a deleted object")
    article_ct = ContentType.objects.get_for_model(Article)
    logged = LogEntry.objects.get(content_type=article_ct, action_flag=DELETION)
    self.assertEqual(logged.object_id, str(self.a1.pk))
9c19aff7c7561e3a82978a272ecdaad40dda5c00
14
tests.py
624
Refs #33476 -- Reformatted code with Black.
52,091
0
410
387
71
207,766
126
django
40
tests/admin_views/tests.py
Python
36
{ "docstring": "Delete view should restrict access and actually delete items.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/django/django.git
4
tasks_from_url
def tasks_from_url(file_upload_ids, project, request, url):
    # process URL with tasks
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    try:
        filename = url.rsplit('/', 1)[-1]
        with urlopen(url, context=ctx) as file:  # nosec
            # check size
            meta = file.info()
            file.size = int(meta.get("Content-Length"))
            file.urlopen = True
            check_file_sizes_and_number({url: file})
            file_content = file.read()
            if isinstance(file_content, str):
                file_content = file_content.encode()
            file_upload = create_file_upload(request, project, SimpleUploadedFile(filename, file_content))
            file_upload_ids.append(file_upload.id)
            tasks, found_formats, data_keys = FileUpload.load_tasks_from_uploaded_files(project, file_upload_ids)

    except ValidationError as e:
        raise e
    except Exception as e:
        raise ValidationError(str(e))
    return data_keys, found_formats, tasks, file_upload_ids
d8d6a0554bfd263f8ce12ff3ce5a69986edd9bc0
15
uploader.py
291
fix: DEV-2361: Fix bandit check in LabelStudio Opensource (#2379)
42,529
0
266
179
62
177,869
84
label-studio
40
label_studio/data_import/uploader.py
Python
22
{ "docstring": " Download file using URL and read tasks from it\n ", "language": "en", "n_whitespaces": 13, "n_words": 9, "vocab_size": 9 }
https://github.com/heartexlabs/label-studio.git
4
_multi_worker_concat
def _multi_worker_concat(v, strategy):
    replicas = strategy.gather(v, axis=0)
    # v might not have the same shape on different replicas
    if _is_per_replica_instance(v):
        shapes = tf.concat(
            [
                tf.expand_dims(tf.shape(single_value)[0], axis=0)
                for single_value in v.values
            ],
            axis=0,
        )
        all_shapes = strategy.gather(shapes, axis=0)
    else:
        # v is a tensor. This may happen when, say, we have 2x1 multi-worker.
        all_shapes = strategy.gather(
            tf.expand_dims(tf.shape(v)[0], axis=0), axis=0
        )

    replicas = tf.split(
        replicas,
        num_or_size_splits=all_shapes,
        num=strategy.num_replicas_in_sync,
    )
    ordered_replicas = []
    num_replicas_per_worker = len(strategy.extended.worker_devices)
    for replica_id in range(num_replicas_per_worker):
        ordered_replicas += replicas[replica_id::num_replicas_per_worker]
    return concat(ordered_replicas)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
16
training.py
248
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,811
0
258
161
62
271,583
81
keras
26
keras/engine/training.py
Python
25
{ "docstring": "Order PerReplica objects for CollectiveAllReduceStrategy and concat.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/keras-team/keras.git
2
shape
def shape(source, kind=None):
    return FunctionCall(
        'shape',
        [_printable(source)] + ([_printable(kind)] if kind else [])
    )
498015021131af4dbb07eb110e5badaba8250c7b
13
fnodes.py
59
Updated import locations
47,541
0
44
36
14
196,041
14
sympy
5
sympy/codegen/fnodes.py
Python
6
{ "docstring": " Creates an AST node for a function call to Fortran's \"shape(...)\"\n\n Parameters\n ==========\n\n source : Symbol or String\n kind : expr\n\n Examples\n ========\n\n >>> from sympy import fcode\n >>> from sympy.codegen.fnodes import shape\n >>> shp = shape('x')\n >>> fcode(shp, source_format='free')\n 'shape(x)'\n\n ", "language": "en", "n_whitespaces": 78, "n_words": 41, "vocab_size": 35 }
https://github.com/sympy/sympy.git
3
test_interactive_annotating_with_drafts
def test_interactive_annotating_with_drafts(business_client, configured_project):
    # create project with predefined task set
    ml_backend = configured_project.ml_backends.first()
    ml_backend.is_interactive = True
    ml_backend.save()

    users = list(User.objects.all())

    task = configured_project.tasks.first()
    AnnotationDraft.objects.create(task=task, user=users[0], result={}, lead_time=1)
    AnnotationDraft.objects.create(task=task, user=users[1], result={}, lead_time=2)

    # run prediction
    with requests_mock.Mocker(real_http=True) as m:
        m.register_uri('POST', f'{ml_backend.url}/predict', json={'results': [{'x': 'x'}]}, status_code=200)

        r = business_client.post(
            f'/api/ml/{ml_backend.pk}/interactive-annotating',
            data=json.dumps(
                {
                    'task': task.id,
                    'context': {'y': 'y'},
                }
            ),
            content_type="application/json",
        )
        r.status_code = 200

        result = r.json()

        assert 'data' in result
        assert 'x' in result['data']
        assert result['data']['x'] == 'x'

        history = [req for req in m.request_history if 'predict' in req.path][0]
        assert history.text
        js = json.loads(history.text)
        assert len(js['tasks'][0]['drafts']) == 1
4ec4614e5e8b74795ecf8620e414f0340c6b94ef
18
test_predictions.py
448
fix: DEV-2138: In interactive prediction only current user's draft should be sent (#2233) * fix: DEV-2138: In interactive prediction only current user's draft should be sent * Add test to check drafts in interactive prediction Co-authored-by: hlomzik <[email protected]>
42,498
0
326
260
74
177,774
97
label-studio
43
label_studio/tests/test_predictions.py
Python
29
{ "docstring": "\n Test interactive annotating with drafts\n :param business_client:\n :param configured_project:\n :return:\n ", "language": "en", "n_whitespaces": 26, "n_words": 10, "vocab_size": 9 }
https://github.com/heartexlabs/label-studio.git
2
_parse_distro_release_file
def _parse_distro_release_file(self, filepath):
    # type: (str) -> Dict[str, str]
    try:
        with open(filepath) as fp:
            # Only parse the first line. For instance, on SLES there
            # are multiple lines. We don't want them...
            return self._parse_distro_release_content(fp.readline())
    except (OSError, IOError):
        # Ignore not being able to read a specific, seemingly version
        # related file.
        # See https://github.com/python-distro/distro/issues/162
        return {}
f3166e673fe8d40277b804d35d77dcdb760fc3b3
13
distro.py
74
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,218
0
185
39
51
20,072
57
pipenv
9
pipenv/patched/notpip/_vendor/distro.py
Python
6
{ "docstring": "\n Parse a distro release file.\n\n Parameters:\n\n * filepath: Path name of the distro release file.\n\n Returns:\n A dictionary containing all information items.\n ", "language": "en", "n_whitespaces": 69, "n_words": 22, "vocab_size": 19 }
https://github.com/pypa/pipenv.git
5
save_hyperparameters
def save_hyperparameters(self, ignore=[]):
    frame = inspect.currentframe().f_back
    _, _, _, local_vars = inspect.getargvalues(frame)
    self.hparams = {k: v for k, v in local_vars.items()
                    if k not in set(ignore + ['self']) and not k.startswith('_')}
    for k, v in self.hparams.items():
        setattr(self, k, v)
19aba1f059efad45e1466d47954b2cf54d45b106
15
mxnet.py
150
simplify d2l lib
74,148
0
105
94
25
253,601
36
d2l-en
17
d2l/mxnet.py
Python
7
{ "docstring": "Save function arguments into class attributes.\n\n Defined in :numref:`sec_utils`", "language": "en", "n_whitespaces": 15, "n_words": 9, "vocab_size": 9 }
https://github.com/d2l-ai/d2l-en.git
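A self-contained sketch of how save_hyperparameters is typically used: a mixin captures the caller's constructor arguments via the parent frame and attaches them as attributes. The Trainer class and its arguments below are illustrative, not part of the d2l source.

import inspect

class HyperParameters:
    def save_hyperparameters(self, ignore=[]):
        # Walk up to the caller's frame (usually __init__) and grab its locals.
        frame = inspect.currentframe().f_back
        _, _, _, local_vars = inspect.getargvalues(frame)
        self.hparams = {k: v for k, v in local_vars.items()
                        if k not in set(ignore + ['self']) and not k.startswith('_')}
        for k, v in self.hparams.items():
            setattr(self, k, v)

class Trainer(HyperParameters):
    def __init__(self, lr, batch_size=32):
        self.save_hyperparameters(ignore=['batch_size'])

t = Trainer(lr=0.1)
print(t.hparams)  # {'lr': 0.1}
print(t.lr)       # 0.1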
1
test_str_structvalue
def test_str_structvalue(self):
    block = SectionBlock()
    value = block.to_python({"title": "Hello", "body": "<i>italic</i> world"})
    result = str(value)
    self.assertNotIn("<h1>", result)
    # The expected rendering should correspond to the native representation of an OrderedDict:
    # "StructValue([('title', u'Hello'), ('body', <wagtail.core.rich_text.RichText object at 0xb12d5eed>)])"
    # - give or take some quoting differences between Python versions
    self.assertIn("StructValue", result)
    self.assertIn("title", result)
    self.assertIn("Hello", result)
d10f15e55806c6944827d801cd9c2d53f5da4186
11
test_blocks.py
123
Reformat with black
16,224
0
132
65
48
74,148
55
wagtail
10
wagtail/core/tests/test_blocks.py
Python
8
{ "docstring": "\n The str() representation of a StructValue should NOT render the template, as that's liable\n to cause an infinite loop if any debugging / logging code attempts to log the fact that\n it rendered a template with this object in the context:\n https://github.com/wagtail/wagtail/issues/2874\n https://github.com/jazzband/django-debug-toolbar/issues/950\n ", "language": "en", "n_whitespaces": 86, "n_words": 43, "vocab_size": 39 }
https://github.com/wagtail/wagtail.git
2
revert
def revert(self):
    if self._backup:
        self.set_state(self._backup)
        self._backup = None
b3587b52b25077f68116b9852b041d33e7fc6601
10
flow.py
42
make it black!
73,697
0
44
24
8
251,362
8
mitmproxy
4
mitmproxy/flow.py
Python
4
{ "docstring": "\n Revert to the last backed up state.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
https://github.com/mitmproxy/mitmproxy.git
1
test_fetch_period_api_with_no_timestamp
async def test_fetch_period_api_with_no_timestamp(recorder_mock, hass, hass_client):
    await async_setup_component(hass, "history", {})
    client = await hass_client()
    response = await client.get("/api/history/period")
    assert response.status == HTTPStatus.OK
31a787558fd312331b55e5c2c4b33341fc3601fc
10
test_init.py
75
Ensure recorder test fixture is setup before hass fixture (#80528) * Ensure recorder test fixture is setup before hass fixture * Adjust more tests
88,517
0
36
43
18
289,375
21
core
11
tests/components/history/test_init.py
Python
5
{ "docstring": "Test the fetch period view for history with no timestamp.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/home-assistant/core.git
3
test_print_info_with_numpy
def test_print_info_with_numpy(self, do_validation):
    model = keras.models.Sequential(
        [keras.layers.Dense(1, input_shape=(2,))]
    )
    model.compile(loss="mse", optimizer="sgd")

    dataset = np.arange(200).reshape(100, 2)

    if do_validation:
        val_data = (
            np.arange(100).reshape(50, 2),
            np.arange(50).reshape(50, 1),
        )
    else:
        val_data = None

    mock_stdout = io.StringIO()
    with tf.compat.v1.test.mock.patch.object(sys, "stdout", mock_stdout):
        model.fit(
            dataset, batch_size=10, epochs=2, validation_data=val_data
        )

    self.assertIn("Train on 100 samples", mock_stdout.getvalue())
    if do_validation:
        self.assertIn(", validate on 50 samples", mock_stdout.getvalue())
84afc5193d38057e2e2badf9c889ea87d80d8fbf
14
training_arrays_test.py
280
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,835
0
254
175
43
271,626
55
keras
35
keras/engine/training_arrays_test.py
Python
21
{ "docstring": "Print training info should work with val datasets (b/133391839).", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/keras-team/keras.git
1
q_mean_variance
def q_mean_variance(self, x_start, t):
    mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
    variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
    log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
    return mean, variance, log_variance
ca86da3a30c4e080d4db8c25fca73de843663cb4
11
ddpm.py
94
release more models
36,898
0
62
66
20
157,317
27
stablediffusion
12
ldm/models/diffusion/ddpm.py
Python
5
{ "docstring": "\n Get the distribution q(x_t | x_0).\n :param x_start: the [N x C x ...] tensor of noiseless inputs.\n :param t: the number of diffusion steps (minus 1). Here, 0 means one step.\n :return: A tuple (mean, variance, log_variance), all of x_start's shape.\n ", "language": "en", "n_whitespaces": 78, "n_words": 42, "vocab_size": 36 }
https://github.com/Stability-AI/stablediffusion.git
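The same closed form, q(x_t | x_0) = N(sqrt(alpha_bar_t) * x_0, (1 - alpha_bar_t) I), as a standalone NumPy sketch; the linear beta schedule below is illustrative, not the model's actual configuration.

import numpy as np

betas = np.linspace(1e-4, 2e-2, 1000)        # assumed toy noise schedule
alphas_cumprod = np.cumprod(1.0 - betas)     # alpha_bar_t

def q_mean_variance_np(x_start, t):
    mean = np.sqrt(alphas_cumprod[t]) * x_start
    variance = 1.0 - alphas_cumprod[t]
    return mean, variance, np.log(variance)

x0 = np.ones(4)
mean, var, log_var = q_mean_variance_np(x0, t=500)
print(mean, var)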
3
getpalette
def getpalette(self, rawmode="RGB"):
    self.load()
    try:
        mode = self.im.getpalettemode()
    except ValueError:
        return None  # no palette
    if rawmode is None:
        rawmode = mode
    return list(self.im.getpalette(mode, rawmode))
6be87277f71948bc7e4b945c46660cac3e5ce919
11
Image.py
91
Allow rawmode None to return the palette in the current mode
69,846
0
101
53
21
242,362
25
Pillow
9
src/PIL/Image.py
Python
9
{ "docstring": "\n Returns the image palette as a list.\n\n :param rawmode: The mode in which to return the palette. ``None`` will\n return the palette in its current mode.\n :returns: A list of color values [r, g, b, ...], or None if the\n image has no palette.\n ", "language": "en", "n_whitespaces": 93, "n_words": 44, "vocab_size": 36 }
https://github.com/python-pillow/Pillow.git
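A short usage sketch for Image.getpalette (palette values are arbitrary): a "P" mode image returns a flat [r, g, b, r, g, b, ...] list, while an image without a palette returns None.

from PIL import Image

im = Image.new("P", (1, 1))
im.putpalette([0, 0, 0, 255, 0, 0])            # black, red
print(im.getpalette()[:6])                     # [0, 0, 0, 255, 0, 0]
print(Image.new("RGB", (1, 1)).getpalette())   # None - no palette in RGB mode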
1
test_get_mutable_invalid_value
def test_get_mutable_invalid_value(self, conf):
    option = 'keyhint.blacklist'
    obj = conf.get_mutable_obj(option)
    assert obj == []

    obj.append(42)
    with pytest.raises(configexc.ValidationError):
        conf.update_mutables()

    obj = conf.get_mutable_obj(option)
    assert obj == []
8eecf3af83fc9a4e465744a83e86856fe1c6df10
10
test_config.py
101
config: Discard prior mutables before applying If we only clear existing mutables *after* applying, we get into an inconsistent state if there was an error in one of the config values: The improper value lingers around in self._mutables, and then gets returned when get_mutable_obj() (or update_mutables()) gets called the next time. Reproducer: qutebrowser --debug --temp-basedir \ ':config-dict-add content.javascript.log_message.levels example.org bla' \ ':later 1000 config-dict-add content.javascript.log_message.levels example.org bla' Results in: ERROR: Invalid value 'bla' - expected a value of type list but got str. ERROR: example.org already exists in content.javascript.log_message - use --replace to overwrite! Fixes the second part of #7343. nb: As before, the mutable updating actually gets interrupted by a failing update, instead of it e.g. collecting all errors but carrying on. With this change, the remaining updates will thus also be discarded, but that does not seem to be a problem with how mutables are currently used. Ideally, we should get rid of the mutable handling entirely anyways, at least for qutebrowser internal code - see #4344.
117,490
0
91
58
15
321,036
24
qutebrowser
12
tests/unit/config/test_config.py
Python
9
{ "docstring": "Make sure invalid values aren't stored in mutables.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/qutebrowser/qutebrowser.git
2
keras_model_summary
def keras_model_summary(name, data, step=None):
    summary_metadata = tf.compat.v1.SummaryMetadata()
    # Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode for
    # the rationale.
    summary_metadata.plugin_data.plugin_name = "graph_keras_model"
    # version number = 1
    summary_metadata.plugin_data.content = b"1"

    try:
        json_string = data.to_json()
    except Exception as exc:
        # An exception should not break a model code.
        logging.warning(
            "Model failed to serialize as JSON. Ignoring... %s", exc
        )
        return False

    with tf.summary.experimental.summary_scope(
        name, "graph_keras_model", [data, step]
    ) as (tag, _):
        with tf.device("cpu:0"):
            tensor = tf.constant(json_string, dtype=tf.string)
            return tf.summary.write(
                tag=tag, tensor=tensor, step=step, metadata=summary_metadata
            )


@keras_export("keras.callbacks.TensorBoard", v1=[])
3613c3defc39c236fb1592c4f7ba1a9cc887343a
@keras_export("keras.callbacks.TensorBoard", v1=[])
14
callbacks.py
239
Remove pylint comments. PiperOrigin-RevId: 452353044
82,645
1
215
133
71
278,640
87
keras
31
keras/callbacks.py
Python
19
{ "docstring": "Writes a Keras model as JSON to as a Summary.\n\n Writing the Keras model configuration allows the TensorBoard graph plugin to\n render a conceptual graph, as opposed to graph of ops. In case the model\n fails to serialize as JSON, it ignores and returns False.\n\n Args:\n name: A name for this summary. The summary tag used for TensorBoard will\n be this name prefixed by any active name scopes.\n data: A Keras Model to write.\n step: Explicit `int64`-castable monotonic step value for this summary. If\n omitted, this defaults to `tf.summary.experimental.get_step()`, which\n must not be None.\n\n Returns:\n True on success, or False if no summary was written because no default\n summary writer was available.\n\n Raises:\n ValueError: if a default writer exists, but no step was provided and\n `tf.summary.experimental.get_step()` is None.\n ", "language": "en", "n_whitespaces": 207, "n_words": 128, "vocab_size": 87 }
https://github.com/keras-team/keras.git
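A hedged usage sketch: keras_model_summary is normally driven by the TensorBoard callback with write_graph=True, but given the keras/callbacks.py path above it can also be called directly under a default summary writer. The import path and log directory below are assumptions.

import tensorflow as tf
from tensorflow import keras
from keras.callbacks import keras_model_summary  # assumes the standalone keras package layout shown above

model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])

writer = tf.summary.create_file_writer("/tmp/tb_logs")  # illustrative log dir
with writer.as_default():
    # Serializes the model config as JSON for TensorBoard's conceptual graph plugin.
    wrote = keras_model_summary("conceptual_graph", model, step=0)
print(wrote)  # True if the summary was written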
10
_update_counters
def _update_counters(self, ti_status, session=None):
    tis_to_be_scheduled = []
    refreshed_tis = []
    TI = TaskInstance

    filter_for_tis = TI.filter_for_tis(list(ti_status.running.values()))
    if filter_for_tis is not None:
        refreshed_tis = session.query(TI).filter(filter_for_tis).all()

    for ti in refreshed_tis:
        # Here we remake the key by subtracting 1 to match in memory information
        reduced_key = ti.key.reduced
        if ti.state == TaskInstanceState.SUCCESS:
            ti_status.succeeded.add(reduced_key)
            self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
            ti_status.running.pop(reduced_key)
            continue
        if ti.state == TaskInstanceState.SKIPPED:
            ti_status.skipped.add(reduced_key)
            self.log.debug("Task instance %s skipped. Don't rerun.", ti)
            ti_status.running.pop(reduced_key)
            continue
        if ti.state == TaskInstanceState.FAILED:
            self.log.error("Task instance %s failed", ti)
            ti_status.failed.add(reduced_key)
            ti_status.running.pop(reduced_key)
            continue
        # special case: if the task needs to run again put it back
        if ti.state == TaskInstanceState.UP_FOR_RETRY:
            self.log.warning("Task instance %s is up for retry", ti)
            ti_status.running.pop(reduced_key)
            ti_status.to_run[ti.key] = ti
        # special case: if the task needs to be rescheduled put it back
        elif ti.state == TaskInstanceState.UP_FOR_RESCHEDULE:
            self.log.warning("Task instance %s is up for reschedule", ti)
            # During handling of reschedule state in ti._handle_reschedule, try number is reduced
            # by one, so we should not use reduced_key to avoid key error
            ti_status.running.pop(ti.key)
            ti_status.to_run[ti.key] = ti
        # special case: The state of the task can be set to NONE by the task itself
        # when it reaches concurrency limits. It could also happen when the state
        # is changed externally, e.g. by clearing tasks from the ui. We need to cover
        # for that as otherwise those tasks would fall outside of the scope of
        # the backfill suddenly.
        elif ti.state == State.NONE:
            self.log.warning(
                "FIXME: task instance %s state was set to none externally or "
                "reaching concurrency limits. Re-adding task to queue.",
                ti,
            )
            tis_to_be_scheduled.append(ti)
            ti_status.running.pop(reduced_key)
            ti_status.to_run[ti.key] = ti

    # Batch schedule of task instances
    if tis_to_be_scheduled:
        filter_for_tis = TI.filter_for_tis(tis_to_be_scheduled)
        session.query(TI).filter(filter_for_tis).update(
            values={TI.state: TaskInstanceState.SCHEDULED}, synchronize_session=False
        )
        session.flush()
6fc6edf6af7f676bfa54ff3a2e6e6d2edb938f2e
14
backfill_job.py
578
Make `airflow dags test` be able to execute Mapped Tasks (#21210) * Make `airflow dags test` be able to execute Mapped Tasks In order to do this there were two steps required: - The BackfillJob needs to know about mapped tasks, both to expand them, and in order to update it's TI tracking - The DebugExecutor needed to "unmap" the mapped task to get the real operator back I was testing this with the following dag: ``` from airflow import DAG from airflow.decorators import task from airflow.operators.python import PythonOperator import pendulum @task def make_list(): return list(map(lambda a: f'echo "{a!r}"', [1, 2, {'a': 'b'}])) def consumer(*args): print(repr(args)) with DAG(dag_id='maptest', start_date=pendulum.DateTime(2022, 1, 18)) as dag: PythonOperator(task_id='consumer', python_callable=consumer).map(op_args=make_list()) ``` It can't "unmap" decorated operators successfully yet, so we're using old-school PythonOperator We also just pass the whole value to the operator, not just the current mapping value(s) * Always have a `task_group` property on DAGNodes And since TaskGroup is a DAGNode, we don't need to store parent group directly anymore -- it'll already be stored * Add "integation" tests for running mapped tasks via BackfillJob * Only show "Map Index" in Backfill report when relevant Co-authored-by: Tzu-ping Chung <[email protected]>
8,254
0
1,009
350
148
44,415
279
airflow
43
airflow/jobs/backfill_job.py
Python
47
{ "docstring": "\n Updates the counters per state of the tasks that were running. Can re-add\n to tasks to run in case required.\n\n :param ti_status: the internal status of the backfill job tasks\n ", "language": "en", "n_whitespaces": 59, "n_words": 30, "vocab_size": 23 }
https://github.com/apache/airflow.git
1
user_config_dir
def user_config_dir(self) -> str:
    return self._append_app_name_and_version(os.path.expanduser("~/Library/Preferences/"))
f3166e673fe8d40277b804d35d77dcdb760fc3b3
10
macos.py
40
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,275
0
20
22
6
20,223
6
pipenv
7
pipenv/patched/notpip/_vendor/platformdirs/macos.py
Python
3
{ "docstring": ":return: config directory tied to the user, e.g. ``~/Library/Preferences/$appname/$version``", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/pypa/pipenv.git
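For context, a sketch of how the public platformdirs API exposes this (upstream package assumed rather than pipenv's vendored copy); on macOS the result follows the ~/Library/Preferences/<appname>/<version> logic shown above for this vendored version.

from platformdirs import PlatformDirs

dirs = PlatformDirs(appname="MyApp", version="1.2")  # illustrative app name and version
print(dirs.user_config_dir)  # e.g. ~/Library/Preferences/MyApp/1.2 on macOS for this version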
6
future_sle_exists
def future_sle_exists(args, sl_entries=None):
    key = (args.voucher_type, args.voucher_no)

    if validate_future_sle_not_exists(args, key, sl_entries):
        return False
    elif get_cached_data(args, key):
        return True

    if not sl_entries:
        sl_entries = get_sle_entries_against_voucher(args)
        if not sl_entries:
            return

    or_conditions = get_conditions_to_validate_future_sle(sl_entries)

    data = frappe.db.sql(
        """
        select item_code, warehouse, count(name) as total_row
        from `tabStock Ledger Entry` force index (item_warehouse)
        where
            ({})
            and timestamp(posting_date, posting_time)
                >= timestamp(%(posting_date)s, %(posting_time)s)
            and voucher_no != %(voucher_no)s
            and is_cancelled = 0
        GROUP BY
            item_code, warehouse
        """.format(
            " or ".join(or_conditions)
        ),
        args,
        as_dict=1,
    )

    for d in data:
        frappe.local.future_sle[key][(d.item_code, d.warehouse)] = d.total_row

    return len(data)
494bd9ef78313436f0424b918f200dab8fc7c20b
13
stock_controller.py
190
style: format code with black
13,986
0
31
123
41
65,680
52
erpnext
25
erpnext/controllers/stock_controller.py
Python
32
{ "docstring": "\n\t\tselect item_code, warehouse, count(name) as total_row\n\t\tfrom `tabStock Ledger Entry` force index (item_warehouse)\n\t\twhere\n\t\t\t({})\n\t\t\tand timestamp(posting_date, posting_time)\n\t\t\t\t>= timestamp(%(posting_date)s, %(posting_time)s)\n\t\t\tand voucher_no != %(voucher_no)s\n\t\t\tand is_cancelled = 0\n\t\tGROUP BY\n\t\t\titem_code, warehouse\n\t\t", "language": "en", "n_whitespaces": 23, "n_words": 33, "vocab_size": 30 }
https://github.com/frappe/erpnext.git
8
normalize_histogram_results
def normalize_histogram_results(fields, histogram_params, results):
    # zerofill and rename the columns while making sure to adjust for precision
    bucket_maps = {field: {} for field in fields}
    # Only one row in metrics result
    data = results["data"][0]
    for field in fields:
        histogram_column = f"histogram({field})"
        histogram_alias = get_function_alias(histogram_column)
        bucket_maps[field] = {start: height for start, end, height in data[histogram_alias]}

    new_data = {field: [] for field in fields}
    for i in range(histogram_params.num_buckets):
        bucket = histogram_params.start_offset + histogram_params.bucket_size * i
        for field in fields:
            row = {
                "bin": bucket,
                "count": bucket_maps[field].get(bucket, 0),
            }
            # make sure to adjust for the precision if necessary
            if histogram_params.multiplier > 1:
                row["bin"] /= float(histogram_params.multiplier)
            new_data[field].append(row)

    return new_data
6307cf52c4c7f185f9023c6279e565dd7812c202
15
metrics_performance.py
249
feat(mep): Adding histogram support to metrics enhanced perf (#34462) - This uses the metrics dataset to supply histogram data in the same format discover expects - Outlier is currently based on p25 and p75, may change to using tags later
18,673
0
269
157
69
90,529
107
sentry
25
src/sentry/snuba/metrics_performance.py
Python
19
{ "docstring": "\n Normalizes the histogram results by renaming the columns to key and bin\n and make sure to zerofill any missing values.\n\n :param [str] fields: The list of fields for which you want to generate the\n histograms for.\n :param str key_column: The column of the key name.\n :param HistogramParams histogram_params: The histogram parameters used.\n :param any results: The results from the histogram query that may be missing\n bins and needs to be normalized.\n :param str array_column: Array column prefix\n ", "language": "en", "n_whitespaces": 116, "n_words": 77, "vocab_size": 51 }
https://github.com/getsentry/sentry.git
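A simplified, self-contained sketch of the zerofill idea in normalize_histogram_results: every expected bucket gets a row, with a count of 0 where the query returned nothing. The bucket values below are made up.

raw_counts = {0: 5, 20: 2}                    # bucket start -> height, as a query might return
start_offset, bucket_size, num_buckets = 0, 10, 4

rows = []
for i in range(num_buckets):
    bucket = start_offset + bucket_size * i
    rows.append({"bin": bucket, "count": raw_counts.get(bucket, 0)})

print(rows)
# [{'bin': 0, 'count': 5}, {'bin': 10, 'count': 0}, {'bin': 20, 'count': 2}, {'bin': 30, 'count': 0}]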
5
inplace_swap_column
def inplace_swap_column(X, m, n):
    if m < 0:
        m += X.shape[1]
    if n < 0:
        n += X.shape[1]
    if isinstance(X, sp.csc_matrix):
        inplace_swap_row_csr(X, m, n)
    elif isinstance(X, sp.csr_matrix):
        inplace_swap_row_csc(X, m, n)
    else:
        _raise_typeerror(X)
a2c4d8b1f4471f52a4fcf1026f495e637a472568
10
sparsefuncs.py
120
DOC Ensures that inplace_swap_column passes numpydoc validation (#23476) Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: harshit5674 <[email protected]>
76,051
0
85
78
20
260,053
32
scikit-learn
12
sklearn/utils/sparsefuncs.py
Python
11
{ "docstring": "\n Swap two columns of a CSC/CSR matrix in-place.\n\n Parameters\n ----------\n X : sparse matrix of shape (n_samples, n_features)\n Matrix whose two columns are to be swapped. It should be of\n CSR or CSC format.\n\n m : int\n Index of the column of X to be swapped.\n\n n : int\n Index of the column of X to be swapped.\n ", "language": "en", "n_whitespaces": 108, "n_words": 58, "vocab_size": 34 }
https://github.com/scikit-learn/scikit-learn.git
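A usage sketch for inplace_swap_column with SciPy (the matrix values are illustrative); the swap mutates the sparse matrix in place.

import numpy as np
import scipy.sparse as sp
from sklearn.utils.sparsefuncs import inplace_swap_column

X = sp.csr_matrix(np.array([[1.0, 0.0, 3.0],
                            [0.0, 5.0, 0.0]]))
inplace_swap_column(X, 0, 2)
print(X.toarray())
# [[3. 0. 1.]
#  [0. 5. 0.]]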
1
map
def map(self, mapper):
    mapped = self._values.map(mapper)
    return Index(mapped, name=self.name)
521259299f7829da667ba39302ec77acedde9e5e
9
category.py
47
DOC: Improve doc summaries in series.rst (#45237)
39,400
0
30
29
9
163,192
9
pandas
7
pandas/core/indexes/category.py
Python
3
{ "docstring": "\n Map values using input an input mapping or function.\n\n Maps the values (their categories, not the codes) of the index to new\n categories. If the mapping correspondence is one-to-one the result is a\n :class:`~pandas.CategoricalIndex` which has the same order property as\n the original, otherwise an :class:`~pandas.Index` is returned.\n\n If a `dict` or :class:`~pandas.Series` is used any unmapped category is\n mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`\n will be returned.\n\n Parameters\n ----------\n mapper : function, dict, or Series\n Mapping correspondence.\n\n Returns\n -------\n pandas.CategoricalIndex or pandas.Index\n Mapped index.\n\n See Also\n --------\n Index.map : Apply a mapping correspondence on an\n :class:`~pandas.Index`.\n Series.map : Apply a mapping correspondence on a\n :class:`~pandas.Series`.\n Series.apply : Apply more complex functions on a\n :class:`~pandas.Series`.\n\n Examples\n --------\n >>> idx = pd.CategoricalIndex(['a', 'b', 'c'])\n >>> idx\n CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],\n ordered=False, dtype='category')\n >>> idx.map(lambda x: x.upper())\n CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'],\n ordered=False, dtype='category')\n >>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'})\n CategoricalIndex(['first', 'second', 'third'], categories=['first',\n 'second', 'third'], ordered=False, dtype='category')\n\n If the mapping is one-to-one the ordering of the categories is\n preserved:\n\n >>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True)\n >>> idx\n CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],\n ordered=True, dtype='category')\n >>> idx.map({'a': 3, 'b': 2, 'c': 1})\n CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True,\n dtype='category')\n\n If the mapping is not one-to-one an :class:`~pandas.Index` is returned:\n\n >>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'})\n Index(['first', 'second', 'first'], dtype='object')\n\n If a `dict` is used, all unmapped categories are mapped to `NaN` and\n the result is an :class:`~pandas.Index`:\n\n >>> idx.map({'a': 'first', 'b': 'second'})\n Index(['first', 'second', nan], dtype='object')\n ", "language": "en", "n_whitespaces": 734, "n_words": 256, "vocab_size": 131 }
https://github.com/pandas-dev/pandas.git
4
get_original_fromname_by_regex
def get_original_fromname_by_regex(config_string, fromname):
    c = parse_config(config_string)
    for control in c:
        item = c[control].get('regex', {})
        expression = control
        for key in item:
            expression = expression.replace(key, item[key])
        pattern = re.compile(expression)
        full_match = pattern.fullmatch(fromname)
        if full_match:
            return control
    return fromname
583b3cb3b03a36a30b3ce9fe96eb4fb28548a070
13
label_config.py
123
fix: DEV-1462: Fix changing label config for repeater tag (#2725) * fix: DEV-1462: Fix changing label config for repeater tag with created annotations
42,571
0
113
77
26
178,013
37
label-studio
16
label_studio/core/label_config.py
Python
12
{ "docstring": "\n Get from_name from config on from_name key from data after applying regex search or original fromname\n ", "language": "en", "n_whitespaces": 23, "n_words": 16, "vocab_size": 14 }
https://github.com/heartexlabs/label-studio.git