Dataset schema (one column per entry; numeric columns show min-max values, string columns show min-max lengths):

  complexity      int64   1-56
  n_identifiers   int64   1-114
  code            string  lengths 19-12.7k
  path            string  lengths 8-134
  n_ast_nodes     int64   12-2.35k
  ast_errors      string  lengths 0-4.01k
  repo            string  lengths 3-28
  documentation   dict
  n_words         int64   2-866
  language        string  1 distinct value
  vocab_size      int64   2-323
  commit_id       string  lengths 40-40
  file_name       string  lengths 5-79
  id              int64   243-338k
  nloc            int64   1-228
  token_counts    int64   5-1.4k
  fun_name        string  lengths 1-77
  url             string  lengths 31-60
  commit_message  string  lengths 3-15.3k
  n_whitespaces   int64   1-3.23k
  n_ast_errors    int64   0-20
  d_id            int64   74-121k
  ast_levels      int64   4-29

Records follow, one field per line in the column order above (empty ast_errors cells are omitted).
4
11
def check_compressionlib(cls, value):
    try:
        compresser = from_qualified_name(value)
    except (ImportError, AttributeError) as exc:
        raise ValueError(
            f"Failed to import requested compression library: {value!r}."
        ) from exc

    if not callable(getattr(compresser, "compress", None)):
        raise ValueError(
            f"Compression library at {value!r} does not have a 'compress' method."
        )

    if not callable(getattr(compresser, "decompress", None)):
        raise ValueError(
            f"Compression library at {value!r} does not have a 'decompress' method."
        )

    return value
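A minimal usage sketch (hypothetical call, assuming the surrounding Prefect module so that `from_qualified_name` resolves dotted import paths; the `cls` argument is unused here):

    check_compressionlib(None, "zlib")  # returns "zlib": the module has compress/decompress
    check_compressionlib(None, "json")  # raises ValueError: no 'compress' method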
src/prefect/serializers.py
139
prefect
{ "docstring": "\n Check that the given pickle library is importable and has compress/decompress\n methods.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 12 }
62
Python
42
295fd5d4b65dc967d8ddc99817b52d8273301063
serializers.py
59,407
16
75
check_compressionlib
https://github.com/PrefectHQ/prefect.git
Add `CompressedSerializer` for compression of other result serializers (#7164) Co-authored-by: Terrence Dorsey <[email protected]>
226
0
11,900
12
4
10
def prefer_url(self, url1, url2):
    result = url2
    if url1:
        s1 = self.score_url(url1)
        s2 = self.score_url(url2)
        if s1 > s2:
            result = url1
        if result != url2:
            logger.debug('Not replacing %r with %r', url1, url2)
        else:
            logger.debug('Replacing %r with %r', url1, url2)
    return result
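The same score-and-keep pattern in miniature (a hypothetical standalone helper, not distlib's API), for readers who want the preference rule in isolation:

    def prefer(score, url1, url2):
        # keep url2 unless url1 exists and scores strictly higher
        return url1 if (url1 and score(url1) > score(url2)) else url2

    prefer(lambda u: u.startswith("https://"),
           "https://pypi.org/p-1.0.tar.gz",
           "http://mirror.example/p-1.0.tar.gz")
    # -> 'https://pypi.org/p-1.0.tar.gz'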
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/locators.py
113
transferlearning
{ "docstring": "\n Choose one of two URLs where both are candidates for distribution\n archives for the same version of a distribution (for example,\n .tar.gz vs. zip).\n\n The current implementation favours https:// URLs over http://, archives\n from PyPI over those from other locations, wheel compatibility (if a\n wheel) and then the archive name.\n ", "language": "en", "n_whitespaces": 100, "n_words": 50, "vocab_size": 41 }
42
Python
27
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
locators.py
62,018
12
69
prefer_url
https://github.com/jindongwang/transferlearning.git
upd; format
170
0
12,828
13
2
15
def system_exec(command):
    try:
        res = subprocess.run(command.split(' '),
                             stdout=subprocess.PIPE).stdout.decode('utf-8')
    except Exception as e:
        logger.debug('Can not evaluate command {} ({})'.format(command, e))
        res = ''
    return res.rstrip()
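A usage sketch (assuming the surrounding glances module, which provides `subprocess` and `logger`); note the naive `split(' ')` means shell quoting is not supported:

    system_exec('uname -r')               # e.g. '5.15.0-52-generic'
    system_exec('no-such-binary --flag')  # '' (the exception is logged at debug level)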
glances/compat.py
109
glances
{ "docstring": "Execute a system command and return the result as a str", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
24
Python
22
b3c009b22ef6c47a54faa4c8bf4e10bb62caeef4
compat.py
69,944
7
61
system_exec
https://github.com/nicolargo/glances.git
Correct unitary test failed
57
0
15,190
16
1
4
def getdata(im, offset=(0, 0), **params):
src/PIL/GifImagePlugin.py
28
Pillow
{ "docstring": "\n Legacy Method\n\n Return a list of strings representing this image.\n The first string is a local image header, the rest contains\n encoded image data.\n\n To specify duration, add the time in milliseconds,\n e.g. ``getdata(im_frame, duration=1000)``\n\n :param im: Image object\n :param offset: Tuple of (x, y) pixels. Defaults to (0, 0)\n :param \\\\**params: e.g. duration or other encoder info parameters\n :returns: List of bytes containing GIF encoded frame data\n\n ", "language": "en", "n_whitespaces": 102, "n_words": 68, "vocab_size": 59 }
5
Python
5
1997c814abcbc071fb9f289fda021e8d08cad4a7
GifImagePlugin.py
242,759
24
50
getdata
https://github.com/python-pillow/Pillow.git
Move useful comment into docstring
8
0
69,911
6
2
20
def test_shared_deployment_handle(serve_instance):
    ray_dag, _ = get_shared_deployment_handle_dag()

    with DAGNodeNameGenerator() as node_name_generator:
        serve_root_dag = ray_dag.apply_recursive(
            lambda node: transform_ray_dag_to_serve_dag(node, node_name_generator)
        )
    print(f"Serve DAG: \n{serve_root_dag}")
    deployments = extract_deployments_from_serve_dag(serve_root_dag)
    assert len(deployments) == 2
    for deployment in deployments:
        deployment.deploy()

    _validate_consistent_python_output(
        deployments[1], ray_dag, "Combine", input=1, output=4
    )
python/ray/serve/pipeline/tests/test_generate.py
143
ray
{ "docstring": "\n Test we can re-use the same deployment handle multiple times or in\n multiple places, without incorrectly parsing duplicated deployments.\n ", "language": "en", "n_whitespaces": 29, "n_words": 19, "vocab_size": 18 }
40
Python
36
5c06e3f14900e3812061416759c25ff2b88c8a23
test_generate.py
138,804
14
83
test_shared_deployment_handle
https://github.com/ray-project/ray.git
[DAG] add basic plotting on Ray DAGs (#24223) To add basic plotting feature for Ray DAGs. `ray.experimental.dag.plot(dag: DAGNode, to_file=None)` ### Behavior 1. dump the dag plot (Dot) to file. 2. also render the image whenever possible. E.g. if running in Jupyter notebook, the image will not only be saved, but also rendered in the notebook. 3. when to_file is not set (i.e. None), it will be saved to a tempfile for rendering purpose only. This is common when users plot DAGs in notebook env to explore the DAG structure without wanting to save it to a file.
106
0
31,529
13
1
4
async def async_disable_motion_detection(self) -> None:
    self._attr_motion_detection_enabled = False
    self.async_write_ha_state()
homeassistant/components/demo/camera.py
34
core
{ "docstring": "Disable the motion detection in base station (Disarm).", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
9
Python
9
57fd84e20c9e98df52a6e81af1fa84ee86028aa8
camera.py
315,082
4
18
async_disable_motion_detection
https://github.com/home-assistant/core.git
Improve type hints in demo (#74236)
30
0
113,679
7
2
18
def directed_modularity_matrix(G, nodelist=None, weight=None):
    import numpy as np

    if nodelist is None:
        nodelist = list(G)
    A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr")
    k_in = A.sum(axis=0)
    k_out = A.sum(axis=1)
    m = k_in.sum()
    # Expected adjacency matrix
    X = np.outer(k_out, k_in) / m
    return A - X
networkx/linalg/modularitymatrix.py
147
networkx
{ "docstring": "Returns the directed modularity matrix of G.\n\n The modularity matrix is the matrix B = A - <A>, where A is the adjacency\n matrix and <A> is the expected adjacency matrix, assuming that the graph\n is described by the configuration model.\n\n More specifically, the element B_ij of B is defined as\n\n .. math::\n B_{ij} = A_{ij} - k_i^{out} k_j^{in} / m\n\n where :math:`k_i^{in}` is the in degree of node i, and :math:`k_j^{out}` is the out degree\n of node j, with m the number of edges in the graph. When weight is set\n to a name of an attribute edge, Aij, k_i, k_j and m are computed using\n its value.\n\n Parameters\n ----------\n G : DiGraph\n A NetworkX DiGraph\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in nodelist.\n If nodelist is None, then the ordering is produced by G.nodes().\n\n weight : string or None, optional (default=None)\n The edge attribute that holds the numerical value used for\n the edge weight. If None then all edge weights are 1.\n\n Returns\n -------\n B : Numpy array\n The modularity matrix of G.\n\n Examples\n --------\n >>> G = nx.DiGraph()\n >>> G.add_edges_from(\n ... (\n ... (1, 2),\n ... (1, 3),\n ... (3, 1),\n ... (3, 2),\n ... (3, 5),\n ... (4, 5),\n ... (4, 6),\n ... (5, 4),\n ... (5, 6),\n ... (6, 4),\n ... )\n ... )\n >>> B = nx.directed_modularity_matrix(G)\n\n\n Notes\n -----\n NetworkX defines the element A_ij of the adjacency matrix as 1 if there\n is a link going from node i to node j. Leicht and Newman use the opposite\n definition. This explains the different expression for B_ij.\n\n See Also\n --------\n to_numpy_array\n modularity_spectrum\n adjacency_matrix\n modularity_matrix\n\n References\n ----------\n .. [1] E. A. Leicht, M. E. J. Newman,\n \"Community structure in directed networks\",\n Phys. Rev Lett., vol. 100, no. 11, p. 118703, 2008.\n ", "language": "en", "n_whitespaces": 598, "n_words": 303, "vocab_size": 177 }
44
Python
35
8a325d26aa7fdd3a72580c4720fa97f971bbefcb
modularitymatrix.py
177,335
10
92
directed_modularity_matrix
https://github.com/networkx/networkx.git
Use scipy.sparse array datastructure (#6037) * Use scipy.sparse array datastructure * Add reminder to rm wrapper when scipy adds creation fns. * Rm mention of np matrix from code comment. * Update networkx/algorithms/bipartite/matrix.py Co-authored-by: Stefan van der Walt <[email protected]> Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Stefan van der Walt <[email protected]>
81
0
42,354
10
2
5
def get_nccl_reduce_op(reduce_op):
    if reduce_op not in NCCL_REDUCE_OP_MAP:
        raise RuntimeError("NCCL does not support reduce op: '{}'.".format(reduce_op))
    return NCCL_REDUCE_OP_MAP[reduce_op]
python/ray/util/collective/collective_group/nccl_util.py
47
ray
{ "docstring": "Map the reduce op to NCCL reduce op type.\n\n Args:\n reduce_op (ReduceOp): ReduceOp Enum (SUM/PRODUCT/MIN/MAX).\n Returns:\n (nccl.ncclRedOp_t): the mapped NCCL reduce op.\n ", "language": "en", "n_whitespaces": 45, "n_words": 22, "vocab_size": 17 }
17
Python
16
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
nccl_util.py
133,019
4
27
get_nccl_reduce_op
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
33
0
29,926
12
2
6
def audit_enum(self) -> AuditMode:
    try:
        return AuditMode(self.audit)
    except ValueError:
        raise ValueError(f'Docker completion entry "{self.name}" has an invalid value "{self.audit}" for the "audit" setting.') from None
test/lib/ansible_test/_internal/completion.py
62
ansible
{ "docstring": "The audit requirements for the container. Raises an exception if the value is invalid.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
25
Python
25
cda16cc5e9aa8703fb4e1ac0a0be6b631d9076cc
completion.py
268,709
6
28
audit_enum
https://github.com/ansible/ansible.git
ansible-test - Improve container management. (#78550) See changelogs/fragments/ansible-test-container-management.yml for details.
68
0
79,610
13
1
24
def test_post_build_adapt_update_dataset(self):
    input_dataset = tf.data.Dataset.from_tensor_slices(
        np.array([[1], [2], [3], [4], [5], [0]])
    )

    input_data = keras.Input(shape=(1,))
    layer = AddingPreprocessingLayer()
    output = layer(input_data)
    model = keras.Model(input_data, output)
    model._run_eagerly = test_utils.should_run_eagerly()

    layer.adapt(input_dataset)

    self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0]))
keras/engine/base_preprocessing_layer_test.py
193
keras
{ "docstring": "Test that preproc layers can adapt() after build() is called.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
35
Python
30
84afc5193d38057e2e2badf9c889ea87d80d8fbf
base_preprocessing_layer_test.py
271,003
11
133
test_post_build_adapt_update_dataset
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
116
0
80,650
12
1
2
def iconsize(self):
    return self["iconsize"]
packages/python/plotly/plotly/graph_objs/layout/mapbox/layer/_symbol.py
22
plotly.py
{ "docstring": "\n Sets the symbol icon size (mapbox.layer.layout.icon-size). Has\n an effect only when `type` is set to \"symbol\".\n\n The 'iconsize' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n int|float\n ", "language": "en", "n_whitespaces": 94, "n_words": 35, "vocab_size": 34 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_symbol.py
232,063
2
11
iconsize
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
63,507
7
10
17
def gnu_getopt(args, shortopts, longopts = []):
    opts = []
    prog_args = []
    if isinstance(longopts, str):
        longopts = [longopts]
    else:
        longopts = list(longopts)

    # Allow options after non-option arguments?
    if shortopts.startswith('+'):
        shortopts = shortopts[1:]
        all_options_first = True
    elif os.environ.get("POSIXLY_CORRECT"):
        all_options_first = True
    else:
        all_options_first = False

    while args:
        if args[0] == '--':
            prog_args += args[1:]
            break

        if args[0][:2] == '--':
            opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
        elif args[0][:1] == '-' and args[0] != '-':
            opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
        else:
            if all_options_first:
                prog_args += args
                break
            else:
                prog_args.append(args[0])
                args = args[1:]

    return opts, prog_args
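A usage sketch of the GNU-style intermixing the docstring describes (positional arguments may precede options):

    opts, args = gnu_getopt(['input.txt', '-v', '--out', 'report.txt'], 'v', ['out='])
    # opts -> [('-v', ''), ('--out', 'report.txt')]; args -> ['input.txt']
    # A leading '+' forces POSIX behaviour: scanning stops at the first non-option.
    gnu_getopt(['input.txt', '-v'], '+v')  # -> ([], ['input.txt', '-v'])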
python3.10.4/Lib/getopt.py
339
XX-Net
{ "docstring": "getopt(args, options[, long_options]) -> opts, args\n\n This function works like getopt(), except that GNU style scanning\n mode is used by default. This means that option and non-option\n arguments may be intermixed. The getopt() function stops\n processing options as soon as a non-option argument is\n encountered.\n\n If the first character of the option string is `+', or if the\n environment variable POSIXLY_CORRECT is set, then option\n processing stops as soon as a non-option argument is encountered.\n\n ", "language": "en", "n_whitespaces": 102, "n_words": 75, "vocab_size": 53 }
96
Python
54
8198943edd73a363c266633e1aa5b2a9e9c9f526
getopt.py
217,531
30
209
gnu_getopt
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
325
0
54,836
16
2
6
def _dict_like(x):
    try:
        _ = dict(x)
    except (TypeError, ValueError):
        return False
    return True
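A behaviour sketch: anything `dict()` accepts counts as dict-like.

    _dict_like({"a": 1})     # True
    _dict_like([("a", 1)])   # True  (sequence of key/value pairs)
    _dict_like(3.14)         # False (TypeError is swallowed)
    _dict_like(["a", "b"])   # False (ValueError: elements are not pairs)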
ludwig/utils/numerical_test_utils.py
43
ludwig
{ "docstring": "Returns true if an object is a dict or convertible to one, false if not.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 14 }
13
Python
12
caaab8ba561850c1b274088f278ff2d27a6f5227
numerical_test_utils.py
8,509
6
25
_dict_like
https://github.com/ludwig-ai/ludwig.git
Check for nans before testing equality in test_training_determinism (#2687) * Adds test_numerical_test_utils * Check finite metrics before checking equality. * Catch TypeError and ValueError in _dict_like and _enumerable. * Edits comments.
39
0
1,440
10
3
31
def to_rotation_matrix(self, v=None, normal=False):
    q = self
    s = q.norm()**-2

    # diagonal elements are different according to parameter normal
    if normal:
        m00 = s*(q.a**2 + q.b**2 - q.c**2 - q.d**2)
        m11 = s*(q.a**2 - q.b**2 + q.c**2 - q.d**2)
        m22 = s*(q.a**2 - q.b**2 - q.c**2 + q.d**2)
    else:
        m00 = 1 - 2*s*(q.c**2 + q.d**2)
        m11 = 1 - 2*s*(q.b**2 + q.d**2)
        m22 = 1 - 2*s*(q.b**2 + q.c**2)

    m01 = 2*s*(q.b*q.c - q.d*q.a)
    m02 = 2*s*(q.b*q.d + q.c*q.a)

    m10 = 2*s*(q.b*q.c + q.d*q.a)
    m12 = 2*s*(q.c*q.d - q.b*q.a)

    m20 = 2*s*(q.b*q.d - q.c*q.a)
    m21 = 2*s*(q.c*q.d + q.b*q.a)

    if not v:
        return Matrix([[m00, m01, m02], [m10, m11, m12], [m20, m21, m22]])
    else:
        (x, y, z) = v

        m03 = x - x*m00 - y*m01 - z*m02
        m13 = y - x*m10 - y*m11 - z*m12
        m23 = z - x*m20 - y*m21 - z*m22
        m30 = m31 = m32 = 0
        m33 = 1

        return Matrix([[m00, m01, m02, m03], [m10, m11, m12, m13],
                       [m20, m21, m22, m23], [m30, m31, m32, m33]])
sympy/algebras/quaternion.py
690
sympy
{ "docstring": "Returns the equivalent rotation transformation matrix of the quaternion\n which represents rotation about the origin if v is not passed.\n\n Parameters\n ==========\n\n v : tuple or None\n Default value: None\n normal : bool\n When True, gives an expression that may be more efficient for\n symbolic calculations but less so for direct evaluation. Both\n formulas are mathematically equivalent.\n Default value: False\n\n Returns\n =======\n\n tuple\n Returns the equivalent rotation transformation matrix of the quaternion\n which represents rotation about the origin if v is not passed.\n\n Examples\n ========\n\n >>> from sympy import Quaternion\n >>> from sympy import symbols, trigsimp, cos, sin\n >>> x = symbols('x')\n >>> q = Quaternion(cos(x/2), 0, 0, sin(x/2))\n >>> trigsimp(q.to_rotation_matrix())\n Matrix([\n [cos(x), -sin(x), 0],\n [sin(x), cos(x), 0],\n [ 0, 0, 1]])\n\n Generates a 4x4 transformation matrix (used for rotation about a point\n other than the origin) if the point(v) is passed as an argument.\n\n Examples\n ========\n\n >>> from sympy import Quaternion\n >>> from sympy import symbols, trigsimp, cos, sin\n >>> x = symbols('x')\n >>> q = Quaternion(cos(x/2), 0, 0, sin(x/2))\n >>> trigsimp(q.to_rotation_matrix((1, 1, 1)))\n Matrix([\n [cos(x), -sin(x), 0, sin(x) - cos(x) + 1],\n [sin(x), cos(x), 0, -sin(x) - cos(x) + 1],\n [ 0, 0, 1, 0],\n [ 0, 0, 0, 1]])\n\n ", "language": "en", "n_whitespaces": 589, "n_words": 202, "vocab_size": 100 }
173
Python
92
34555f1ebe2a2ed1fab2a0a2ae9a8457a75eaa26
quaternion.py
200,685
28
464
to_rotation_matrix
https://github.com/sympy/sympy.git
changed homogeneous to normal
450
0
49,764
15
1
24
def test_overriding_has_module_permission(self):
    articles = Article._meta.verbose_name_plural.title()
    sections = Section._meta.verbose_name_plural.title()
    index_url = reverse("admin7:index")

    self.client.force_login(self.superuser)
    response = self.client.get(index_url)
    self.assertContains(response, sections)
    self.assertNotContains(response, articles)
    self.client.logout()

    self.client.force_login(self.viewuser)
    response = self.client.get(index_url)
    self.assertNotContains(response, "admin_views")
    self.assertNotContains(response, articles)
    self.client.logout()

    self.client.force_login(self.adduser)
    response = self.client.get(index_url)
    self.assertNotContains(response, "admin_views")
    self.assertNotContains(response, articles)
    self.client.logout()

    self.client.force_login(self.changeuser)
    response = self.client.get(index_url)
    self.assertNotContains(response, "admin_views")
    self.assertNotContains(response, articles)
    self.client.logout()

    self.client.force_login(self.deleteuser)
    response = self.client.get(index_url)
    self.assertNotContains(response, articles)

    # The app list displays Sections but not Articles as the latter has
    # ModelAdmin.has_module_permission() = False.
    self.client.force_login(self.superuser)
    response = self.client.get(reverse("admin7:app_list", args=("admin_views",)))
    self.assertContains(response, sections)
    self.assertNotContains(response, articles)
tests/admin_views/tests.py
459
django
{ "docstring": "\n If has_module_permission() always returns False, the module shouldn't\n be displayed on the admin index page for any users.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 17 }
79
Python
39
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,841
31
280
test_overriding_has_module_permission
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
310
0
52,128
13
1
11
def test_precedence(self):
    with self.settings(
        INSTALLED_APPS=[
            "admin_scripts.complex_app",
            "admin_scripts.simple_app",
            "django.contrib.auth",
            "django.contrib.contenttypes",
        ]
    ):
        out = StringIO()
        call_command("duplicate", stdout=out)
        self.assertEqual(out.getvalue().strip(), "complex_app")
    with self.settings(
        INSTALLED_APPS=[
            "admin_scripts.simple_app",
            "admin_scripts.complex_app",
            "django.contrib.auth",
            "django.contrib.contenttypes",
        ]
    ):
        out = StringIO()
        call_command("duplicate", stdout=out)
        self.assertEqual(out.getvalue().strip(), "simple_app")
tests/admin_scripts/tests.py
187
django
{ "docstring": "\n Apps listed first in INSTALLED_APPS have precedence.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
34
Python
19
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,308
23
102
test_precedence
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
299
0
51,924
13
2
6
def clear(self):
    for key in self.conn.keys():
        self.conn.delete(key)
.venv/lib/python3.8/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
43
transferlearning
{ "docstring": "Helper for clearing all the keys in a database. Use with\n caution!", "language": "en", "n_whitespaces": 18, "n_words": 12, "vocab_size": 12 }
7
Python
7
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
redis_cache.py
61,481
3
25
clear
https://github.com/jindongwang/transferlearning.git
upd; format
32
0
12,588
10
1
15
def upfirdn_2d(x, k, upx=1, upy=1, downx=1, downy=1, padx0=0, padx1=0, pady0=0, pady1=0, impl='cuda'):
    impl_dict = {
        'ref': _upfirdn_2d_ref,
        'cuda': _upfirdn_2d_cuda,
    }
    return impl_dict[impl](x=x, k=k, upx=upx, upy=upy, downx=downx, downy=downy,
                           padx0=padx0, padx1=padx1, pady0=pady0, pady1=pady1)

#----------------------------------------------------------------------------
reconstruction/ostec/external/stylegan2/dnnlib/tflib/ops/upfirdn_2d.py
144
insightface
{ "docstring": "Pad, upsample, FIR filter, and downsample a batch of 2D images.\n\n Accepts a batch of 2D images of the shape `[majorDim, inH, inW, minorDim]`\n and performs the following operations for each image, batched across\n `majorDim` and `minorDim`:\n\n 1. Pad the image with zeros by the specified number of pixels on each side\n (`padx0`, `padx1`, `pady0`, `pady1`). Specifying a negative value\n corresponds to cropping the image.\n\n 2. Upsample the image by inserting the zeros after each pixel (`upx`, `upy`).\n\n 3. Convolve the image with the specified 2D FIR filter (`k`), shrinking the\n image so that the footprint of all output pixels lies within the input image.\n\n 4. Downsample the image by throwing away pixels (`downx`, `downy`).\n\n This sequence of operations bears close resemblance to scipy.signal.upfirdn().\n The fused op is considerably more efficient than performing the same calculation\n using standard TensorFlow ops. It supports gradients of arbitrary order.\n\n Args:\n x: Input tensor of the shape `[majorDim, inH, inW, minorDim]`.\n k: 2D FIR filter of the shape `[firH, firW]`.\n upx: Integer upsampling factor along the X-axis (default: 1).\n upy: Integer upsampling factor along the Y-axis (default: 1).\n downx: Integer downsampling factor along the X-axis (default: 1).\n downy: Integer downsampling factor along the Y-axis (default: 1).\n padx0: Number of pixels to pad on the left side (default: 0).\n padx1: Number of pixels to pad on the right side (default: 0).\n pady0: Number of pixels to pad on the top side (default: 0).\n pady1: Number of pixels to pad on the bottom side (default: 0).\n impl: Name of the implementation to use. Can be `\"ref\"` or `\"cuda\"` (default).\n\n Returns:\n Tensor of the shape `[majorDim, outH, outW, minorDim]`, and same datatype as `x`.\n ", "language": "en", "n_whitespaces": 442, "n_words": 277, "vocab_size": 153 }
33
Python
33
7375ee364e0df2a417f92593e09557f1b2a3575a
upfirdn_2d.py
9,408
43
103
upfirdn_2d
https://github.com/deepinsight/insightface.git
initialize ostec
58
0
1,608
9
1
2
def start(self) -> 'BasePod':
    ...
jina/peapods/pods/__init__.py
20
jina
{ "docstring": "Start to run all :class:`Pea` in this BasePod.\n\n .. note::\n If one of the :class:`Pea` fails to start, make sure that all of them\n are properly closed.\n ", "language": "en", "n_whitespaces": 63, "n_words": 27, "vocab_size": 23 }
5
Python
5
933415bfa1f9eb89f935037014dfed816eb9815d
__init__.py
9,880
8
9
start
https://github.com/jina-ai/jina.git
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing"
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
19
0
1,750
6
6
19
def get_date_list(self, queryset, date_type=None, ordering="ASC"):
    date_field = self.get_date_field()
    allow_empty = self.get_allow_empty()
    if date_type is None:
        date_type = self.get_date_list_period()

    if self.uses_datetime_field:
        date_list = queryset.datetimes(date_field, date_type, ordering)
    else:
        date_list = queryset.dates(date_field, date_type, ordering)
    if date_list is not None and not date_list and not allow_empty:
        raise Http404(
            _("No %(verbose_name_plural)s available")
            % {
                "verbose_name_plural": queryset.model._meta.verbose_name_plural,
            }
        )

    return date_list
django/views/generic/dates.py
175
django
{ "docstring": "\n Get a date list by calling `queryset.dates/datetimes()`, checking\n along the way for empty lists that aren't allowed.\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 17 }
55
Python
38
9c19aff7c7561e3a82978a272ecdaad40dda5c00
dates.py
206,860
17
108
get_date_list
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
230
0
51,761
15
6
16
def unquote_unreserved(uri):
    parts = uri.split("%")
    for i in range(1, len(parts)):
        h = parts[i][0:2]
        if len(h) == 2 and h.isalnum():
            try:
                c = chr(int(h, 16))
            except ValueError:
                raise InvalidURL(f"Invalid percent-escape sequence: '{h}'")

            if c in UNRESERVED_SET:
                parts[i] = c + parts[i][2:]
            else:
                parts[i] = f"%{parts[i]}"
        else:
            parts[i] = f"%{parts[i]}"
    return "".join(parts)
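A usage sketch: unreserved characters (letters, digits, and '-._~') are unescaped, while reserved characters keep their percent-escape.

    unquote_unreserved('http://host/%7Euser/a%2Fb')
    # -> 'http://host/~user/a%2Fb'  (%7E is '~', unreserved; %2F is '/', left escaped)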
pipenv/patched/pip/_vendor/requests/utils.py
215
pipenv
{ "docstring": "Un-escape any percent-escape sequences in a URI that are unreserved\n characters. This leaves all reserved, illegal and non-ASCII bytes encoded.\n\n :rtype: str\n ", "language": "en", "n_whitespaces": 31, "n_words": 22, "vocab_size": 22 }
50
Python
37
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
utils.py
22,163
16
119
unquote_unreserved
https://github.com/pypa/pipenv.git
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
198
0
4,233
16
1
9
def set_3d_properties(self, path, zs=0, zdir='z'):
    Patch3D.set_3d_properties(self, path.vertices, zs=zs, zdir=zdir)
    self._code3d = path.codes
lib/mpl_toolkits/mplot3d/art3d.py
63
matplotlib
{ "docstring": "\n Set the *z* position and direction of the path patch.\n\n Parameters\n ----------\n path :\n zs : float\n The location along the *zdir* axis in 3D space to position the\n path patch.\n zdir : {'x', 'y', 'z', 3-tuple}\n Plane to plot path patch orthogonal to. Default: 'z'.\n See `.get_dir_vector` for a description of the values.\n ", "language": "en", "n_whitespaces": 148, "n_words": 54, "vocab_size": 41 }
12
Python
12
df6f95703b60348e01603f98a439b133da2938a0
art3d.py
109,925
3
41
set_3d_properties
https://github.com/matplotlib/matplotlib.git
Improve mpl_toolkit documentation
33
0
23,832
8
1
4
def name(self) -> str:
    return self._name
airbyte-cdk/python/airbyte_cdk/sources/declarative/declarative_stream.py
22
airbyte
{ "docstring": "\n :return: Stream name. By default this is the implementing class name, but it can be overridden as needed.\n ", "language": "en", "n_whitespaces": 33, "n_words": 18, "vocab_size": 18 }
6
Python
6
150ab593f8ca1f1aa960a0811aece26c46ba6c75
declarative_stream.py
5,311
5
12
name
https://github.com/airbytehq/airbyte.git
Low code connectors: core structure (#12850) * checkout from alex/cac * doc * doc * remove broken test * rename * rename file * delete unused file * rename * abstract property * isort * update state * Update comment * remove incremental mixin * delete comment * update comments * update comments * remove no_state * rename package * pass parameters through kwargs * update interface to pass source in interface * update interface to pass source in interface * rename to stream_slicer * Low code connectors: string interpolation with jinja (#12852) * checkout from alex/cac * Add missing tests * Add missing files * missing file * rename * jinja dependency * Add comment * comment * comment * Revert "delete unused file" This reverts commit 758e939367775ddbefcd52c6e1e832976d3ba9fe. * delete unused field * delete unused field * rename * pass kwargs directly * isort * Revert "isort" This reverts commit 4a792239440bc9950813ccc6ed368641ce2a96e4. * format * decoder * better error handling * remove nostate * isort * delete dead code * Update mapping type to [str, Any] * add comment * Add comment * pass parameters through kwargs * move test to right module * Add missing test * Use authbase instead of deprecated class * leverage generator * rename to declarative * rename the classes too
20
0
749
6
15
58
def lattice_reference(G, niter=5, D=None, connectivity=True, seed=None):
    import numpy as np

    from networkx.utils import cumulative_distribution, discrete_sequence

    local_conn = nx.connectivity.local_edge_connectivity

    if len(G) < 4:
        raise nx.NetworkXError("Graph has fewer than four nodes.")
    if len(G.edges) < 2:
        raise nx.NetworkXError("Graph has fewer that 2 edges")
    # Instead of choosing uniformly at random from a generated edge list,
    # this algorithm chooses nonuniformly from the set of nodes with
    # probability weighted by degree.
    G = G.copy()
    keys, degrees = zip(*G.degree())  # keys, degree
    cdf = cumulative_distribution(degrees)  # cdf of degree

    nnodes = len(G)
    nedges = nx.number_of_edges(G)
    if D is None:
        D = np.zeros((nnodes, nnodes))
        un = np.arange(1, nnodes)
        um = np.arange(nnodes - 1, 0, -1)
        u = np.append((0,), np.where(un < um, un, um))

        for v in range(int(np.ceil(nnodes / 2))):
            D[nnodes - v - 1, :] = np.append(u[v + 1 :], u[: v + 1])
            D[v, :] = D[nnodes - v - 1, :][::-1]

    niter = niter * nedges
    # maximal number of rewiring attempts per 'niter'
    max_attempts = int(nnodes * nedges / (nnodes * (nnodes - 1) / 2))

    for _ in range(niter):
        n = 0
        while n < max_attempts:
            # pick two random edges without creating edge list
            # choose source node indices from discrete distribution
            (ai, ci) = discrete_sequence(2, cdistribution=cdf, seed=seed)
            if ai == ci:
                continue  # same source, skip
            a = keys[ai]  # convert index to label
            c = keys[ci]
            # choose target uniformly from neighbors
            b = seed.choice(list(G.neighbors(a)))
            d = seed.choice(list(G.neighbors(c)))
            bi = keys.index(b)
            di = keys.index(d)

            if b in [a, c, d] or d in [a, b, c]:
                continue  # all vertices should be different

            # don't create parallel edges
            if (d not in G[a]) and (b not in G[c]):
                if D[ai, bi] + D[ci, di] >= D[ai, ci] + D[bi, di]:
                    # only swap if we get closer to the diagonal
                    G.add_edge(a, d)
                    G.add_edge(c, b)
                    G.remove_edge(a, b)
                    G.remove_edge(c, d)

                    # Check if the graph is still connected
                    if connectivity and local_conn(G, a, b) == 0:
                        # Not connected, revert the swap
                        G.remove_edge(a, d)
                        G.remove_edge(c, b)
                        G.add_edge(a, b)
                        G.add_edge(c, d)
                    else:
                        break
            n += 1

    return G


@py_random_state(3)
@not_implemented_for("directed")
@not_implemented_for("multigraph")
networkx/algorithms/smallworld.py
858
@py_random_state(3) @not_implemented_for("directed") @not_implemented_for("multigraph")
networkx
{ "docstring": "Latticize the given graph by swapping edges.\n\n Parameters\n ----------\n G : graph\n An undirected graph.\n\n niter : integer (optional, default=1)\n An edge is rewired approximatively niter times.\n\n D : numpy.array (optional, default=None)\n Distance to the diagonal matrix.\n\n connectivity : boolean (optional, default=True)\n Ensure connectivity for the latticized graph when set to True.\n\n seed : integer, random_state, or None (default)\n Indicator of random number generation state.\n See :ref:`Randomness<randomness>`.\n\n Returns\n -------\n G : graph\n The latticized graph.\n\n Raises\n ------\n NetworkXError\n If there are fewer than 4 nodes or 2 edges in `G`\n\n Notes\n -----\n The implementation is adapted from the algorithm by Sporns et al. [1]_.\n which is inspired from the original work by Maslov and Sneppen(2002) [2]_.\n\n References\n ----------\n .. [1] Sporns, Olaf, and Jonathan D. Zwi.\n \"The small world of the cerebral cortex.\"\n Neuroinformatics 2.2 (2004): 145-162.\n .. [2] Maslov, Sergei, and Kim Sneppen.\n \"Specificity and stability in topology of protein networks.\"\n Science 296.5569 (2002): 910-913.\n ", "language": "en", "n_whitespaces": 302, "n_words": 156, "vocab_size": 119 }
350
Python
221
9d5e11f27033049282e2d244132b0e946df6557d
smallworld.py
177,545
52
533
lattice_reference
https://github.com/networkx/networkx.git
bug fix in smallworld.py: random_reference and lattice_reference (#6151) * raise exception if graph has less than 2 edges in random_reference and lattice_reference and tested * Updated lattice_reference doc * Update networkx/algorithms/smallworld.py Co-authored-by: Ross Barnowski <[email protected]> * Update networkx/algorithms/tests/test_smallworld.py Co-authored-by: Ross Barnowski <[email protected]> * Added some suggestions * Added some final suggestions Co-authored-by: Ross Barnowski <[email protected]>
997
1
42,433
17
1
10
def binary_matches(y_true, y_pred, threshold=0.5):
    y_pred = tf.convert_to_tensor(y_pred)
    threshold = tf.cast(threshold, y_pred.dtype)
    y_pred = tf.cast(y_pred > threshold, y_pred.dtype)
    return tf.cast(tf.equal(y_true, y_pred), tf.int8)
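A usage sketch (assuming TensorFlow imported as `tf`): predictions are thresholded at 0.5 by default, then compared element-wise.

    y_true = tf.constant([[1.0, 0.0, 1.0]])
    y_pred = tf.constant([[0.6, 0.4, 0.3]])
    binary_matches(y_true, y_pred)
    # -> int8 tensor [[1, 1, 0]]: 0.6 thresholds to 1 (match),
    #    0.4 to 0 (match), 0.3 to 0 (mismatch against label 1)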
keras/utils/metrics_utils.py
98
keras
{ "docstring": "Creates int Tensor, 1 for label-prediction match, 0 for mismatch.\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n threshold: (Optional) Float representing the threshold for deciding whether\n prediction values are 1 or 0.\n\n Returns:\n Binary matches. shape = `[batch_size, d0, .. dN]`\n ", "language": "en", "n_whitespaces": 75, "n_words": 55, "vocab_size": 40 }
21
Python
17
119cd4655d01570a70c70879dff4461ea46161bf
metrics_utils.py
268,987
5
66
binary_matches
https://github.com/keras-team/keras.git
Added util metric method for binary_matches. Decoupled from public metric binarry_acc
26
0
79,806
9
2
5
def _flatten_parameters(self):
    [m.flatten_parameters() for m in self._to_flatten]
synthesizer/models/sublayer/cbhg.py
33
MockingBird
{ "docstring": "Calls `flatten_parameters` on all the rnns used by the WaveRNN. Used\n to improve efficiency and avoid PyTorch yelling at us.", "language": "en", "n_whitespaces": 26, "n_words": 20, "vocab_size": 19 }
7
Python
7
6abdd0ebf06ddede5cdf91329143b56167492a17
cbhg.py
161,297
2
19
_flatten_parameters
https://github.com/babysor/MockingBird.git
Refactor (#649) * Refactor model * Refactor and fix bug to save plots
21
0
38,959
8
2
7
def compat_system(source_dir):
    try:
        system = load_system(source_dir)
    except (FileNotFoundError, KeyError):
        system = {}
    system.setdefault(
        'build-backend',
        'setuptools.build_meta:__legacy__',
    )
    system.setdefault('requires', ['setuptools', 'wheel'])
    return system
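A usage sketch (hypothetical path): a source tree with no pyproject.toml, or one without a [build-system] table, falls back to the legacy setuptools backend.

    compat_system('/path/to/legacy-project')
    # -> {'build-backend': 'setuptools.build_meta:__legacy__',
    #     'requires': ['setuptools', 'wheel']}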
.venv/lib/python3.8/site-packages/pip/_vendor/pep517/build.py
87
transferlearning
{ "docstring": "\n Given a source dir, attempt to get a build system backend\n and requirements from pyproject.toml. Fallback to\n setuptools but only if the file was not found or a build\n system was not indicated.\n ", "language": "en", "n_whitespaces": 49, "n_words": 33, "vocab_size": 26 }
21
Python
18
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
build.py
62,961
11
48
compat_system
https://github.com/jindongwang/transferlearning.git
upd; format
70
0
13,077
10
2
7
def table(self, data=None):
    if _use_arrow():
        return self.dg._arrow_table(data)
    else:
        return self.dg._legacy_table(data)
lib/streamlit/elements/dataframe_selector.py
59
streamlit
{ "docstring": "Display a static table.\n\n This differs from `st.dataframe` in that the table in this case is\n static: its entire contents are laid out directly on the page.\n\n Parameters\n ----------\n data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, Iterable, dict, or None\n The table data.\n Pyarrow tables are not supported by Streamlit's legacy DataFrame serialization\n (i.e. with `config.dataFrameSerialization = \"legacy\"`).\n To use pyarrow tables, please enable pyarrow by changing the config setting,\n `config.dataFrameSerialization = \"arrow\"`.\n\n Example\n -------\n >>> df = pd.DataFrame(\n ... np.random.randn(10, 5),\n ... columns=('col %d' % i for i in range(5)))\n ...\n >>> st.table(df)\n\n .. output::\n https://share.streamlit.io/streamlit/docs/main/python/api-examples-source/data.table.py\n height: 480px\n\n ", "language": "en", "n_whitespaces": 277, "n_words": 98, "vocab_size": 83 }
10
Python
9
72703b38029f9358a0ec7ca5ed875a6b438ece19
dataframe_selector.py
118,729
5
35
table
https://github.com/streamlit/streamlit.git
Replace static apps with live Cloud apps (#4317) Co-authored-by: kajarenc <[email protected]>
53
0
26,386
11
8
41
def _galois_group_degree_4_simple(T, max_tries=30, randomize=False):
    from sympy.combinatorics.permutations import Permutation
    from sympy.combinatorics.named_groups import (
        CyclicGroup, AbelianGroup, DihedralGroup, AlternatingGroup,
        SymmetricGroup
    )
    # Consider the resolvent for the form
    #   F = X0*X1^2 + X1*X2^2 + X2*X3^2 + X3*X0^2
    # and the group G = S4. In this case, the stabilizer H is C4 = < (0123) >,
    # and a set of representatives of G/H is {I, (01), (02), (03), (12), (23)}.
    X = symbols('X0 X1 X2 X3')
    F = X[0]*X[1]**2 + X[1]*X[2]**2 + X[2]*X[3]**2 + X[3]*X[0]**2
    s = [
        Permutation(3),
        Permutation(3)(0, 1),
        Permutation(3)(0, 2),
        Permutation(3)(0, 3),
        Permutation(3)(1, 2),
        Permutation(3)(2, 3),
    ]
    R = Resolvent(F, X, s)
    history = set()
    for i in range(max_tries):
        R_dup, _, _ = R.eval_for_poly(T)
        # If R is squarefree, we can proceed. Otherwise, apply a
        # Tschirnhausen transformation on T and try again.
        if dup_sqf_p(R_dup, ZZ):
            break
        _, T = tschirnhausen_transformation(T, max_tries=max_tries,
                                            history=history,
                                            fixed_order=not randomize)
    else:
        raise MaxTriesException

    # Compute list L of degrees of irreducible factors of R, in increasing order:
    fl = dup_factor_list(R_dup, ZZ)
    L = sorted(sum([
        [len(r) - 1] * e for r, e in fl[1]
    ], []))

    if L == [6]:
        return (AlternatingGroup(4), True) if has_square_disc(T) else (SymmetricGroup(4), False)

    if L == [1, 1, 4]:
        return (CyclicGroup(4), False)

    if L == [2, 2, 2]:
        return (AbelianGroup(2, 2), True)

    assert L == [2, 4]
    return (DihedralGroup(4), False)
sympy/polys/numberfields/galoisgroups.py
522
sympy
{ "docstring": "\n Compute the Galois group of a polynomial of degree 4, using Alg 6.3.6\n of Cohen.\n\n References\n ==========\n\n .. [1] Cohen, H. *A Course in Computational Algebraic Number Theory*.\n\n ", "language": "en", "n_whitespaces": 47, "n_words": 28, "vocab_size": 26 }
223
Python
154
d3c0fc825c4a80904a1fb9a2092137c3d9e0c3fe
galoisgroups.py
195,690
46
353
_galois_group_degree_4_simple
https://github.com/sympy/sympy.git
Add a `galois_group()` function
429
0
47,373
16
1
4
def unsaved_files(self) -> List[str]:
certbot-apache/certbot_apache/_internal/interfaces.py
20
certbot
{ "docstring": "\n Returns a list of file paths that have been changed since the last save\n (or the initial configuration parse). The intended use for this method\n is to tell the Reverter which files need to be included in a checkpoint.\n\n This is typically called for the root of the ParserNode tree.\n\n :returns: list of file paths of files that have been changed but not yet\n saved to disk.\n ", "language": "en", "n_whitespaces": 121, "n_words": 67, "vocab_size": 47 }
4
Python
4
7d9e9a49005de7961e84d2a7c608db57dbab3046
interfaces.py
186,652
11
11
unsaved_files
https://github.com/certbot/certbot.git
Add typing to certbot.apache (#9071) * Add typing to certbot.apache Co-authored-by: Adrien Ferrand <[email protected]>
11
0
45,560
6
4
31
def adjust_bbox(fig, bbox_inches, fixed_dpi=None):
    origBbox = fig.bbox
    origBboxInches = fig.bbox_inches
    orig_layout = fig.get_layout_engine()
    fig.set_layout_engine(None)
    _boxout = fig.transFigure._boxout

    old_aspect = []
    locator_list = []
    sentinel = object()
    for ax in fig.axes:
        locator_list.append(ax.get_axes_locator())
        current_pos = ax.get_position(original=False).frozen()
        ax.set_axes_locator(lambda a, r, _pos=current_pos: _pos)
        # override the method that enforces the aspect ratio on the Axes
        if 'apply_aspect' in ax.__dict__:
            old_aspect.append(ax.apply_aspect)
        else:
            old_aspect.append(sentinel)
        ax.apply_aspect = lambda pos=None: None
lib/matplotlib/_tight_bbox.py
221
matplotlib
{ "docstring": "\n Temporarily adjust the figure so that only the specified area\n (bbox_inches) is saved.\n\n It modifies fig.bbox, fig.bbox_inches,\n fig.transFigure._boxout, and fig.patch. While the figure size\n changes, the scale of the original figure is conserved. A\n function which restores the original values are returned.\n ", "language": "en", "n_whitespaces": 66, "n_words": 42, "vocab_size": 33 }
63
Python
51
ec4dfbc3c83866f487ff0bc9c87b0d43a1c02b22
_tight_bbox.py
107,130
32
274
adjust_bbox
https://github.com/matplotlib/matplotlib.git
ENH: implement and use base layout_engine for more flexible layout.
164
0
22,596
13
2
7
def get_currency(symbol) -> str:
    ticker_info = yf.Ticker(symbol).info
    if "financialCurrency" in ticker_info:
        return ticker_info["financialCurrency"]
    return "Not Specified"
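A usage sketch (issues a network request through yfinance; the result depends on what Yahoo returns):

    get_currency("AAPL")  # typically 'USD' for US-listed tickers
    # Tickers whose info lacks 'financialCurrency' return 'Not Specified'.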
openbb_terminal/stocks/fundamental_analysis/yahoo_finance_model.py
56
OpenBBTerminal
{ "docstring": "Quick helper to get currency for financial statements", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
16
Python
15
92991fc4e3097fdd9ac9f4f39bdd8e46289176cd
yahoo_finance_model.py
285,622
6
30
get_currency
https://github.com/OpenBB-finance/OpenBBTerminal.git
Get rid of option expirations in the past for Nasdaq + bugs (#2498) * Get rid of option expirations in the past for Nasdaq + clean up bug * Add in currency for yfinance financials * Added fixes Co-authored-by: Colin Delahunty <[email protected]> Co-authored-by: colin99d <[email protected]>
35
0
85,330
9
1
23
def _solve_eigen(self, X, y, shrinkage, covariance_estimator):
sklearn/discriminant_analysis.py
46
"""Eigenvalue solver. The eigenvalue solver computes the optimal solution of thecoefficient (basically the ratio of between class scatter to within
scikit-learn
{ "docstring": "Eigenvalue solver.\n\n The eigenvalue solver computes the optimal solution of the Rayleigh\n coefficient (basically the ratio of between class scatter to within", "language": "en", "n_whitespaces": 35, "n_words": 22, "vocab_size": 19 }
6
Python
6
e1db2a8173ca37e561cdfa4384481501c4d50868
discriminant_analysis.py
258,768
18
187
_solve_eigen
https://github.com/scikit-learn/scikit-learn.git
Use check_finite=False in discriminant analysis (#18909) Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
13
2
75,415
5
2
7
def model_call_inputs(model, keep_original_batch_size=False):
    input_specs = model.save_spec(dynamic_batch=not keep_original_batch_size)
    if input_specs is None:
        return None, None
    input_specs = _enforce_names_consistency(input_specs)
    return input_specs
keras/saving/saving_utils.py
63
keras
{ "docstring": "Inspect model to get its input signature.\n\n The model's input signature is a list with a single (possibly-nested) object.\n This is due to the Keras-enforced restriction that tensor inputs must be\n passed in as the first argument.\n\n For example, a model with input {'feature1': <Tensor>, 'feature2': <Tensor>}\n will have input signature: [{'feature1': TensorSpec, 'feature2': TensorSpec}]\n\n Args:\n model: Keras Model object.\n keep_original_batch_size: A boolean indicating whether we want to keep using\n the original batch size or set it to None. Default is `False`, which means\n that the batch dim of the returned input signature will always be set to\n `None`.\n\n Returns:\n A tuple containing `(args, kwargs)` TensorSpecs of the model call function\n inputs.\n `kwargs` does not contain the `training` argument.\n ", "language": "en", "n_whitespaces": 189, "n_words": 119, "vocab_size": 87 }
19
Python
14
84afc5193d38057e2e2badf9c889ea87d80d8fbf
saving_utils.py
276,243
6
38
model_call_inputs
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
41
0
81,601
10
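A minimal sketch of inspecting a model's call signature (assumes TensorFlow/Keras is installed and that `save_spec` is available on the model; the toy model is illustrative):

import tensorflow as tf
from keras.saving.saving_utils import model_call_inputs

# Build a trivial functional model, then read back its input TensorSpecs.
inp = tf.keras.Input(shape=(4,))
model = tf.keras.Model(inp, tf.keras.layers.Dense(2)(inp))
args, kwargs = model_call_inputs(model)  # batch dim of the specs is None by default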
1
10
def test_login_redirect_for_direct_get(self): response = self.client.get(reverse("admin:login")) self.assertEqual(response.status_code, 200) self.assertEqual(response.context[REDIRECT_FIELD_NAME], reverse("admin:index"))
tests/admin_views/tests.py
77
django
{ "docstring": "\n Login redirect should be to the admin index page when going directly to\n /admin/login/.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
9
Python
9
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,740
4
45
test_login_redirect_for_direct_get
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
37
0
52,079
11
3
19
def tokenizer_from_json(json_string): tokenizer_config = json.loads(json_string) config = tokenizer_config.get("config") word_counts = json.loads(config.pop("word_counts")) word_docs = json.loads(config.pop("word_docs")) index_docs = json.loads(config.pop("index_docs")) # Integer indexing gets converted to strings with json.dumps() index_docs = {int(k): v for k, v in index_docs.items()} index_word = json.loads(config.pop("index_word")) index_word = {int(k): v for k, v in index_word.items()} word_index = json.loads(config.pop("word_index")) tokenizer = Tokenizer(**config) tokenizer.word_counts = word_counts tokenizer.word_docs = word_docs tokenizer.index_docs = index_docs tokenizer.word_index = word_index tokenizer.index_word = index_word return tokenizer
keras/preprocessing/text.py
274
keras
{ "docstring": "Parses a JSON tokenizer configuration and returns a tokenizer instance.\n\n Deprecated: `tf.keras.preprocessing.text.Tokenizer` does not operate on\n tensors and is not recommended for new code. Prefer\n `tf.keras.layers.TextVectorization` which provides equivalent functionality\n through a layer which accepts `tf.Tensor` input. See the\n [text loading tutorial](https://www.tensorflow.org/tutorials/load_data/text)\n for an overview of the layer and text handling in tensorflow.\n\n Args:\n json_string: JSON string encoding a tokenizer configuration.\n\n Returns:\n A Keras Tokenizer instance\n ", "language": "en", "n_whitespaces": 107, "n_words": 66, "vocab_size": 53 }
70
Python
41
84afc5193d38057e2e2badf9c889ea87d80d8fbf
text.py
275,787
17
161
tokenizer_from_json
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
124
0
81,466
11
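A hedged round-trip sketch (assumes a Keras installation where both helpers are importable; the toy corpus is illustrative):

from keras.preprocessing.text import Tokenizer, tokenizer_from_json

# Fit on a toy corpus, serialize to JSON, and rebuild an equivalent tokenizer.
tok = Tokenizer()
tok.fit_on_texts(["the cat sat", "the dog ran"])
restored = tokenizer_from_json(tok.to_json())
assert restored.word_index == tok.word_index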
1
4
def shape(self): return (self.rows, self.cols)
sympy/matrices/common.py
27
sympy
{ "docstring": "The shape (dimensions) of the matrix as the 2-tuple (rows, cols).\n\n Examples\n ========\n\n >>> from sympy import zeros\n >>> M = zeros(2, 3)\n >>> M.shape\n (2, 3)\n >>> M.rows\n 2\n >>> M.cols\n 3\n ", "language": "en", "n_whitespaces": 110, "n_words": 33, "vocab_size": 27 }
5
Python
5
59d22b6bb7287613d598611027f640d068ca5748
common.py
196,368
2
16
shape
https://github.com/sympy/sympy.git
Moved imports to higher level
19
0
47,868
7
3
20
def test_sync_call_healthy_only(self): actors = [Actor.remote(i) for i in range(4)] manager = FaultTolerantActorManager(actors=actors) results = [] for _ in range(10): results.extend( manager.foreach_actor( lambda w: w.call(), healthy_only=True ).ignore_errors() ) # Wait for actors to recover. wait_for_restore() # Notice that since we only fire calls against healthy actors, # we wouldn't be aware that the actors have been recovered. # So once an actor is taken out of the lineup (10% chance), # it will not go back in, and we should have few results here. # Basically takes us 10 calls to kill all the actors. # Note that we can hardcode 10 here because we are using deterministic # sequences of random numbers. self.assertEqual(len(results), 10)
rllib/utils/tests/test_actor_manager.py
144
ray
{ "docstring": "Test synchronous remote calls to only healthy actors.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
114
Python
86
d329147ae28c57b290f6b932f9f3044523f67c4e
test_actor_manager.py
135,567
12
83
test_sync_call_healthy_only
https://github.com/ray-project/ray.git
[RLlib] Introduce FaultTolerantActorManager (#29703) Signed-off-by: Jun Gong <[email protected]>
298
0
30,658
16
5
23
def _get_time_micros(self) -> npt.NDArray[np.int64]: values = self._data._local_timestamps() reso = self._data._reso ppd = periods_per_day(reso) frac = values % ppd if reso == NpyDatetimeUnit.NPY_FR_ns.value: micros = frac // 1000 elif reso == NpyDatetimeUnit.NPY_FR_us.value: micros = frac elif reso == NpyDatetimeUnit.NPY_FR_ms.value: micros = frac * 1000 elif reso == NpyDatetimeUnit.NPY_FR_s.value: micros = frac * 1_000_000 else: # pragma: no cover raise NotImplementedError(reso) micros[self._isnan] = -1 return micros
pandas/core/indexes/datetimes.py
185
pandas
{ "docstring": "\n Return the number of microseconds since midnight.\n\n Returns\n -------\n ndarray[int64_t]\n ", "language": "en", "n_whitespaces": 46, "n_words": 10, "vocab_size": 10 }
64
Python
35
80c005e67f96f431674a37ecd8a9e8a2808f7db4
datetimes.py
167,469
24
113
_get_time_micros
https://github.com/pandas-dev/pandas.git
ENH: DatetimeIndex.indexer_between_time support non-nano (#47535)
204
0
40,025
10
2
5
def test_whether_worker_leaked_when_task_finished_with_errors(ray_start_regular): driver_template =
python/ray/tests/test_advanced_2.py
22
driver_template = """ import ray import os import ray import numpy as np import time ray.init(address="{address}", namespace="test")@ray.remote
ray
{ "docstring": "\nimport ray\nimport os\nimport ray\nimport numpy as np\nimport time\n\nray.init(address=\"{address}\", namespace=\"test\")\n\n# The util actor to store the pid cross jobs.\[email protected]", "language": "en", "n_whitespaces": 17, "n_words": 25, "vocab_size": 20 }
4
Python
4
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
test_advanced_2.py
131,209
60
139
test_whether_worker_leaked_when_task_finished_with_errors
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
7
2
29,485
5
1
14
async def endpoint_discovery(self, empty, context) -> jina_pb2.EndpointsProto: endpointsProto = jina_pb2.EndpointsProto() endpointsProto.endpoints.extend( list(self._data_request_handler._executor.requests.keys()) ) return endpointsProto
jina/serve/runtimes/worker/__init__.py
73
jina
{ "docstring": "\n Process the the call requested and return the list of Endpoints exposed by the Executor wrapped inside this Runtime\n\n :param empty: The service expects an empty protobuf message\n :param context: grpc context\n :returns: the response request\n ", "language": "en", "n_whitespaces": 72, "n_words": 36, "vocab_size": 31 }
15
Python
14
65d6d6da50cb795499ea5e361bf14908f62a3168
__init__.py
12,303
13
44
endpoint_discovery
https://github.com/jina-ai/jina.git
feat: gateway endpoint discovery (#4756)
61
0
2,252
14
1
7
def to_json(self) -> dict: return { "name": self.name, "type": self.type.name, "class": self.class_.name, } @dataclass
mitmproxy/dns.py
61
@dataclass
mitmproxy
{ "docstring": "\n Converts the question into json for mitmweb.\n Sync with web/src/flow.ts.\n ", "language": "en", "n_whitespaces": 32, "n_words": 10, "vocab_size": 10 }
14
Python
14
ea6f9727dab03b0811c180bab761d28b7e57ef50
dns.py
250,941
10
33
to_json
https://github.com/mitmproxy/mitmproxy.git
[dns] use snake_case in web flows
67
1
73,570
9
4
23
def test_higher_rank_inputs_for_importance_weights(self): for fw in framework_iterator(frameworks=("torch", "tf"), session=True): vtrace = vtrace_tf if fw != "torch" else vtrace_torch if fw == "tf": inputs_ = { "log_rhos": tf1.placeholder( dtype=tf.float32, shape=[None, None, 1] ), "discounts": tf1.placeholder( dtype=tf.float32, shape=[None, None, 1] ), "rewards": tf1.placeholder( dtype=tf.float32, shape=[None, None, 42] ), "values": tf1.placeholder(dtype=tf.float32, shape=[None, None, 42]), "bootstrap_value": tf1.placeholder( dtype=tf.float32, shape=[None, 42] ), } else: inputs_ = { "log_rhos": Box(-1.0, 1.0, (8, 10, 1)).sample(), "discounts": Box(-1.0, 1.0, (8, 10, 1)).sample(), "rewards": Box(-1.0, 1.0, (8, 10, 42)).sample(), "values": Box(-1.0, 1.0, (8, 10, 42)).sample(), "bootstrap_value": Box(-1.0, 1.0, (10, 42)).sample(), } output = vtrace.from_importance_weights(**inputs_) check(int(output.vs.shape[-1]), 42)
rllib/agents/impala/tests/test_vtrace.py
447
ray
{ "docstring": "Checks support for additional dimensions in inputs.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
96
Python
47
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
test_vtrace.py
133,740
29
315
test_higher_rank_inputs_for_importance_weights
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
583
0
30,091
18
11
36
def CCompiler_spawn(self, cmd, display=None, env=None): env = env if env is not None else dict(os.environ) if display is None: display = cmd if is_sequence(display): display = " ".join(list(display)) log.info(display) try: if self.verbose: subprocess.check_output(cmd, env=env) else: subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env) except subprocess.CalledProcessError as exc: o = exc.output s = exc.returncode except OSError as e: # OSError doesn't have the same hooks for the exception # output, but exec_command() historically would use an # empty string for EnvironmentError (base class for # OSError) # o = b'' # still that would make the end-user lost in translation! o = f"\n\n{e}\n\n\n" try: o = o.encode(sys.stdout.encoding) except AttributeError: o = o.encode("utf8") # status previously used by exec_command() for parent # of OSError s = 127 else: # use a convenience return here so that any kind of # caught exception will execute the default code after the # try / except block, which handles various exceptions return None if is_sequence(cmd): cmd = " ".join(list(cmd)) if self.verbose: forward_bytes_to_stdout(o) if re.search(b"Too many open files", o): msg = "\nTry rerunning setup command until build succeeds." else: msg = "" raise DistutilsExecError( 'Command "%s" failed with exit status %d%s' % (cmd, s, msg) )
sklearn/externals/_numpy_compiler_patch.py
374
scikit-learn
{ "docstring": "\n Execute a command in a sub-process.\n\n Parameters\n ----------\n cmd : str\n The command to execute.\n display : str or sequence of str, optional\n The text to add to the log file kept by `numpy.distutils`.\n If not given, `display` is equal to `cmd`.\n env: a dictionary for environment variables, optional\n\n Returns\n -------\n None\n\n Raises\n ------\n DistutilsExecError\n If the command failed, i.e. the exit status was not 0.\n\n ", "language": "en", "n_whitespaces": 134, "n_words": 66, "vocab_size": 51 }
195
Python
126
8a6cf1a33e80d0e4caa16205ce199a9e1bea7657
_numpy_compiler_patch.py
259,371
35
213
CCompiler_spawn
https://github.com/scikit-learn/scikit-learn.git
BLD Monkeypatch windows build to stablize build (#22693)
481
0
75,736
15
1
4
def not_public(self): return self.filter(self.private_q())
wagtail/query.py
31
wagtail
{ "docstring": "\n Filters the QuerySet to only contain pages that are in a private\n section and their descendants.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 16 }
4
Python
4
180d43a200163f5b7c75280f7bbf7cb4e5de1b91
query.py
79,209
2
17
not_public
https://github.com/wagtail/wagtail.git
Fix Page queryset.not_public returning all pages when no page restrictions exist. (#9067) Fixes #8952
18
0
16,893
9
1
10
def get_previous_release(self, project): return ( ReleaseProject.objects.filter(project=project, release__date_added__lt=self.date_added) .order_by("-release__date_added") .first() )
src/sentry/models/release.py
60
sentry
{ "docstring": "Get the release prior to this one. None if none exists", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
10
Python
10
272d35503a2d5174dfa8cad57f94a2354e453bf3
release.py
93,634
6
36
get_previous_release
https://github.com/getsentry/sentry.git
feat(ingest): Automatically associate commits to checksum release (#36491) Feature for Workflow 2.0. If the SDK is configured to file an event with the release version matching the release commit SHA, ingest will look to see if there have been commits between the release version and the previous release on Github. If there has been, it will register those GH commits as sentry commits and add them to the release. This will allow sentry to only notify developers who worked on the current release and reduce notification spam.
64
0
19,000
14
2
6
def trace(log_dir, create_perfetto_link=False, create_perfetto_trace=False): start_trace(log_dir, create_perfetto_link, create_perfetto_trace) try: yield finally: stop_trace()
jax/_src/profiler.py
51
jax
{ "docstring": "Context manager to take a profiler trace.\n\n The trace will capture CPU, GPU, and/or TPU activity, including Python\n functions and JAX on-device operations.\n\n The resulting trace can be viewed with TensorBoard. Note that TensorBoard\n doesn't need to be running when collecting the trace.\n\n Only once trace may be collected a time. A RuntimeError will be raised if a\n trace is started while another trace is running.\n\n Args:\n log_dir: The directory to save the profiler trace to (usually the\n TensorBoard log directory).\n create_perfetto_link: A boolean which, if true, creates and prints link to\n the Perfetto trace viewer UI (https://ui.perfetto.dev). The program will\n block until the link is opened and Perfetto loads the trace.\n create_perfetto_trace: A boolean which, if true, additionally dumps a\n ``perfetto_trace.json.gz`` file that is compatible for upload with the\n Perfetto trace viewer UI (https://ui.perfetto.dev). The file will also be\n generated if ``create_perfetto_link`` is true. This could be useful if you\n want to generate a Perfetto-compatible trace without blocking the\n processs.\n ", "language": "en", "n_whitespaces": 218, "n_words": 161, "vocab_size": 97 }
11
Python
11
260f1d8b843483df46cf397ae5a1afc0abc9c64f
profiler.py
121,807
6
30
trace
https://github.com/google/jax.git
Add option to generate perfetto trace without generating link
21
0
27,075
10
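A minimal profiling sketch (assumes JAX is installed; the log directory is illustrative):

import jax.numpy as jnp
from jax.profiler import trace

# Everything inside the block is captured; open the directory with TensorBoard.
with trace("/tmp/jax-trace"):
    x = jnp.ones((1024, 1024))
    (x @ x).block_until_ready()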
4
13
def repartition(self, axis=None): if StorageFormat.get() == "Hdk": # Hdk uses only one partition, it makes # no sense for it to repartition the dataframe. return self axes = [0, 1] if axis is None else [axis] new_query_compiler = self for _ax in axes: new_query_compiler = new_query_compiler.__constructor__( new_query_compiler._modin_frame.apply_full_axis( _ax, lambda df: df, keep_partitioning=False ) ) return new_query_compiler # End of DataFrame methods
modin/core/storage_formats/base/query_compiler.py
113
modin
{ "docstring": "\n Repartitioning QueryCompiler objects to get ideal partitions inside.\n\n Allows to improve performance where the query compiler can't improve\n yet by doing implicit repartitioning.\n\n Parameters\n ----------\n axis : {0, 1, None}, optional\n The axis along which the repartitioning occurs.\n `None` is used for repartitioning along both axes.\n\n Returns\n -------\n BaseQueryCompiler\n The repartitioned BaseQueryCompiler.\n ", "language": "en", "n_whitespaces": 156, "n_words": 52, "vocab_size": 45 }
61
Python
49
704ded959541bcf55acadfb49f3fda804267b767
query_compiler.py
155,395
12
70
repartition
https://github.com/modin-project/modin.git
FEAT-#5367: Introduce new API for repartitioning Modin objects (#5366) Co-authored-by: Iaroslav Igoshev <[email protected]> Co-authored-by: Vasily Litvinov <[email protected]> Signed-off-by: Anatoly Myachev <[email protected]>
210
0
36,375
14
2
12
def show_compilers(): # XXX this "knows" that the compiler option it's describing is # "--compiler", which just happens to be the case for the three # commands that use it. from distutils.fancy_getopt import FancyGetopt compilers = [] for compiler in compiler_class.keys(): compilers.append(("compiler="+compiler, None, compiler_class[compiler][2])) compilers.sort() pretty_printer = FancyGetopt(compilers) pretty_printer.print_help("List of available compilers:")
python3.10.4/Lib/distutils/ccompiler.py
106
XX-Net
{ "docstring": "Print list of available compilers (used by the \"--help-compiler\"\n options to \"build\", \"build_ext\", \"build_clib\").\n ", "language": "en", "n_whitespaces": 20, "n_words": 14, "vocab_size": 14 }
52
Python
44
8198943edd73a363c266633e1aa5b2a9e9c9f526
ccompiler.py
222,587
9
61
show_compilers
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
114
0
56,655
12
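Usage is a single call (assumes a Python version that still ships distutils):

from distutils.ccompiler import show_compilers

# Prints "List of available compilers:" followed by one compiler=... entry each.
show_compilers()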
1
12
def test_acquire_unavailable(ray_start_4_cpus): manager = PlacementGroupResourceManager(update_interval_s=0) assert not manager.acquire_resources(REQUEST_2_CPU) manager.request_resources(REQUEST_2_CPU) ray.wait(manager.get_resource_futures(), num_returns=1) assert manager.acquire_resources(REQUEST_2_CPU)
python/ray/air/tests/test_resource_manager_placement_group.py
82
ray
{ "docstring": "Test that acquiring resources that are not available returns None.\n\n - Try to acquire\n - Assert this does not work\n - Request resources\n - Wait until ready\n - Acquire\n - Assert this did work\n ", "language": "en", "n_whitespaces": 55, "n_words": 34, "vocab_size": 23 }
13
Python
11
edb17fd2069844f12237c85ba6607afae536401d
test_resource_manager_placement_group.py
138,061
6
49
test_acquire_unavailable
https://github.com/ray-project/ray.git
[air/tune] Internal resource management 1 - Ray AIR resource manager implementation (#30777) Prerequisite to #30016 This PR adds a new Ray AIR resource manager to replace the PlacementGroupManager of Ray Tune. Details can be found in #30016. Specifically, this PR - Adds the main resource manager abstractions - Renames (and moves) PlacementGroupFactory to ResourceRequest - Adds implementations and tests for a placement group based manager and a budget based manager Signed-off-by: Kai Fricke <[email protected]> Signed-off-by: Kai Fricke <[email protected]> Co-authored-by: matthewdeng <[email protected]>
31
0
31,300
9
23
25
def multiset_derangements(s): ms = multiset(s) mx = max(ms.values()) n = len(s) # special cases # 0) impossible case if mx*2 > n: return # 1) singletons if len(ms) == n: for p in generate_derangements(s): yield p return for M in ms: if ms[M] == mx: break inonM = [i for i in range(n) if s[i] != M] iM = [i for i in range(n) if s[i] == M] rv = [None]*n # 2) half are the same if 2*mx == n: for i in inonM: rv[i] = M for p in multiset_permutations([s[i] for i in inonM]): for i, pi in zip(iM, p): rv[i] = pi yield rv return # 3) single repeat covers all but 1 of the non-repeats if n - 2*mx == 1 and len(ms.values()) - 1 == n - mx: for i in range(len(inonM)): i1 = inonM[i] ifill = inonM[:i] + inonM[i+1:] for j in ifill: rv[j] = M for p in permutations([s[j] for j in ifill]): rv[i1] = s[i1] for j, pi in zip(iM, p): rv[j] = pi k = i1 for j in iM: rv[j], rv[k] = rv[k], rv[j] yield rv k = j return
sympy/utilities/iterables.py
486
sympy
{ "docstring": "Generate derangements of the elements of s *in place*.\n\n Examples\n ========\n\n >>> from sympy.utilities.iterables import multiset_derangements, uniq\n\n Because the derangements of multisets (not sets) are generated\n in place, copies of the return value must be made if a collection\n of derangements is desired or else all values will be the same:\n\n >>> list(uniq([i for i in multiset_derangements('1233')]))\n [['3', '3', '2', '1']]\n >>> [i.copy() for i in multiset_derangements('1233')]\n [['3', '3', '1', '2'], ['3', '3', '2', '1']]\n ", "language": "en", "n_whitespaces": 108, "n_words": 75, "vocab_size": 54 }
190
Python
90
25aaf2c3a6ac0d39da710d6e67f244930b56d669
iterables.py
195,921
45
358
multiset_derangements
https://github.com/sympy/sympy.git
fix repeat covers all but 1
569
0
47,476
16
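A short sketch of the in-place caveat from the docstring (the single result shown for 'aabb' is what the half-repeat branch of the code above yields):

from sympy.utilities.iterables import multiset_derangements

# The same list object is yielded each time, so copy before storing.
results = [d.copy() for d in multiset_derangements('aabb')]
# results == [['b', 'b', 'a', 'a']]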
1
7
def content_type(model): return ContentType.objects.get_for_model(model) @register.filter()
netbox/utilities/templatetags/builtins/filters.py
38
@register.filter()
netbox
{ "docstring": "\n Return the ContentType for the given object.\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 6 }
5
Python
5
7c105019d8ae9205051c302e7499b33a455f9176
filters.py
264,445
2
15
content_type
https://github.com/netbox-community/netbox.git
Closes #8600: Document built-in template tags & filters
10
1
77,731
8
4
18
def set_weights(self, weights): params = self.weights if len(params) != len(weights): raise ValueError( "Length of the specified weight list (" + str(len(weights)) + ") does not match the number of weights " "of the optimizer (" + str(len(params)) + ")" ) weight_value_tuples = [] param_values = backend.batch_get_value(params) for pv, p, w in zip(param_values, params, weights): if pv.shape != w.shape: raise ValueError( "Optimizer weight shape " + str(pv.shape) + " not compatible with " "provided weight shape " + str(w.shape) ) weight_value_tuples.append((p, w)) backend.batch_set_value(weight_value_tuples)
keras/optimizers/optimizer_v1.py
212
keras
{ "docstring": "Sets the weights of the optimizer, from Numpy arrays.\n\n Should only be called after computing the gradients\n (otherwise the optimizer has no weights).\n\n Args:\n weights: a list of Numpy arrays. The number of arrays and their shape\n must match number of the dimensions of the weights of the optimizer\n (i.e. it should match the output of `get_weights`).\n\n Raises:\n ValueError: in case of incompatible weight shapes.\n ", "language": "en", "n_whitespaces": 148, "n_words": 65, "vocab_size": 45 }
82
Python
56
84afc5193d38057e2e2badf9c889ea87d80d8fbf
optimizer_v1.py
275,342
21
125
set_weights
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
341
0
81,383
17
1
11
def test__render_filenames_undefined_template(): path = "/srv/salt/saltines" dest = "/srv/salt/cheese" saltenv = "base" template = "biscuits" ret = (path, dest) pytest.raises( CommandExecutionError, cp._render_filenames, path, dest, saltenv, template )
tests/pytests/unit/modules/test_cp.py
73
salt
{ "docstring": "\n Test if _render_filenames fails upon getting a template not in\n TEMPLATE_REGISTRY.\n ", "language": "en", "n_whitespaces": 21, "n_words": 11, "vocab_size": 11 }
26
Python
21
ba58c71c55f8d65e702525faf435c2de91aae85c
test_cp.py
215,688
9
42
test__render_filenames_undefined_template
https://github.com/saltstack/salt.git
move cp exec module tests to pytest
57
0
54,099
8
9
10
def match(self, node, results=None): if self.type is not None and node.type != self.type: return False if self.content is not None: r = None if results is not None: r = {} if not self._submatch(node, r): return False if r: results.update(r) if results is not None and self.name: results[self.name] = node return True
python3.10.4/Lib/lib2to3/pytree.py
145
XX-Net
{ "docstring": "\n Does this pattern exactly match a node?\n\n Returns True if it matches, False if not.\n\n If results is not None, it must be a dict which will be\n updated with the nodes matching named subpatterns.\n\n Default implementation for non-wildcard patterns.\n ", "language": "en", "n_whitespaces": 83, "n_words": 40, "vocab_size": 36 }
52
Python
29
8198943edd73a363c266633e1aa5b2a9e9c9f526
pytree.py
218,860
14
93
match
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
198
0
55,510
11
2
5
def endswith_cr(line): return line.endswith("\r" if isinstance(line, str) else b"\r")
django/core/files/base.py
43
django
{ "docstring": "Return True if line (a text or bytestring) ends with '\\r'.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
9
Python
9
9c19aff7c7561e3a82978a272ecdaad40dda5c00
base.py
204,478
2
23
endswith_cr
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
15
0
50,741
10
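Quick examples (both str and bytes inputs are handled):

from django.core.files.base import endswith_cr

endswith_cr("line\r")    # True
endswith_cr(b"line\n")   # False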
3
10
def _get_n_args(self, args, example, n): # type: (List[str], str, int) -> Any if len(args) != n: msg = ( 'Got unexpected number of arguments, expected {}. ' '(example: "{} config {}")' ).format(n, get_prog(), example) raise PipError(msg) if n == 1: return args[0] else: return args
.venv/lib/python3.8/site-packages/pip/_internal/commands/configuration.py
93
transferlearning
{ "docstring": "Helper to make sure the command got the right number of arguments\n ", "language": "en", "n_whitespaces": 19, "n_words": 12, "vocab_size": 11 }
45
Python
43
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
configuration.py
60,598
11
56
_get_n_args
https://github.com/jindongwang/transferlearning.git
upd; format
165
0
12,217
13
5
19
def tmpfile(extension="", dir=None): extension = extension.lstrip(".") if extension: extension = "." + extension handle, filename = tempfile.mkstemp(extension, dir=dir) os.close(handle) os.remove(filename) try: yield filename finally: if os.path.exists(filename): with suppress(OSError): # sometimes we can't remove a generated temp file if os.path.isdir(filename): shutil.rmtree(filename) else: os.remove(filename) @contextmanager
dask/utils.py
179
@contextmanager
dask
{ "docstring": "\n Function to create and return a unique temporary file with the given extension, if provided.\n\n Parameters\n ----------\n extension : str\n The extension of the temporary file to be created\n dir : str\n If ``dir`` is not None, the file will be created in that directory; otherwise,\n Python's default temporary directory is used.\n\n Returns\n -------\n out : str\n Path to the temporary file\n\n See Also\n --------\n NamedTemporaryFile : Built-in alternative for creating temporary files\n tmp_path : pytest fixture for creating a temporary directory unique to the test invocation\n\n Notes\n -----\n This context manager is particularly useful on Windows for opening temporary files multiple times.\n ", "language": "en", "n_whitespaces": 180, "n_words": 103, "vocab_size": 69 }
43
Python
35
bf66221722cce8f09a9b09895bdb4596f14a5430
utils.py
156,915
16
100
tmpfile
https://github.com/dask/dask.git
`tmpfile` does not end files with period on empty extension (#9429)
167
1
36,805
17
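A usage sketch matching the behavior above (note the yielded path does not exist yet; the caller creates the file, and cleanup happens on exit):

from dask.utils import tmpfile

with tmpfile(extension="csv") as fn:
    # fn is a fresh, unique path ending in ".csv"; write and read it here.
    with open(fn, "w") as f:
        f.write("a,b\n1,2\n")
# fn has been removed once the block exits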
2
19
def test_normalized_P5_directed(self): G = nx.DiGraph() nx.add_path(G, range(5)) b_answer = {0: 0, 1: 1.0 / 12.0, 2: 1.0 / 12.0, 3: 0, 4: 0, 5: 0} b = nx.betweenness_centrality_subset( G, sources=[0], targets=[3], normalized=True, weight=None ) for n in sorted(G): assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py
162
networkx
{ "docstring": "Betweenness Centrality Subset: Normalized Directed P5", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
43
Python
36
4376a6f751874dceff9dadc0a6a6bfc2dfa04000
test_betweenness_centrality_subset.py
177,474
9
120
test_normalized_P5_directed
https://github.com/networkx/networkx.git
PR for issue #6033 Improve test coverage for algorithms in betweenness_subset.py #6033 (#6083) * Updated test_betweenness_centrality_subset.py * add test of normalized in test_betweenness_centrality_subset.py * add test of normalized in test_betweenness_centrality_subset.py * update test of normalized in test_betweenness_centrality_subset.py * update weight of test_betweenness_centrality_subset.py * add docstring * add docstring in test_betweenness_centrality_subset.py * add docstring in test_betweenness_centrality_subset.py
114
0
42,386
11
2
10
def window_frame_rows_start_end(self, start=None, end=None): if not self.connection.features.supports_over_clause: raise NotSupportedError("This backend does not support window expressions.") return self.window_frame_start(start), self.window_frame_end(end)
django/db/backends/base/operations.py
71
django
{ "docstring": "\n Return SQL for start and end points in an OVER clause window frame.\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 13 }
18
Python
17
9c19aff7c7561e3a82978a272ecdaad40dda5c00
operations.py
204,877
4
43
window_frame_rows_start_end
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
50
0
50,950
10
4
17
def completion_item_yank(self, sel=False): text = self._cmd.selectedText() if not text: index = self.currentIndex() if not index.isValid(): raise cmdutils.CommandError("No item selected!") text = self._model().data(index) if not utils.supports_selection(): sel = False utils.set_clipboard(text, selection=sel)
qutebrowser/completion/completionwidget.py
133
qutebrowser
{ "docstring": "Yank the current completion item into the clipboard.\n\n Args:\n sel: Use the primary selection instead of the clipboard.\n ", "language": "en", "n_whitespaces": 43, "n_words": 18, "vocab_size": 14 }
30
Python
22
a20bb67a878b2e68abf8268c1b0a27f018d01352
completionwidget.py
320,775
10
78
completion_item_yank
https://github.com/qutebrowser/qutebrowser.git
mypy: Upgrade to PyQt5-stubs 5.15.6.0 For some unknown reason, those new stubs cause a *lot* of things now to be checked by mypy which formerly probably got skipped due to Any being implied somewhere. The stubs themselves mainly improved, with a couple of regressions too. In total, there were some 337 (!) new mypy errors. This commit fixes almost all of them, and the next commit improves a fix to get things down to 0 errors again. Overview of the changes: ==== qutebrowser/app.py - Drop type ignore due to improved stubs. ==== qutebrowser/browser/browsertab.py - Specify the type of _widget members more closely than just QWidget. This is debatable: I suppose the abstract stuff shouldn't need to know anything about the concrete backends at all. But it seems like we cut some corners when initially implementing things, and put some code in browsertab.py just because the APIs of both backends happened to be compatible. Perhaps something to reconsider once we drop QtWebKit and hopefully implement a dummy backend. - Add an additional assertion in AbstractAction.run_string. This is already covered by the isinstance(member, self.action_base) above it, but that's too dynamic for mypy to understand. - Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x and y components), not a single int. - Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x and y components), not a single int. - Fix the argument types of AbstractScroller.to_perc, as it's possible to pass fractional percentages too. - Specify the type for AbstractHistoryPrivate._history. See above (_widget) re this being debatable. - Fix the return type of AbstractTabPrivate.event_target(), which can be None (see #3888). - Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS return value), not None. - Fix the argument type for AbstractTabPrivate.toggle_inspector: position can be None to use the last used position. - Declare the type of sub-objects of AbstractTab. - Fix the return value of AbstractTab.icon(), which is the QIcon, not None. ==== qutebrowser/browser/commands.py - Make sure the active window is a MainWindow (with a .win_id attribute). ==== qutebrowser/browser/downloadview.py - Add _model() which makes sure that self.model() is a DownloadModel, not None or any other model. This is needed because other methods access a variety of custom attributes on it, e.g. last_index(). ==== qutebrowser/browser/greasemonkey.py - Add an ignore for AbstractDownload.requested_url which we patch onto the downloads. Probably would be nicer to add it as a proper attribute which always gets set by the DownloadManager. ==== qutebrowser/browser/hints.py - Remove type ignores for QUrl.toString(). - Add a new type ignore for combining different URL flags (which works, but is not exactly type safe... still probably a regression in the stubs). - Make sure the things we get back from self._get_keyparser are what we actually expect. Probably should introduce a TypedDict (and/or overloads for _get_keyparser with typing.Literal) to teach mypy about the exact return value. See #7098. This is needed because we access Hint/NormalKeyParser-specific attributes such as .set_inhibited_timout() or .update_bindings(). ==== qutebrowser/browser/inspector.py - Similar changes than in browsertab.py to make some types where we share API (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next commit. ==== qutebrowser/browser/network/pac.py - Remove now unneeded type ignore for signal. 
==== qutebrowser/browser/qtnetworkdownloads.py - Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an AbstractDownload), so that we can call ._uses_nam() on it. ==== qutebrowser/browser/qutescheme.py - Remove now unneeded type ignore for QUrl flags. ==== qutebrowser/browser/urlmarks.py - Specify the type of UrlMarkManager._lineparser, as those only get initialized in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist. ==== qutebrowser/browser/webelem.py - New casts to turn single KeyboardModifier (enum) entries into KeyboardModifiers (flags). Might not be needed anymore with Qt 6. - With that, casting the final value is now unneeded. ==== qutebrowser/browser/webengine/notification.py - Remove now unneeded type ignore for signal. - Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished() is a QProcess, not just any QObject. ==== qutebrowser/browser/webengine/webenginedownloads.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webengine/webengineelem.py - Specify the type of WebEngineElement._tab. - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webengineinspector.py - See changes to inspector.py and next commit. - Remove now unneeded type ignore for signal. ==== qutebrowser/browser/webengine/webenginequtescheme.py - Remove now unneeded type ignore for mixed flags. ==== qutebrowser/browser/webengine/webenginesettings.py - Ignore access of .setter attribute which we patch onto QWebEngineProfile. Would be nice to have a subclass or wrapper-class instead. ==== qutebrowser/browser/webengine/webenginetab.py - Specified the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Remove some now-unneeded type ignores for creating FindFlags. - Specify more concrete types for WebEngineTab members where we actually need to access WebEngine-specific attributes. - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webengine/webview.py - Make sure the page we get is our custom WebEnginePage subclass, not just any QWebEnginePage. This is needed because we access custom attributes on it. ==== qutebrowser/browser/webkit/network/networkreply.py - Remove now unneeded type ignores for signals. ==== qutebrowser/browser/webkit/webkitinspector.py - See changes to inspector.py and next commit. ==== qutebrowser/browser/webkit/webkittab.py - Specify the type of _widget members more closely than just QWidget. See browsertab.py changes for details. - Add a type ignore for WebKitAction because our workaround needs to treat them as ints (which is allowed by PyQt, even if not type-safe). - Add new ignores for findText calls: The text is a QString and can be None; the flags are valid despite mypy thinking they aren't (stubs regression?). - Specify the type for WebKitHistoryPrivate._history, because we access WebKit-specific attributes. See above (_widget) re this being debatable. - Make mypy aware that .currentFrame() and .frameAt() can return None (stubs regression?). - Make sure the .page() and .page().networkAccessManager() are our subclasses rather than the more generic QtWebKit objects, as we use custom attributes. - Add new type ignores for signals (stubs regression!) 
==== qutebrowser/browser/webkit/webpage.py - Make sure the .networkAccessManager() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. - Replace a cast by a type ignore. The cast didn't work anymore. ==== qutebrowser/browser/webkit/webview.py - Make sure the .page() is our subclass rather than the more generic QtWebKit object, as we use custom attributes. ==== qutebrowser/commands/userscripts.py - Remove now unneeded type ignore for signal. ==== qutebrowser/completion/completer.py - Add a new _completion() getter (which ensures it actually gets the completion view) rather than accessing the .parent() directly (which could be any QObject). ==== qutebrowser/completion/completiondelegate.py - Make sure self.parent() is a CompletionView (no helper method as there is only one instance). - Remove a now-unneeded type ignore for adding QSizes. ==== qutebrowser/completion/completionwidget.py - Add a ._model() getter which ensures that we get a CompletionModel (with custom attributes) rather than Qt's .model() which can be any QAbstractItemModel (or None). - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/completion/models/completionmodel.py - Remove now unneeded type ignores for signals. - Ignore a complaint about .set_pattern() not being defined. Completion categories don't share any common parent class, so it would be good to introduce a typing.Protocol for this. See #7098. ==== qutebrowser/components/misccommands.py - Removed a now-unneeded type ignore for OR-ing flags. ==== qutebrowser/components/readlinecommands.py - Make sure QApplication.instance() is a QApplication (and not just a QCoreApplication). This includes the former "not None" check. ==== qutebrowser/components/scrollcommands.py - Add basic annotation for "funcs" dict. Could have a callable protocol to specify it needs a count kwarg, see #7098. ==== qutebrowser/config/stylesheet.py - Correctly specify that stylesheet apply to QWidgets, not any QObject. - Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy about this with overloads and protocols (stylesheet for set_register being None => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not worth the troble. See #7098. ==== qutebrowser/keyinput/keyutils.py - Remove some now-unneeded type ignores and add a cast for using a single enum value as flags. Might need to look at this again with Qt 6 support. ==== qutebrowser/keyinput/modeman.py - Add a FIXME for using a TypedDict, see comments for hints.py above. ==== qutebrowser/mainwindow/mainwindow.py - Remove now-unneeded type ignores for calling with OR-ed flags. - Improve where we cast from WindowType to WindowFlags, no int needed - Use new .tab_bar() getter, see below. ==== qutebrowser/mainwindow/prompt.py - Remove now-unneeded type ignores for calling with OR-ed flags. ==== qutebrowser/mainwindow/statusbar/bar.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/statusbar/command.py - Fix type for setText() override (from QLineEdit): text can be None (QString in C++). ==== qutebrowser/mainwindow/statusbar/url.py - Adjust type ignores around @pyqtProperty. The fact one is still needed seems like a stub regression. ==== qutebrowser/mainwindow/tabbedbrowser.py - Specify that TabDeque manages browser tabs, not any QWidgets. It accesses AbstractTab-specific attributes. - Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access .maybe_hide. 
- Fix the annotations for stored marks: Scroll positions are a QPoint, not int. - Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and .widget(), which ensures that the return values are valid AbstractTabs (or None for _tab_by_idx). This is needed because we access AbstractTab-specific attributes. - For some places, where the tab can be None, continue using .currentTab() but add asserts. - Remove some now-unneeded [unreachable] ignores, as mypy knows about the None possibility now. ==== qutebrowser/mainwindow/tabwidget.py - Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and .widget() are of type TabBar and AbstractTab, respectively. - Add additional assertions where we expect ._tab_by_idx() to never be None. - Remove dead code in get_tab_fields for handling a None y scroll position. I was unable to find any place in the code where this could be set to None. - Remove some now-unneeded type ignores and casts, as mypy now knows that _type_by_idx() could be None. - Work around a strange instance where mypy complains about not being able to find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility, despite it clearly being shown as a bool *inside* that class without any annotation. - Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in fact a TabWidget. ==== qutebrowser/misc/crashsignal.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/editor.py - Remove now unneeded type ignores for signals. ==== qutebrowser/misc/ipc.py - Remove now unneeded type ignores for signals. - Add new type ignores for .error() which is both a signal and a getter (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was renamed to errorOccurred in 5.15. ==== qutebrowser/misc/objects.py - Make sure mypy knows that objects.app is our custom Application (with custom attributes) rather than any QApplication. ==== qutebrowser/utils/objreg.py - Ignore attr-defined for .win_id attributes. Maybe could add a typing.Protocol, but ideally, the whole objreg stuff should die one day anyways. ==== tests/unit/completion/test_completer.py - Make CompletionWidgetStub inherit from CompletionView so that it passes the new isinstance() asserts in completer.py (see above).
124
0
117,342
12
1
3
def show_panel_furniture(self): return self.is_shown()
wagtail/admin/panels.py
23
wagtail
{ "docstring": "\n Whether this panel shows the panel furniture instead of being rendered outside of it.\n ", "language": "en", "n_whitespaces": 37, "n_words": 14, "vocab_size": 12 }
4
Python
4
9a1606c809b2daee005591d98e9e2058e4823c79
panels.py
79,326
2
12
show_panel_furniture
https://github.com/wagtail/wagtail.git
Add show_panel_furniture() in BoundPanel This allows TabbedInterface to hide a tab but still render its children
26
0
16,917
7
2
11
def header_encode(header_bytes, charset='iso-8859-1'): # Return empty headers as an empty string. if not header_bytes: return '' # Iterate over every byte, encoding if necessary. encoded = header_bytes.decode('latin1').translate(_QUOPRI_HEADER_MAP) # Now add the RFC chrome to each encoded chunk and glue the chunks # together. return '=?%s?q?%s?=' % (charset, encoded) _QUOPRI_BODY_ENCODE_MAP = _QUOPRI_BODY_MAP[:] for c in b'\r\n': _QUOPRI_BODY_ENCODE_MAP[c] = chr(c)
python3.10.4/Lib/email/quoprimime.py
107
XX-Net
{ "docstring": "Encode a single header line with quoted-printable (like) encoding.\n\n Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but\n used specifically for email header fields to allow charsets with mostly 7\n bit characters (and some 8 bit) to remain more or less readable in non-RFC\n 2045 aware mail clients.\n\n charset names the character set to use in the RFC 2046 header. It\n defaults to iso-8859-1.\n ", "language": "en", "n_whitespaces": 89, "n_words": 67, "vocab_size": 57 }
58
Python
48
8198943edd73a363c266633e1aa5b2a9e9c9f526
quoprimime.py
223,870
5
37
header_encode
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
90
0
57,121
11
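Two illustrative calls (the return values follow from the RFC 2047 chrome in the code above):

from email.quoprimime import header_encode

header_encode(b"hello")      # '=?iso-8859-1?q?hello?='
header_encode(b"caf\xe9")    # '=?iso-8859-1?q?caf=E9?='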
3
47
def forward(self, src_word, trg_word): src_max_len = paddle.shape(src_word)[-1] trg_max_len = paddle.shape(trg_word)[-1] src_slf_attn_bias = paddle.cast( src_word == self.bos_id, dtype=paddle.get_default_dtype()).unsqueeze([1, 2]) * -1e4 src_slf_attn_bias.stop_gradient = True trg_slf_attn_bias = self.transformer.generate_square_subsequent_mask( trg_max_len) trg_slf_attn_bias.stop_gradient = True trg_src_attn_bias = src_slf_attn_bias src_pos = paddle.cast( src_word != self.bos_id, dtype=src_word.dtype) * paddle.arange( start=0, end=src_max_len, dtype=src_word.dtype) trg_pos = paddle.cast( trg_word != self.bos_id, dtype=src_word.dtype) * paddle.arange( start=0, end=trg_max_len, dtype=trg_word.dtype) with paddle.static.amp.fp16_guard(): src_emb = self.src_word_embedding(src_word) src_pos_emb = self.src_pos_embedding(src_pos) src_emb = src_emb + src_pos_emb enc_input = F.dropout( src_emb, p=self.dropout, training=self.training) if self.dropout else src_emb trg_emb = self.trg_word_embedding(trg_word) trg_pos_emb = self.trg_pos_embedding(trg_pos) trg_emb = trg_emb + trg_pos_emb dec_input = F.dropout( trg_emb, p=self.dropout, training=self.training) if self.dropout else trg_emb dec_output = self.transformer( enc_input, dec_input, src_mask=src_slf_attn_bias, tgt_mask=trg_slf_attn_bias, memory_mask=trg_src_attn_bias) predict = self.linear(dec_output) return predict
paddlenlp/transformers/transformer/modeling.py
457
PaddleNLP
{ "docstring": "\n The Transformer forward methods. The input are source/target sequences, and\n returns logits.\n\n Args:\n src_word (Tensor):\n The ids of source sequences words. It is a tensor with shape\n `[batch_size, source_sequence_length]` and its data type can be\n int or int64.\n trg_word (Tensor):\n The ids of target sequences words. It is a tensor with shape\n `[batch_size, target_sequence_length]` and its data type can be\n int or int64.\n\n Returns:\n Tensor:\n Output tensor of the final layer of the model whose data\n type can be float32 or float64 with shape\n `[batch_size, sequence_length, vocab_size]`.\n\n Example:\n .. code-block::\n\n import paddle\n from paddlenlp.transformers import TransformerModel\n\n transformer = TransformerModel(\n src_vocab_size=30000,\n trg_vocab_size=30000,\n max_length=257,\n num_encoder_layers=6,\n num_decoder_layers=6,\n n_head=8,\n d_model=512,\n d_inner_hid=2048,\n dropout=0.1,\n weight_sharing=True,\n bos_id=0,\n eos_id=1)\n\n batch_size = 5\n seq_len = 10\n predict = transformer(\n src_word=paddle.randint(low=3, high=30000, shape=[batch_size, seq_len]),\n trg_word=paddle.randint(low=3, high=30000, shape=[batch_size, seq_len]))\n ", "language": "en", "n_whitespaces": 706, "n_words": 128, "vocab_size": 85 }
115
Python
67
b0c35d5e1ff02a634fa26392b60d3885c2c78677
modeling.py
322,101
84
301
forward
https://github.com/PaddlePaddle/PaddleNLP.git
Fix the attention mask for fp16 (#1585)
528
0
118,058
14
1
5
def is_nan(self, a): a = _convert_other(a, raiseit=True) return a.is_nan()
python3.10.4/Lib/_pydecimal.py
40
XX-Net
{ "docstring": "Return True if the operand is a qNaN or sNaN;\n otherwise return False.\n\n >>> ExtendedContext.is_nan(Decimal('2.50'))\n False\n >>> ExtendedContext.is_nan(Decimal('NaN'))\n True\n >>> ExtendedContext.is_nan(Decimal('-sNaN'))\n True\n >>> ExtendedContext.is_nan(1)\n False\n ", "language": "en", "n_whitespaces": 95, "n_words": 25, "vocab_size": 19 }
9
Python
9
8198943edd73a363c266633e1aa5b2a9e9c9f526
_pydecimal.py
219,742
3
24
is_nan
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
30
0
55,761
9
12
21
def _set_hyperopt_defaults(self): if not self.hyperopt: return scheduler = self.hyperopt.get("executor", {}).get("scheduler") if not scheduler: return if EXECUTOR in self.hyperopt: set_default_value(self.hyperopt[EXECUTOR], TYPE, RAY) # Disable early stopping when using a scheduler. We achieve this by setting the parameter # to -1, which ensures the condition to apply early stopping is never met. early_stop = self.trainer.early_stop if early_stop is not None and early_stop != -1: warnings.warn("Can't utilize `early_stop` while using a hyperopt scheduler. Setting early stop to -1.") self.trainer.early_stop = -1 max_t = scheduler.get("max_t") time_attr = scheduler.get("time_attr") epochs = self.trainer.to_dict().get("epochs", None) if max_t is not None: if time_attr == "time_total_s": if epochs is None: setattr(self.trainer, "epochs", sys.maxsize) # continue training until time limit hit # else continue training until either time or trainer epochs limit hit elif epochs is not None and epochs != max_t: raise ValueError( "Cannot set trainer `epochs` when using hyperopt scheduler w/different training_iteration `max_t`. " "Unset one of these parameters in your config or make sure their values match." ) else: setattr(self.trainer, "epochs", max_t) # run trainer until scheduler epochs limit hit elif epochs is not None: scheduler["max_t"] = epochs # run scheduler until trainer epochs limit hit
ludwig/schema/model_config.py
326
ludwig
{ "docstring": "This function was migrated from defaults.py with the intention of setting some hyperopt defaults while\n the hyperopt section of the config object is not fully complete.\n\n Returns:\n None -> modifies trainer and hyperopt sections\n ", "language": "en", "n_whitespaces": 66, "n_words": 34, "vocab_size": 29 }
189
Python
106
4d2d81f9fdefc52eea6a9bf0826a6f2ffc8d681b
model_config.py
8,418
28
188
_set_hyperopt_defaults
https://github.com/ludwig-ai/ludwig.git
Config Object (#2426) * Fixed loss instances across features * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed binary OneOfImplementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Flake 8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix custom loss components * Fix gbm category * Remove config object code, out of scope * Fixed more tests * Fixed incorrect text preproc default, added clip to category feature level * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes additional tests * Cache jsonschema validator to reduce memory pressure * Fix imports * Skip neuropod test * Added upgrade audio to default preproc back compat and cleaned up * Small nits * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Change backfill constant for audio * Add docstring to compute feature hash * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Unused import * Another backfill constant change * Unused import * remove default population functions * Added config object test * rewired build_inputs * rewired combiner in ecd, added logic to config object * Refactored ecd.py * Fixing up merge_with_defaults, need metadata changes in master * Refactored defaults section and mega upgraded config obj * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed some formatting * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed feature col, proc col, and render config from defaults.py * Fix duplicate import * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added config initializer to merge defaults flow * Refactored update_config_with_metadata * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added dict conversion method to config object and refactored merge config function in config_utils * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Refactored until preproc entrypoint * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed update_config_with_metadata * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed load config base feature method - no longer necessary * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Formatting * Fixed input size assignment * Temp fix * Fixed pretrained encoder path referencing temp until preproc refactor * Solved the WORST BUG EVER * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Switch reduce_input to None for sequence tagger * Fixed another one * Fixed typo * Various test fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Flake 8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed excess defaults params issue * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Minor fixes * [pre-commit.ci] 
auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed some defaults tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed more tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed more tests * Formatting * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * More test fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed defaults tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix more tests * Flake 8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix more tests * Fixed more tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed more tests * Fixed more tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixing ghost tests attempt * Deep copy to smash the ghost failures * Copied top level modules now too * Started fixing hyperopt * Fixed Hyperopt Issues * Flake 8 * Remove commented out code * Address Piero feedback * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Flake 8 * Removed merge with defaults * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed various issues with preprocessing and splitting positioning * Fixed hyperopt issues * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Refactored api pipeline to use all config obj references * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed more tests * Flake 8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix more tests * Fixed auto tune learning rate and batch size * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed sequence feature tests * Fixed image feature test * Fixed last test * flake 8 * Marshmallowify Config object, remove manual to dict method, add Factory method constructors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Validate config within config object * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * All Travis feedback addressed * Using all new constructors now * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed from class attributes * Added deep copies back and piped repr inheritance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Format * Small error fix, moved back compat into Config Object * Flake8 * Docstring for hyperopt defaults method * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Address Joppe feedback * Revert "Address Joppe feedback" This reverts commit 42f1665ef917d062a010550bb960594c355285ff. 
* Fix tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Flake8 * fix test * Small improvement * Changed repr for input features, added feature enabling/disabling * Added feature enabling/disabling, and better reprs for SDK dev * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Flake 8 * Added rich to requirements.txt * Add some more CO tests and comment more on CO code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix explain issue * Julian feedback * Added TODOs for future refactor PRs * Fix explain test failure, test shared state improvement and bug fix, remove unncessary code from convert_submodules * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * implement Daniel's feedback * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix residual errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Error fix * Using mixins now so no loose attributes on defaults, fixed height width schema restrictions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed unnecessary filtering from defaults schema logic * Piero's simplification and cleanup * Flake 8 * Fix test and update docstrings from Pieros change * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Address most of Justin's feedback * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix tests and more feedback implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Address feedback * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Renamed files to correspond to ModelConfig class name * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Missing constant import * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed incorrect merge conflict resolution * Flake8 * Fix remaining tests (except old models training from trainer type removal) * Fixed old models not validating trainer type * Add output_feature=False to test_hyperopt_ray.py * Implement Kabir's feedback * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Travis Addair <[email protected]> Co-authored-by: w4nderlust <[email protected]>
517
0
1,427
14
6
29
def update(self, data): data = np.atleast_1d(np.array(data, dtype=object)) # check if convertible to number: convertible = True for val in OrderedDict.fromkeys(data): # OrderedDict just iterates over unique values in data. _api.check_isinstance((str, bytes), value=val) if convertible: # this will only be called so long as convertible is True. convertible = self._str_is_convertible(val) if val not in self._mapping: self._mapping[val] = next(self._counter) if data.size and convertible: _log.info('Using categorical units to plot a list of strings ' 'that are all parsable as floats or dates. If these ' 'strings should be plotted as numbers, cast to the ' 'appropriate data type before plotting.') # Register the converter with Matplotlib's unit framework units.registry[str] = StrCategoryConverter() units.registry[np.str_] = StrCategoryConverter() units.registry[bytes] = StrCategoryConverter() units.registry[np.bytes_] = StrCategoryConverter()
lib/matplotlib/category.py
238
matplotlib
{ "docstring": "\n Map new values to integer identifiers.\n\n Parameters\n ----------\n data : iterable of str or bytes\n\n Raises\n ------\n TypeError\n If elements in *data* are neither str nor bytes.\n ", "language": "en", "n_whitespaces": 95, "n_words": 27, "vocab_size": 26 }
117
Python
85
c0a384e9f41673207eac75e276b293418bd32965
category.py
108,041
14
100
update
https://github.com/matplotlib/matplotlib.git
Fix incorrect deprecation warning
317
0
23,035
13
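The record above shows the `update` method that backs Matplotlib's categorical units, and the trailing `units.registry` assignments are what make plain strings plottable. A minimal usage sketch, relying only on public Matplotlib API (illustrative, not part of the record):
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> _ = ax.plot(["apple", "banana", "apple"], [1, 2, 3])  # strings are mapped to integer ids by StrCategoryConverter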
9
17
def putpixel(self, xy, value): if self.readonly: self._copy() self.load() if self.pyaccess: return self.pyaccess.putpixel(xy, value) if ( self.mode in ("P", "PA") and isinstance(value, (list, tuple)) and len(value) in [3, 4] ): # RGB or RGBA value for a P or PA image if self.mode == "PA": alpha = value[3] if len(value) == 4 else 255 value = value[:3] value = self.palette.getcolor(value, self) if self.mode == "PA": value = (value, alpha) return self.im.putpixel(xy, value)
src/PIL/Image.py
225
Pillow
{ "docstring": "\n Modifies the pixel at the given position. The color is given as\n a single numerical value for single-band images, and a tuple for\n multi-band images. In addition to this, RGB and RGBA tuples are\n accepted for P and PA images.\n\n Note that this method is relatively slow. For more extensive changes,\n use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw`\n module instead.\n\n See:\n\n * :py:meth:`~PIL.Image.Image.paste`\n * :py:meth:`~PIL.Image.Image.putdata`\n * :py:mod:`~PIL.ImageDraw`\n\n :param xy: The pixel coordinate, given as (x, y). See\n :ref:`coordinate-system`.\n :param value: The pixel value.\n ", "language": "en", "n_whitespaces": 191, "n_words": 81, "vocab_size": 60 }
71
Python
49
a37593f004247ebf69d5582524da6dc5143cb023
Image.py
243,180
18
142
putpixel
https://github.com/python-pillow/Pillow.git
Allow RGB and RGBA values for PA image putpixel
264
0
70,002
14
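A short usage sketch of the palette branch documented above: passing an RGB tuple to `putpixel` on a "P" image, which the method resolves through the image palette (standard Pillow API, shown for illustration only):
>>> from PIL import Image
>>> im = Image.new("P", (8, 8))
>>> im.putpixel((0, 0), (255, 0, 0))       # RGB tuple is mapped to a palette index internally
>>> isinstance(im.getpixel((0, 0)), int)   # the stored value is the palette index
True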
11
54
def batch(_func=None, max_batch_size=10, batch_wait_timeout_s=0.0):
python/ray/serve/batching.py
167
"""Converts a function to asynchronously handle batches. The function can be a standalonea class method. Inthe function must betake a list ofits solereturn a list of the sameainvokedthe caller passes a single object. These will beand executed asynchronously oncea batch ofor `batch_wait_timeout_s` hasoccurs first: >>>
ray
{ "docstring": "Converts a function to asynchronously handle batches.\n\n The function can be a standalone function or a class method. In both\n cases, the function must be `async def` and take a list of objects as\n its sole argument and return a list of the same length as a result.\n\n When invoked, the caller passes a single object. These will be batched\n and executed asynchronously once there is a batch of `max_batch_size`\n or `batch_wait_timeout_s` has elapsed, whichever occurs first.\n\n Example:\n\n >>> @serve.batch(max_batch_size=50, batch_wait_timeout_s=0.5)", "language": "en", "n_whitespaces": 104, "n_words": 81, "vocab_size": 59 }
4
Python
4
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
batching.py
130,855
21
137
batch
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
7
14
29,405
10
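Because this record keeps only the signature of `batch`, here is a minimal sketch of the contract its docstring describes: callers pass single items, while the wrapped `async def` receives and must return equal-length lists. The handler name `double` is hypothetical:
>>> from ray import serve
>>> @serve.batch(max_batch_size=10, batch_wait_timeout_s=0.1)
... async def double(requests):
...     return [r * 2 for r in requests]  # exactly one output per batched input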
1
3
def n(self): return self.args[0]
sympy/combinatorics/graycode.py
23
sympy
{ "docstring": "\n Returns the dimension of the Gray code.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import GrayCode\n >>> a = GrayCode(5)\n >>> a.n\n 5\n ", "language": "en", "n_whitespaces": 78, "n_words": 21, "vocab_size": 18 }
4
Python
4
498015021131af4dbb07eb110e5badaba8250c7b
graycode.py
196,096
2
13
n
https://github.com/sympy/sympy.git
Updated import locations
18
0
47,596
7
1
6
def test_session_not_accessed(self): response = self.client.get("/auth_processor_no_attr_access/") self.assertContains(response, "Session not accessed")
tests/auth_tests/test_context_processors.py
45
django
{ "docstring": "\n The session is not accessed simply by including\n the auth context processor\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 12 }
9
Python
9
9c19aff7c7561e3a82978a272ecdaad40dda5c00
test_context_processors.py
201,203
3
24
test_session_not_accessed
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
30
0
49,899
9
4
10
def iterencode(iterator, encoding, errors='strict', **kwargs): encoder = getincrementalencoder(encoding)(errors, **kwargs) for input in iterator: output = encoder.encode(input) if output: yield output output = encoder.encode("", True) if output: yield output
python3.10.4/Lib/codecs.py
100
XX-Net
{ "docstring": "\n Encoding iterator.\n\n Encodes the input strings from the iterator using an IncrementalEncoder.\n\n errors and kwargs are passed through to the IncrementalEncoder\n constructor.\n ", "language": "en", "n_whitespaces": 38, "n_words": 22, "vocab_size": 20 }
28
Python
20
8198943edd73a363c266633e1aa5b2a9e9c9f526
codecs.py
221,370
9
60
iterencode
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
75
0
56,383
10
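A quick illustration of `iterencode` in use; note how the code's final `encoder.encode("", True)` call flushes the incremental encoder, and the `if output:` guards skip empty chunks:
>>> import codecs
>>> list(codecs.iterencode(iter(["héllo", " "]), "utf-8"))
[b'h\xc3\xa9llo', b' ']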
1
9
def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: return self.grouper.indices
pandas/core/groupby/groupby.py
41
pandas
{ "docstring": "\n Dict {group name -> group indices}.\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
7
Python
7
f65417656ba8c59438d832b6e2a431f78d40c21c
groupby.py
167,771
5
26
indices
https://github.com/pandas-dev/pandas.git
TYP: more return annotations in core/ (#47618) * TYP: more return annotations in core/ * from __future__ import annotations * more __future__
21
0
40,114
7
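For context, `GroupBy.indices` exposes the grouper's positional index arrays; a tiny example using only public pandas API:
>>> import pandas as pd
>>> df = pd.DataFrame({"key": ["a", "b", "a"], "val": [1, 2, 3]})
>>> df.groupby("key").indices
{'a': array([0, 2]), 'b': array([1])}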
1
15
def register(model, field_name, mappings): logger.debug(f'Registering denormalized field {model}.{field_name}') field = model._meta.get_field(field_name) rel_model = field.related_model registry['denormalized_fields'][rel_model].append( (model, field_name, mappings) ) @receiver(post_save)
netbox/netbox/denormalized.py
97
@receiver(post_save)
netbox
{ "docstring": "\n Register a denormalized model field to ensure that it is kept up-to-date with the related object.\n\n Args:\n model: The class being updated\n field_name: The name of the field related to the triggering instance\n mappings: Dictionary mapping of local to remote fields\n ", "language": "en", "n_whitespaces": 72, "n_words": 41, "vocab_size": 33 }
20
Python
17
e96620260a6c1b5cf8cff2112d40d061984a7b2c
denormalized.py
265,475
7
50
register
https://github.com/netbox-community/netbox.git
Closes #9903: Implement a mechanism for automatically updating denormalized fields
44
1
78,110
10
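A hypothetical registration call matching the docstring's contract; the `Device`/`Site` field names below are invented for illustration and are not taken from the commit. It would keep a denormalized `Device.site_name` synchronized with the `name` of the `Site` that `Device.site` points to:
>>> # hypothetical: local field 'site_name' mirrors remote field 'name'
>>> register(Device, "site", {"site_name": "name"})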
1
9
def switch_platform_only(): with patch( "homeassistant.components.zha.PLATFORMS", ( Platform.DEVICE_TRACKER, Platform.SENSOR, Platform.SELECT, Platform.SWITCH, ), ): yield @pytest.fixture
tests/components/zha/test_switch.py
61
@pytest.fixture
core
{ "docstring": "Only setup the switch and required base platforms to speed up tests.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
14
Python
14
4bc5d7bfed07c20d6f3438ab91c734a620505a33
test_switch.py
313,987
11
32
switch_platform_only
https://github.com/home-assistant/core.git
Speed up zha tests (#73627)
94
1
112,598
11
3
9
def report_start(self, out, test, example): if self._verbose: if example.want: out('Trying:\n' + _indent(example.source) + 'Expecting:\n' + _indent(example.want)) else: out('Trying:\n' + _indent(example.source) + 'Expecting nothing\n')
python3.10.4/Lib/doctest.py
104
XX-Net
{ "docstring": "\n Report that the test runner is about to process the given\n example. (Only displays a message if verbose=True)\n ", "language": "en", "n_whitespaces": 41, "n_words": 18, "vocab_size": 17 }
23
Python
16
8198943edd73a363c266633e1aa5b2a9e9c9f526
doctest.py
223,420
8
57
report_start
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
127
0
56,893
17
1
9
def test_cached_file_client(get_loader, minion_opts): with patch("salt.channel.client.ReqChannel.factory", Mock()): loader_a = SaltCacheLoader(minion_opts) loader_b = SaltCacheLoader(minion_opts) assert loader_a._file_client is loader_b._file_client
tests/pytests/unit/utils/jinja/test_salt_cache_loader.py
67
salt
{ "docstring": "\n Multiple instantiations of SaltCacheLoader use the cached file client\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
16
Python
14
56045b0ee4c11b395895cb0a11279dfea8c2242f
test_salt_cache_loader.py
215,577
5
38
test_cached_file_client
https://github.com/saltstack/salt.git
Clean up salt.transport.(client,server) references
39
0
54,037
11
1
8
def get_css_variables(self) -> dict[str, str]: variables = self.design.generate(self.dark) return variables
src/textual/app.py
44
textual
{ "docstring": "Get a mapping of variables used to pre-populate CSS.\n\n Returns:\n dict[str, str]: A mapping of variable name to value.\n ", "language": "en", "n_whitespaces": 44, "n_words": 19, "vocab_size": 16 }
10
Python
9
b115db9d8d4f1c9ab20a3d3bef5d5a729ea8b57a
app.py
182,785
8
27
get_css_variables
https://github.com/Textualize/textual.git
docstring
31
0
43,965
9
1
26
def test_multi_sso_redirect_to_cas(self) -> None: channel = self.make_request( "GET", "/_synapse/client/pick_idp?redirectUrl=" + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL) + "&idp=cas", shorthand=False, ) self.assertEqual(channel.code, 302, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers cas_uri = location_headers[0] cas_uri_path, cas_uri_query = cas_uri.split("?", 1) # it should redirect us to the login page of the cas server self.assertEqual(cas_uri_path, CAS_SERVER + "/login") # check that the redirectUrl is correctly encoded in the service param - ie, the # place that CAS will redirect to cas_uri_params = urllib.parse.parse_qs(cas_uri_query) service_uri = cas_uri_params["service"][0] _, service_uri_query = service_uri.split("?", 1) service_uri_params = urllib.parse.parse_qs(service_uri_query) self.assertEqual(service_uri_params["redirectUrl"][0], TEST_CLIENT_REDIRECT_URL)
tests/rest/client/test_login.py
239
synapse
{ "docstring": "If CAS is chosen, should redirect to the CAS server", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
86
Python
66
64c73c6ac88a740ee480a0ad1f9afc8596bccfa4
test_login.py
246,600
20
143
test_multi_sso_redirect_to_cas
https://github.com/matrix-org/synapse.git
Add type hints to `tests/rest/client` (#12066)
260
0
71,290
13
5
7
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): if session_hooks is None or session_hooks.get("response") == []: return request_hooks if request_hooks is None or request_hooks.get("response") == []: return session_hooks return merge_setting(request_hooks, session_hooks, dict_class)
pipenv/patched/pip/_vendor/requests/sessions.py
90
pipenv
{ "docstring": "Properly merges both requests and session hooks.\n\n This is necessary because when request_hooks == {'response': []}, the\n merge breaks Session hooks entirely.\n ", "language": "en", "n_whitespaces": 31, "n_words": 22, "vocab_size": 22 }
28
Python
17
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
sessions.py
22,107
6
55
merge_hooks
https://github.com/pypa/pipenv.git
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
54
0
4,184
10
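The special case the docstring calls out, sketched with the function as defined above: when one side's response hooks are empty, the other side wins unmerged:
>>> merge_hooks({"response": [print]}, {"response": []})
{'response': [<built-in function print>]}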
1
12
def _decode(self, pre_chars, features, hidden):
ppocr/modeling/heads/table_att_head.py
28
""" Predict tablecoordinates for each
PaddleOCR
{ "docstring": "\n Predict table label and coordinates for each step", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
5
Python
5
ddaa2c2552e19635cd6cdf38619f1f176c358f89
table_att_head.py
24,443
7
60
_decode
https://github.com/PaddlePaddle/PaddleOCR.git
add SLANet
12
2
4,732
7
3
9
def _full_shape(self) -> Tuple[int]: sampled_shape = tuple() for d in self._expected_shape: if isinstance(d, int): sampled_shape += (d,) else: sampled_shape += (1,) return sampled_shape
rllib/models/specs/specs_base.py
76
ray
{ "docstring": "Converts the expected shape to a shape by replacing the unknown dimension\n sizes with a value of 1.", "language": "en", "n_whitespaces": 24, "n_words": 18, "vocab_size": 15 }
23
Python
19
3e7c207f02e7368e1245e2cfafd27cb0bf179ff7
specs_base.py
128,381
10
47
_full_shape
https://github.com/ray-project/ray.git
[RLlib] Introduce TensorSpec data structure for RLModule / Model definitions (#28946) * added tensor specs * lint * 1. Added numpy specs 2. Added spec.sample() * added unittests for sampling * added tensorflow specs * added jax * removed jax test to be able to merge this part * added docs * fixed typo * use full/fill instead of sample * changed the input delimiter to be comma instead of whitespace. It also ignores whitespaces now. * simplified parser code Signed-off-by: Kourosh Hakhamaneshi <[email protected]>
103
0
28,689
12
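The substitution rule above, isolated as a one-liner for clarity (independent of the RLlib class; `"batch"` and `None` stand in for unknown dimensions):
>>> expected_shape = ("batch", 4, None)
>>> tuple(d if isinstance(d, int) else 1 for d in expected_shape)
(1, 4, 1)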
2
21
def laplacian_matrix(G, nodelist=None, weight="weight"): import scipy as sp import scipy.sparse # call as sp.sparse if nodelist is None: nodelist = list(G) A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format="csr") n, m = A.shape # TODO: rm csr_array wrapper when spdiags can produce arrays D = sp.sparse.csr_array(sp.sparse.spdiags(A.sum(axis=1), 0, m, n, format="csr")) return D - A @not_implemented_for("directed")
networkx/linalg/laplacianmatrix.py
167
@not_implemented_for("directed")
networkx
{ "docstring": "Returns the Laplacian matrix of G.\n\n The graph Laplacian is the matrix L = D - A, where\n A is the adjacency matrix and D is the diagonal matrix of node degrees.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in nodelist.\n If nodelist is None, then the ordering is produced by G.nodes().\n\n weight : string or None, optional (default='weight')\n The edge data key used to compute each value in the matrix.\n If None, then each edge has weight 1.\n\n Returns\n -------\n L : SciPy sparse array\n The Laplacian matrix of G.\n\n Notes\n -----\n For MultiGraph, the edges weights are summed.\n\n See Also\n --------\n to_numpy_array\n normalized_laplacian_matrix\n laplacian_spectrum\n ", "language": "en", "n_whitespaces": 213, "n_words": 121, "vocab_size": 76 }
53
Python
43
8a325d26aa7fdd3a72580c4720fa97f971bbefcb
laplacianmatrix.py
177,333
9
98
laplacian_matrix
https://github.com/networkx/networkx.git
Use scipy.sparse array datastructure (#6037) * Use scipy.sparse array datastructure * Add reminder to rm wrapper when scipy adds creation fns. * Rm mention of np matrix from code comment. * Update networkx/algorithms/bipartite/matrix.py Co-authored-by: Stefan van der Walt <[email protected]> Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Stefan van der Walt <[email protected]>
87
1
42,352
13
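A concrete check of L = D - A using only public NetworkX API, on the 3-node path graph (degrees 1, 2, 1):
>>> import networkx as nx
>>> nx.laplacian_matrix(nx.path_graph(3)).toarray()
array([[ 1, -1,  0],
       [-1,  2, -1],
       [ 0, -1,  1]])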
5
15
def global_efficiency(G): n = len(G) denom = n * (n - 1) if denom != 0: lengths = nx.all_pairs_shortest_path_length(G) g_eff = 0 for source, targets in lengths: for target, distance in targets.items(): if distance > 0: g_eff += 1 / distance g_eff /= denom # g_eff = sum(1 / d for s, tgts in lengths # for t, d in tgts.items() if d > 0) / denom else: g_eff = 0 # TODO This can be made more efficient by computing all pairs shortest # path lengths in parallel. return g_eff @not_implemented_for("directed")
networkx/algorithms/efficiency_measures.py
138
@not_implemented_for("directed")
networkx
{ "docstring": "Returns the average global efficiency of the graph.\n\n The *efficiency* of a pair of nodes in a graph is the multiplicative\n inverse of the shortest path distance between the nodes. The *average\n global efficiency* of a graph is the average efficiency of all pairs of\n nodes [1]_.\n\n Parameters\n ----------\n G : :class:`networkx.Graph`\n An undirected graph for which to compute the average global efficiency.\n\n Returns\n -------\n float\n The average global efficiency of the graph.\n\n Examples\n --------\n >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])\n >>> round(nx.global_efficiency(G), 12)\n 0.916666666667\n\n Notes\n -----\n Edge weights are ignored when computing the shortest path distances.\n\n See also\n --------\n local_efficiency\n\n References\n ----------\n .. [1] Latora, Vito, and Massimo Marchiori.\n \"Efficient behavior of small-world networks.\"\n *Physical Review Letters* 87.19 (2001): 198701.\n <https://doi.org/10.1103/PhysRevLett.87.198701>\n\n ", "language": "en", "n_whitespaces": 248, "n_words": 129, "vocab_size": 86 }
92
Python
55
435b4622d106d14a3627e162ee163b113bac9854
efficiency_measures.py
176,967
14
76
global_efficiency
https://github.com/networkx/networkx.git
added examples to efficiency_measures.py (#5643) * added example on efficiency * added example on global_efficiency * added example on local_efficiency * adjusted round up
227
1
42,195
15
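A worked check of the averaging loop above: the 3-node path graph has ordered-pair distances {1, 1, 1, 1, 2, 2}, so the inverse-distance sum is 4 + 1 = 5 over a denominator of n*(n-1) = 6:
>>> import networkx as nx
>>> round(nx.global_efficiency(nx.path_graph(3)), 4)
0.8333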
1
5
def image(self) -> ImageTk.PhotoImage: assert self._preview_image_tk is not None return self._preview_image_tk
lib/gui/utils/image.py
35
faceswap
{ "docstring": ":class:`PIL.ImageTk.PhotoImage` The preview image for displaying in a tkinter canvas ", "language": "en", "n_whitespaces": 10, "n_words": 10, "vocab_size": 10 }
11
Python
10
2e8ef5e3c8f2df0f1cca9b342baa8aaa6f620650
image.py
101,978
4
21
image
https://github.com/deepfakes/faceswap.git
GUI - Preview updates - Training preview. Embed preview pop-out window - Bugfix - convert/extract previews
32
0
21,352
7
1
20
def test_ddp_sharded_strategy_fit_ckpt_path_downsize_gpus(tmpdir): model = BoringModel() trainer = Trainer(strategy="ddp_sharded_spawn", fast_dev_run=True, gpus=2) trainer.fit(model) checkpoint_path = os.path.join(tmpdir, "model.pt") trainer.save_checkpoint(checkpoint_path) model = BoringModel() trainer = Trainer(strategy="ddp_sharded_spawn", fast_dev_run=True, gpus=1) trainer.fit(model, ckpt_path=checkpoint_path) @RunIf(min_gpus=1, skip_windows=True, fairscale=True)
tests/strategies/test_sharded_strategy.py
158
@RunIf(min_gpus=1, skip_windows=True, fairscale=True)
lightning
{ "docstring": "Test to ensure that resuming from checkpoint works when downsizing number of GPUS.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
29
Python
20
650c710efacd633fa283955145342bb64063c883
test_sharded_strategy.py
241,585
9
82
test_ddp_sharded_strategy_fit_ckpt_path_downsize_gpus
https://github.com/Lightning-AI/lightning.git
Rename training plugin test files & names to strategy (#11303)
55
1
69,610
10
1
4
def test_cancel_logcontexts(self): complete_lookup: "Deferred[None]" = Deferred()
tests/util/caches/test_descriptors.py
27
synapse
{ "docstring": "Test that cancellation does not break logcontexts.\n\n * The `CancelledError` must be raised with the correct logcontext.\n * The inner lookup must not resume with a finished logcontext.\n * The inner lookup must not restore a finished logcontext when done.\n ", "language": "en", "n_whitespaces": 68, "n_words": 40, "vocab_size": 26 }
6
Python
6
2fcf4b3f6cd2a0be6597622664636d2219957c2a
test_descriptors.py
247,588
16
81
test_cancel_logcontexts
https://github.com/matrix-org/synapse.git
Add cancellation support to `@cached` and `@cachedList` decorators (#12183) These decorators mostly support cancellation already. Add cancellation tests and fix use of finished logging contexts by delaying cancellation, as suggested by @erikjohnston. Signed-off-by: Sean Quah <[email protected]>
20
0
71,762
8
1
9
def test_supports_transactions(self): with mock.patch( "django.db.connection.features._mysql_storage_engine", "InnoDB" ): self.assertTrue(connection.features.supports_transactions) del connection.features.supports_transactions with mock.patch( "django.db.connection.features._mysql_storage_engine", "MyISAM" ): self.assertFalse(connection.features.supports_transactions) del connection.features.supports_transactions
tests/backends/mysql/test_features.py
105
django
{ "docstring": "\n All storage engines except MyISAM support transactions.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
18
Python
12
9c19aff7c7561e3a82978a272ecdaad40dda5c00
test_features.py
201,697
11
58
test_supports_transactions
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
111
0
49,980
11
2
16
def _apply_mask(cls, y_true, y_pred, mask_channel, mask_prop=1.0): if mask_channel == -1: logger.debug("No mask to apply") return y_true[..., :3], y_pred[..., :3] logger.debug("Applying mask from channel %s", mask_channel) mask = K.tile(K.expand_dims(y_true[..., mask_channel], axis=-1), (1, 1, 1, 3)) mask_as_k_inv_prop = 1 - mask_prop mask = (mask * mask_prop) + mask_as_k_inv_prop m_true = y_true[..., :3] * mask m_pred = y_pred[..., :3] * mask return m_true, m_pred
lib/model/losses_plaid.py
187
faceswap
{ "docstring": " Apply the mask to the input y_true and y_pred. If a mask is not required then\n return the unmasked inputs.\n\n Parameters\n ----------\n y_true: tensor or variable\n The ground truth value\n y_pred: tensor or variable\n The predicted value\n mask_channel: int\n The channel within y_true that the required mask resides in\n mask_prop: float, optional\n The amount of mask propagation. Default: `1.0`\n\n Returns\n -------\n tuple\n (n_true, n_pred): The ground truth and predicted value tensors with the mask applied\n ", "language": "en", "n_whitespaces": 208, "n_words": 75, "vocab_size": 52 }
61
Python
42
94c3dcff7ebd02a5a5758f33a3eb2bfc66282117
losses_plaid.py
100,868
11
127
_apply_mask
https://github.com/deepfakes/faceswap.git
Training updates - Add multiple selected loss functions - Unlock loss as a model configuration - Phaze-A remove encoder scaling max cap
146
0
20,319
12
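The mask-propagation arithmetic in `_apply_mask`, mirrored on plain NumPy values for clarity (illustrative only, not the faceswap API): with `mask_prop=1.0` masked-out pixels are zeroed, while smaller values only partially suppress them:
>>> import numpy as np
>>> mask, mask_prop = np.array([0.0, 1.0]), 0.5
>>> mask * mask_prop + (1 - mask_prop)
array([0.5, 1. ])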
11
39
def wasLastResponseDelayed(): # 99.9999999997440% of all non time-based SQL injection affected # response times should be inside +-7*stdev([normal response times]) # Math reference: http://www.answers.com/topic/standard-deviation deviation = stdev(kb.responseTimes.get(kb.responseTimeMode, [])) threadData = getCurrentThreadData() if deviation and not conf.direct and not conf.disableStats: if len(kb.responseTimes[kb.responseTimeMode]) < MIN_TIME_RESPONSES: warnMsg = "time-based standard deviation method used on a model " warnMsg += "with less than %d response times" % MIN_TIME_RESPONSES logger.warning(warnMsg) lowerStdLimit = average(kb.responseTimes[kb.responseTimeMode]) + TIME_STDEV_COEFF * deviation retVal = (threadData.lastQueryDuration >= max(MIN_VALID_DELAYED_RESPONSE, lowerStdLimit)) if not kb.testMode and retVal: if kb.adjustTimeDelay is None: msg = "do you want sqlmap to try to optimize value(s) " msg += "for DBMS delay responses (option '--time-sec')? [Y/n] " kb.adjustTimeDelay = ADJUST_TIME_DELAY.DISABLE if not readInput(msg, default='Y', boolean=True) else ADJUST_TIME_DELAY.YES if kb.adjustTimeDelay is ADJUST_TIME_DELAY.YES: adjustTimeDelay(threadData.lastQueryDuration, lowerStdLimit) return retVal else: delta = threadData.lastQueryDuration - conf.timeSec if Backend.getIdentifiedDbms() in (DBMS.MYSQL,): # MySQL's SLEEP(X) lasts 0.05 seconds shorter on average delta += 0.05 return delta >= 0
lib/core/common.py
327
sqlmap
{ "docstring": "\n Returns True if the last web request resulted in a time-delay\n ", "language": "en", "n_whitespaces": 18, "n_words": 11, "vocab_size": 11 }
153
Python
110
df4293473d2fb6e887e31522cab5aff95e201581
common.py
123,465
23
200
wasLastResponseDelayed
https://github.com/sqlmapproject/sqlmap.git
Fixing DeprecationWarning (logger.warn)
360
0
27,379
18
2
20
def generate_navigator(os=None, navigator=None, platform=None, device_type=None): if platform is not None: os = platform warn( "The `platform` option is deprecated." " Use `os` option instead.", stacklevel=3, ) device_type, os_id, navigator_id = pick_config_ids(device_type, os, navigator) system = build_system_components(device_type, os_id, navigator_id) app = build_app_components(os_id, navigator_id) ua_template = choose_ua_template(device_type, navigator_id, app) user_agent = ua_template.format(system=system, app=app) app_version = build_navigator_app_version( os_id, navigator_id, system["platform_version"], user_agent ) return { # ids "os_id": os_id, "navigator_id": navigator_id, # system components "platform": system["platform"], "oscpu": system["oscpu"], # app components "build_version": app["build_version"], "build_id": app["build_id"], "app_version": app_version, "app_name": app["name"], "app_code_name": "Mozilla", "product": "Gecko", "product_sub": app["product_sub"], "vendor": app["vendor"], "vendor_sub": "", # compiled user agent "user_agent": user_agent, }
build/pyinstaller/user_agent/base.py
321
OpenBBTerminal
{ "docstring": "\n Generates web navigator's config\n\n :param os: limit list of oses for generation\n :type os: string or list/tuple or None\n :param navigator: limit list of browser engines for generation\n :type navigator: string or list/tuple or None\n :param device_type: limit possible oses by device type\n :type device_type: list/tuple or None, possible values:\n \"desktop\", \"smartphone\", \"tablet\", \"all\"\n\n :return: User-Agent config\n :rtype: dict with keys (os, name, platform, oscpu, build_version,\n build_id, app_version, app_name, app_code_name,\n product, product_sub, vendor, vendor_sub,\n user_agent)\n :raises InvalidOption: if could not generate user-agent for\n any combination of allowed platforms and navigators\n :raise InvalidOption: if any of passed options is invalid\n ", "language": "en", "n_whitespaces": 231, "n_words": 99, "vocab_size": 69 }
102
Python
79
ab4de1dd70fba866930150e440a03e461a6ca6a8
base.py
283,199
31
190
generate_navigator
https://github.com/OpenBB-finance/OpenBBTerminal.git
Create a packaged app bundle with Pyinstaller (#1525) * Add dashboard widget assets * Add ipywidgets and ipyflex to project * Add currencies dashboard notebook * Update docs and docstrings * Add pyinstaller to project deps * Add pyinstaller artifacts to gitignore * Fix linter errors in terminal.py * Update cspell hook and action with a pyinstaller specific word * Add pyinstaller specfile and artifacts * Add splashscreen image * Add app icon * adding splash screen support to terminal.spec and terminal.py * Restore the conda env build files * Sync deps * Add border to the splashscreen image * Clean up terminal launcher * Add support for default feature flags in packages apps * Fix types and linting * Add splashscreen management to app bootup * Check prediction feature flag when entering crypto/pred * Update pyinstaller spec file * fix .spec file to work for splash and icon - removed the ".." * Allows to export when using installer (#1568) * fix export for packaged apps * fix filename * Git : replace commit_hash when it is set in config_terminal * Add update of the git commit hash in gtff default during build * Add packaged app name and feature flag to logs * Add platform specific icon assignment * Add macOS build assets * Add tensorflow to hidden imports * Move LOGGING_COMMIT_HASH to gtff * Adding files/folders needed to .spec and pyinstaller folder. This will make certain commands work again. * Linting * Workflow : ignore ./build/pyinstaller from codespell * Workflow : exclude ./build/pyinstaller from flake8 * Poetry + Workflow : add types-six * Pyinstaller : remove property_cached, user_agent and vaderSentiment * Revert "Pyinstaller : remove property_cached, user_agent and vaderSentiment" This reverts commit dbb3e2b81086f97819ebd21457148c7160a4d703. * Clean up local paths in specfile * Validate deps have correct Jinja version (they do) * Fix logging commit hash to be set correctly for the logger to see it Co-authored-by: Andrew <[email protected]> Co-authored-by: didierlopes.eth <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]>
311
0
84,465
11
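A hedged usage sketch, assuming "linux" and "chrome" are accepted option strings for this vendored module; the two keys checked below are hardcoded in the return dict above, so their values are safe to show:
>>> nav = generate_navigator(os="linux", navigator="chrome")
>>> (nav["app_code_name"], nav["product"])
('Mozilla', 'Gecko')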
1
3
def isTechnical(self): return self.technical
nuitka/nodes/ModuleNodes.py
19
Nuitka
{ "docstring": "Must be present as it's used in CPython library initialization.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
4
Python
4
3b1c76ce9d79543de81353f358b3108df91078fc
ModuleNodes.py
178,747
2
10
isTechnical
https://github.com/Nuitka/Nuitka.git
Standalone: Exclude more of standard library modules * This removes tkinter and many modules expected to never be implicit dependencies. * The real reduction will be achieved using Python PGO once it covers bytecode too. * Don't keep required extension modules as root modules, instead make them proper early init modules.
18
0
42,811
6
8
17
def fes(self, name=None, frame=None): # what frames are we searching in? if frame is not None: if isinstance(frame, int): frames = [self.frame(frame)] elif isinstance(frame, str): frames = self.frames(frame) else: frames = [frame] else: frames = self.frames() return PrettyList( fe for f in frames for fename, fe in f.FE.items() if name is None or re.search(name, fename, re.I) )
nltk/corpus/reader/framenet.py
169
nltk
{ "docstring": "\n Lists frame element objects. If 'name' is provided, this is treated as\n a case-insensitive regular expression to filter by frame name.\n (Case-insensitivity is because casing of frame element names is not always\n consistent across frames.) Specify 'frame' to filter by a frame name pattern,\n ID, or object.\n\n >>> from nltk.corpus import framenet as fn\n >>> fn.fes('Noise_maker')\n [<fe ID=6043 name=Noise_maker>]\n >>> sorted([(fe.frame.name,fe.name) for fe in fn.fes('sound')]) # doctest: +NORMALIZE_WHITESPACE\n [('Cause_to_make_noise', 'Sound_maker'), ('Make_noise', 'Sound'),\n ('Make_noise', 'Sound_source'), ('Sound_movement', 'Location_of_sound_source'),\n ('Sound_movement', 'Sound'), ('Sound_movement', 'Sound_source'),\n ('Sounds', 'Component_sound'), ('Sounds', 'Location_of_sound_source'),\n ('Sounds', 'Sound_source'), ('Vocalizations', 'Location_of_sound_source'),\n ('Vocalizations', 'Sound_source')]\n >>> sorted([(fe.frame.name,fe.name) for fe in fn.fes('sound',r'(?i)make_noise')]) # doctest: +NORMALIZE_WHITESPACE\n [('Cause_to_make_noise', 'Sound_maker'),\n ('Make_noise', 'Sound'),\n ('Make_noise', 'Sound_source')]\n >>> sorted(set(fe.name for fe in fn.fes('^sound')))\n ['Sound', 'Sound_maker', 'Sound_source']\n >>> len(fn.fes('^sound$'))\n 2\n\n :param name: A regular expression pattern used to match against\n frame element names. If 'name' is None, then a list of all\n frame elements will be returned.\n :type name: str\n :return: A list of matching frame elements\n :rtype: list(AttrDict)\n ", "language": "en", "n_whitespaces": 382, "n_words": 156, "vocab_size": 95 }
57
Python
40
8a4cf5d94eb94b6427c5d1d7907ba07b119932c5
framenet.py
42,538
16
108
fes
https://github.com/nltk/nltk.git
Docstring tests (#3050) * fixed pytests * fixed more pytests * fixed more pytest and changed multiline pytest issues fixes for snowball.py and causal.py * fixed pytests (mainly multiline or rounding issues) * fixed treebank pytests, removed test for return_string=True (deprecated) * fixed destructive.py pytests, removed test for return_string=True (deprecated) * fixed pytest (rounding issues) * fixed pytest (initialised missing object) * fixed pytest (formatting issues) * fixed pytest (formatting issues) * fixed pytest (formatting issues) * added pytest +SKIP for deprecated module stanford * updated AUTHORS.md * changed docstring corrections by usage of ELLIPSIS and different roundings * fixed AUTHORS.md to be consistent * Fix framenet doctest formatting with pprint * Change docstring on MultiListBox.__init__ I believe the original typo was misinterpreted and changed to something that was not originally intended. Co-authored-by: Jan Lennartz <[email protected]> Co-authored-by: Tom Aarsen <[email protected]> Co-authored-by: Tom Aarsen <[email protected]>
232
0
7,600
13
1
41
def test_mark_task_instance_state(test_app): from airflow.models import DAG, DagBag, TaskInstance from airflow.operators.dummy import DummyOperator from airflow.utils.session import create_session from airflow.utils.state import State from airflow.utils.timezone import datetime from airflow.utils.types import DagRunType from airflow.www.views import Airflow from tests.test_utils.db import clear_db_runs clear_db_runs() start_date = datetime(2020, 1, 1) with DAG("test_mark_task_instance_state", start_date=start_date) as dag: task_1 = DummyOperator(task_id="task_1") task_2 = DummyOperator(task_id="task_2") task_3 = DummyOperator(task_id="task_3") task_4 = DummyOperator(task_id="task_4") task_5 = DummyOperator(task_id="task_5") task_1 >> [task_2, task_3, task_4, task_5] dagrun = dag.create_dagrun( start_date=start_date, execution_date=start_date, data_interval=(start_date, start_date), state=State.FAILED, run_type=DagRunType.SCHEDULED, )
tests/www/views/test_views.py
285
airflow
{ "docstring": "\n Test that _mark_task_instance_state() does all three things:\n - Marks the given TaskInstance as SUCCESS;\n - Clears downstream TaskInstances in FAILED/UPSTREAM_FAILED state;\n - Set DagRun to QUEUED.\n ", "language": "en", "n_whitespaces": 42, "n_words": 26, "vocab_size": 24 }
78
Python
57
2b4bf7fe67fc656ceb7bdaad36453b7a5b83ef04
test_views.py
44,001
56
437
test_mark_task_instance_state
https://github.com/apache/airflow.git
Use `DagRun.run_id` instead of `execution_date` when updating state of TIs(UI & REST API) (#18724) We can now use run_id as well as execution_date to update states of task instances Co-authored-by: Tzu-ping Chung <[email protected]> Co-authored-by: Ash Berlin-Taylor <[email protected]>
197
0
8,118
12
1
16
def test_delete_view_uses_get_deleted_objects(self): book = Book.objects.create(name="Test Book") response = self.client.get( reverse("admin2:admin_views_book_delete", args=(book.pk,)) ) # BookAdmin.get_deleted_objects() returns custom text. self.assertContains(response, "a deletable object") @override_settings(ROOT_URLCONF="admin_views.urls")
tests/admin_views/tests.py
98
@override_settings(ROOT_URLCONF="admin_views.urls")
django
{ "docstring": "The delete view uses ModelAdmin.get_deleted_objects().", "language": "en", "n_whitespaces": 4, "n_words": 5, "vocab_size": 5 }
22
Python
21
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,727
6
48
test_delete_view_uses_get_deleted_objects
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
74
1
52,072
13
1
5
def _apply_from_right_to(self, op, **options): return dispatch_method(self, '_apply_from_right_to', op, **options)
sympy/physics/quantum/state.py
37
sympy
{ "docstring": "Apply an Operator to this Ket as Operator*Ket\n\n This method will dispatch to methods having the format::\n\n ``def _apply_from_right_to_OperatorName(op, **options):``\n\n Subclasses should define these methods (one for each OperatorName) to\n teach the Ket how to implement OperatorName*Ket\n\n Parameters\n ==========\n\n op : Operator\n The Operator that is acting on the Ket as op*Ket\n options : dict\n A dict of key/value pairs that control how the operator is applied\n to the Ket.\n ", "language": "en", "n_whitespaces": 170, "n_words": 70, "vocab_size": 51 }
9
Python
8
00ed353dda66aa068dd43d44018f6a394d1fb0a1
state.py
200,169
2
23
_apply_from_right_to
https://github.com/sympy/sympy.git
Fix the Ket*Op->Op*Ket bug
23
0
49,559
8
1
10
def unrank_gray(self, rank, superset): graycode_bitlist = GrayCode.unrank(len(superset), rank) return Subset.subset_from_bitlist(superset, graycode_bitlist)
sympy/combinatorics/subsets.py
50
sympy
{ "docstring": "\n Gets the Gray code ordered subset of the specified rank.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Subset\n >>> Subset.unrank_gray(4, ['a', 'b', 'c']).subset\n ['a', 'b']\n >>> Subset.unrank_gray(0, ['a', 'b', 'c']).subset\n []\n\n See Also\n ========\n\n iterate_graycode, rank_gray\n ", "language": "en", "n_whitespaces": 120, "n_words": 35, "vocab_size": 27 }
11
Python
11
498015021131af4dbb07eb110e5badaba8250c7b
subsets.py
196,203
3
32
unrank_gray
https://github.com/sympy/sympy.git
Updated import locations
32
0
47,703
10
6
12
def convert_ids_to_tokens(self, ids, skip_special_tokens=False): if not isinstance(ids, (list, tuple)): return self._convert_id_to_token(ids) tokens = [self._convert_id_to_token(_id) for _id in ids] if skip_special_tokens: return [ token for token in tokens if token not in self.all_special_tokens ] return tokens
paddlenlp/transformers/prophetnet/tokenizer.py
100
PaddleNLP
{ "docstring": "\r\n Converts a single index or a sequence of indices to a token or\r\n a sequence of tokens, using the vocabulary and added tokens.\r\n\r\n Args:\r\n ids (int or List[int]):\r\n The token id (or token ids) to be converted to token(s).\r\n skip_special_tokens (bool, optional):\r\n Whether or not to remove special tokens in the decoding.\r\n Defaults to `False` and we do not remove special tokens.\r\n\r\n Returns:\r\n str or List[str]: The decoded token(s).\r\n ", "language": "en", "n_whitespaces": 183, "n_words": 69, "vocab_size": 46 }
35
Python
23
487162262196bead8d9b4c2306f313b8f64edf9b
tokenizer.py
322,456
10
66
convert_ids_to_tokens
https://github.com/PaddlePaddle/PaddleNLP.git
Add model Prophetnet (#1698) * add Prophetnet model * update prophetnet * update format * pre commit * add prophetnet example * update tokenizer.py,run_train.sh,train_prophetnet.py * remove evaluate/gigaword/__init__.py Co-authored-by: smallv0221 <[email protected]>
133
0
118,173
11
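A usage sketch of the two branches above (assumes an already-loaded tokenizer instance; the ids are placeholders, not real vocabulary entries):
>>> tokenizer.convert_ids_to_tokens(5)                                  # single id -> single token
>>> tokenizer.convert_ids_to_tokens([5, 6], skip_special_tokens=True)  # list -> list, specials filtered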
2
9
def vocabulary_size(self): if tf.executing_eagerly(): return ( int(self.lookup_table.size().numpy()) + self._token_start_index() ) else: return self.lookup_table.size() + self._token_start_index()
keras/layers/preprocessing/index_lookup.py
90
keras
{ "docstring": "Gets the current size of the layer's vocabulary.\n\n Returns:\n The integer size of the vocabulary, including optional mask and oov indices.\n ", "language": "en", "n_whitespaces": 44, "n_words": 21, "vocab_size": 17 }
15
Python
12
84afc5193d38057e2e2badf9c889ea87d80d8fbf
index_lookup.py
273,174
8
52
vocabulary_size
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
99
0
81,097
16
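The eager branch above is what Keras lookup layers such as `StringLookup` (an `IndexLookup` subclass) report; with the default single OOV slot, a three-token vocabulary yields 4:
>>> import tensorflow as tf
>>> layer = tf.keras.layers.StringLookup(vocabulary=["a", "b", "c"])
>>> layer.vocabulary_size()  # 3 tokens + 1 default OOV index
4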
1
7
def get_host_target_type_map() -> t.Dict[t.Type[HostConfig], t.Type[TargetFilter]]: return get_type_map(TargetFilter, HostConfig)
test/lib/ansible_test/_internal/commands/integration/filters.py
48
ansible
{ "docstring": "Create and return a mapping of HostConfig types to TargetFilter types.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
8
Python
8
3eb0485dd92c88cc92152d3656d94492db44b183
filters.py
267,900
3
31
get_host_target_type_map
https://github.com/ansible/ansible.git
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
14
0
79,176
7