| Column | Dtype | Min | Max |
|---|---|---|---|
| complexity | int64 | 1 | 56 |
| n_identifiers | int64 | 1 | 114 |
| code | stringlengths | 19 | 12.7k |
| path | stringlengths | 8 | 134 |
| n_ast_nodes | int64 | 12 | 2.35k |
| ast_errors | stringlengths | 0 | 4.01k |
| repo | stringlengths | 3 | 28 |
| documentation | dict | | |
| n_words | int64 | 2 | 866 |
| language | stringclasses | 1 value | |
| vocab_size | int64 | 2 | 323 |
| commit_id | stringlengths | 40 | 40 |
| file_name | stringlengths | 5 | 79 |
| id | int64 | 243 | 338k |
| nloc | int64 | 1 | 228 |
| token_counts | int64 | 5 | 1.4k |
| fun_name | stringlengths | 1 | 77 |
| url | stringlengths | 31 | 60 |
| commit_message | stringlengths | 3 | 15.3k |
| n_whitespaces | int64 | 1 | 3.23k |
| n_ast_errors | int64 | 0 | 20 |
| d_id | int64 | 74 | 121k |
| ast_levels | int64 | 4 | 29 |
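A minimal sketch of how records with the schema above might be inspected, assuming the data is published as a Hugging Face dataset; the `load_dataset` call and the `"<dataset-name>"` placeholder are illustrative assumptions, not taken from this page.

```python
# Sketch only: assumes the table above is exposed via the `datasets` library.
# "<dataset-name>" is a hypothetical placeholder, not the real dataset path.
from datasets import load_dataset

ds = load_dataset("<dataset-name>", split="train")

for row in ds.select(range(3)):
    # Each record pairs one function's source with its metrics and provenance.
    print(row["repo"], row["path"], row["fun_name"])
    print("complexity:", row["complexity"], "nloc:", row["nloc"], "ast_levels:", row["ast_levels"])
    print(row["code"][:120], "...")
    print(row["documentation"]["docstring"][:80])
```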
14
40
def collect(self):
    if self.symlink and not self.local:
        raise CommandError("Can't symlink to a remote destination.")

    if self.clear:
        self.clear_dir("")

    if self.symlink:
        handler = self.link_file
    else:
        handler = self.copy_file

    found_files = {}
    for finder in get_finders():
        for path, storage in finder.list(self.ignore_patterns):
            # Prefix the relative path if the source storage contains it
            if getattr(storage, "prefix", None):
                prefixed_path = os.path.join(storage.prefix, path)
            else:
                prefixed_path = path

            if prefixed_path not in found_files:
                found_files[prefixed_path] = (storage, path)
                handler(path, prefixed_path, storage)
            else:
                self.log(
                    "Found another file with the destination path '%s'. It "
                    "will be ignored since only the first encountered file "
                    "is collected. If this is not what you want, make sure "
                    "every static file has a unique path." % prefixed_path,
                    level=1,
                )

    # Storage backends may define a post_process() method.
    if self.post_process and hasattr(self.storage, "post_process"):
        processor = self.storage.post_process(found_files, dry_run=self.dry_run)
        for original_path, processed_path, processed in processor:
            if isinstance(processed, Exception):
                self.stderr.write("Post-processing '%s' failed!" % original_path)
                # Add a blank line before the traceback, otherwise it's
                # too easy to miss the relevant part of the error message.
                self.stderr.write()
                raise processed
            if processed:
                self.log(
                    "Post-processed '%s' as '%s'" % (original_path, processed_path),
                    level=2,
                )
                self.post_processed_files.append(original_path)
            else:
                self.log("Skipped post-processing '%s'" % original_path)

    return {
        "modified": self.copied_files + self.symlinked_files,
        "unmodified": self.unmodified_files,
        "post_processed": self.post_processed_files,
    }
django/contrib/staticfiles/management/commands/collectstatic.py
453
django
{ "docstring": "\n Perform the bulk of the work of collectstatic.\n\n Split off from handle() to facilitate testing.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 13 }
204
Python
144
9c19aff7c7561e3a82978a272ecdaad40dda5c00
collectstatic.py
204,349
47
274
collect
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
957
0
50,706
17
3
5
def test_torch_auto_gpu_to_cpu(ray_start_4_cpus_2_gpus):
    num_workers = 2
    assert os.environ["CUDA_VISIBLE_DEVICES"] == ""
python/ray/train/tests/test_gpu.py
35
ray
{ "docstring": "Tests if GPU tensors are auto converted to CPU on driver.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
9
Python
9
d99eff919bf785f911e4eebc87ddc4960344a139
test_gpu.py
128,859
23
163
test_torch_auto_gpu_to_cpu
https://github.com/ray-project/ray.git
[AIR] Hard deprecate old Trainer, old callbacks (#29015) Hard deprecations for ray.train.Trainer, ray.train.callbacks and ray.train.checkpoint.CheckpointStrategy. Restart-on-failure logic from BackendExecutor has also been removed as it is superseded by Tune. Some tests have been refactored to use the new API. Tests that are no longer applicable have been removed. Signed-off-by: Antoni Baum <[email protected]> Signed-off-by: Amog Kamsetty <[email protected]> Co-authored-by: Amog Kamsetty <[email protected]>
18
0
28,826
8
1
4
def verification_url(self) -> str: return self._device_flow_info.verification_url
homeassistant/components/google/api.py
25
core
{ "docstring": "Return the verification url that the user should visit to enter the code.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 11 }
6
Python
6
7876ffe9e392b20da16f0d0c44c723f526f807e6
api.py
293,617
3
14
verification_url
https://github.com/home-assistant/core.git
Update google calendar integration with a config flow (#68010) * Convert google calendar to config flow and async * Call correct exchange method * Fix async method and reduce unnecessary diffs * Wording improvements * Reduce unnecessary diffs * Run load/update config from executor * Update homeassistant/components/google/calendar.py Co-authored-by: Martin Hjelmare <[email protected]> * Remove unnecessary updating of unexpected multiple config entries. * Remove unnecessary unique_id checks * Improve readability with comments about device code expiration * Update homeassistant/components/google/calendar.py Co-authored-by: Martin Hjelmare <[email protected]> * Update homeassistant/components/google/calendar.py Co-authored-by: Martin Hjelmare <[email protected]> * Update homeassistant/components/google/api.py Co-authored-by: Martin Hjelmare <[email protected]> * Add comment for when code is none on timeout Co-authored-by: Martin Hjelmare <[email protected]>
20
0
92,674
7
10
3
def step_attempt(self) -> ResultDict:
rllib/agents/trainer.py
16
ray
{ "docstring": "Attempts a single training step, including evaluation, if required.\n\n Override this method in your Trainer sub-classes if you would like to\n keep the n step-attempts logic (catch worker failures) in place or\n override `step()` directly if you would like to handle worker\n failures yourself.\n\n Returns:\n The results dict with stats/infos on sampling, training,\n and - if required - evaluation.\n ", "language": "en", "n_whitespaces": 123, "n_words": 59, "vocab_size": 49 }
4
Python
4
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
trainer.py
133,869
60
278
step_attempt
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
11
0
30,129
6
1
2
def zauto(self): return self["zauto"]
packages/python/plotly/plotly/graph_objs/_choropleth.py
22
plotly.py
{ "docstring": "\n Determines whether or not the color domain is computed with\n respect to the input data (here in `z`) or the bounds set in\n `zmin` and `zmax` Defaults to `false` when `zmin` and `zmax`\n are set by the user.\n\n The 'zauto' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n ", "language": "en", "n_whitespaces": 126, "n_words": 54, "vocab_size": 42 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_choropleth.py
226,431
2
11
zauto
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
58,104
7
15
37
def ravel_multi_index(multi_index, dims, mode='raise', order='C'):
    assert len(multi_index) == len(dims), f"len(multi_index)={len(multi_index)} != len(dims)={len(dims)}"
    dims = tuple(core.concrete_or_error(operator.index, d, "in `dims` argument of ravel_multi_index().") for d in dims)
    _check_arraylike("ravel_multi_index", *multi_index)
    for index in multi_index:
        if mode == 'raise':
            core.concrete_or_error(array, index,
                "The error occurred because ravel_multi_index was jit-compiled"
                " with mode='raise'. Use mode='wrap' or mode='clip' instead.")
        if not issubdtype(_dtype(index), integer):
            raise TypeError("only int indices permitted")
    if mode == "raise":
        if _any(any((i < 0) | (i >= d)) for i, d in zip(multi_index, dims)):
            raise ValueError("invalid entry in coordinates array")
    elif mode == "clip":
        multi_index = [clip(i, 0, d - 1) for i, d in zip(multi_index, dims)]
    elif mode == "wrap":
        multi_index = [i % d for i, d in zip(multi_index, dims)]
    else:
        raise ValueError(f"invalid mode={mode!r}. Expected 'raise', 'wrap', or 'clip'")

    if order == "F":
        strides = np.cumprod((1,) + dims[:-1])
    elif order == "C":
        strides = np.cumprod((1,) + dims[1:][::-1])[::-1]
    else:
        raise ValueError(f"invalid order={order!r}. Expected 'C' or 'F'")

    result = array(0, dtype=dtypes.canonicalize_dtype(int_))
    for i, s in zip(multi_index, strides):
        result = result + i * s
    return result

_UNRAVEL_INDEX_DOC =

@_wraps(np.unravel_index, lax_description=_UNRAVEL_INDEX_DOC)
jax/_src/numpy/lax_numpy.py
529
@_wraps(np.unravel_index, lax_description=_UNRAVEL_INDEX_DOC)
jax
{ "docstring": "\\\nUnlike numpy's implementation of unravel_index, negative indices are accepted\nand out-of-bounds indices are clipped.\n", "language": "en", "n_whitespaces": 12, "n_words": 15, "vocab_size": 13 }
175
Python
112
667d63aa2d4fbf7c9da73aab0e24c5c4c33cb5ba
lax_numpy.py
120,278
30
294
ravel_multi_index
https://github.com/google/jax.git
replace int with operator.index part2 This change align the behavior of `ravel_multi_index`, `split` and `indices` to their `numpy` counterparts. Also ensure size argument of `nonzero` should be integer. The changes with `*space` are only simplification
246
1
26,808
16
1
5
def ismethod(object): return isinstance(object, types.MethodType)
python3.10.4/Lib/inspect.py
26
XX-Net
{ "docstring": "Return true if the object is an instance method.\n\n Instance method objects provide these attributes:\n __doc__ documentation string\n __name__ name with which this method was defined\n __func__ function object containing implementation of method\n __self__ instance to which this method is bound", "language": "en", "n_whitespaces": 100, "n_words": 41, "vocab_size": 33 }
5
Python
5
8198943edd73a363c266633e1aa5b2a9e9c9f526
inspect.py
218,370
2
15
ismethod
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
11
0
55,264
8
1
25
async def test_logbook_invalid_entity(recorder_mock, hass, hass_client):
    await async_setup_component(hass, "logbook", {})
    await hass.async_block_till_done()
    client = await hass_client()

    # Today time 00:00:00
    start = dt_util.utcnow().date()
    start_date = datetime(start.year, start.month, start.day)

    # Test today entries with filter by end_time
    end_time = start + timedelta(hours=24)
    response = await client.get(
        f"/api/logbook/{start_date.isoformat()}?end_time={end_time}&entity=invalid"
    )
    assert response.status == HTTPStatus.INTERNAL_SERVER_ERROR
tests/components/logbook/test_init.py
161
core
{ "docstring": "Test the logbook view with requesting an invalid entity.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
50
Python
40
31a787558fd312331b55e5c2c4b33341fc3601fc
test_init.py
289,406
11
87
test_logbook_invalid_entity
https://github.com/home-assistant/core.git
Ensure recorder test fixture is setup before hass fixture (#80528) * Ensure recorder test fixture is setup before hass fixture * Adjust more tests
93
0
88,548
13
2
9
def recorder_or_dbworker(self) -> bool:
    thread_name = threading.current_thread().name
    return bool(
        thread_name == "Recorder" or thread_name.startswith(DB_WORKER_PREFIX)
    )
homeassistant/components/recorder/pool.py
55
core
{ "docstring": "Check if the thread is a recorder or dbworker thread.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
15
Python
14
bc862e97ed68cce8c437327651f85892787e755e
pool.py
293,721
6
31
recorder_or_dbworker
https://github.com/home-assistant/core.git
Use a dedicated executor pool for database operations (#68105) Co-authored-by: Erik Montnemery <[email protected]> Co-authored-by: Franck Nijhof <[email protected]>
54
0
92,777
10
2
8
def get_network_names(self) -> t.Optional[t.List[str]]:
    if self.networks is None:
        return None
    return sorted(self.networks)
test/lib/ansible_test/_internal/docker_util.py
55
ansible
{ "docstring": "Return a list of the network names the container is attached to.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
12
Python
11
3eb0485dd92c88cc92152d3656d94492db44b183
docker_util.py
267,980
5
34
get_network_names
https://github.com/ansible/ansible.git
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
44
0
79,255
8
1
17
def test_page_number_extraction_on_empty_pages():
    preprocessor = PreProcessor(add_page_number=True, split_by="word", split_length=7, split_overlap=0)
    text_page_one = "This is a text on page one."
    text_page_three = "This is a text on page three."
    # this is what we get from PDFToTextConverter in case of an "empty" page
    document_with_empty_pages = f"{text_page_one}\f\f{text_page_three}"
    document = Document(content=document_with_empty_pages)

    documents = preprocessor.process(document)

    assert documents[0].meta["page"] == 1
    assert documents[1].meta["page"] == 3

    # verify the placeholder for the empty page has been removed
    assert documents[0].content.strip() == text_page_one
    assert documents[1].content.strip() == text_page_three
test/nodes/test_preprocessor.py
179
haystack
{ "docstring": "\n Often \"marketing\" documents contain pages without text (visuals only). When extracting page numbers, these pages should be counted as well to avoid\n issues when mapping results back to the original document.\n ", "language": "en", "n_whitespaces": 41, "n_words": 31, "vocab_size": 29 }
76
Python
52
5fedfb03b03496d7ca25f55788e1fa576ff1b2a4
test_preprocessor.py
257,999
11
101
test_page_number_extraction_on_empty_pages
https://github.com/deepset-ai/haystack.git
fix: Fix the error of wrong page numbers when documents contain empty pages. (#3330) * Fix the error of wrong page numbers when documents contain empty pages. * Reformat using git hooks. * Use a more descriptive placeholder
115
0
75,178
10
5
17
def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None):
    r
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    encoder_outputs = self.encoder(
        pixel_values,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )
    sequence_output = encoder_outputs[0]

    if not return_dict:
        return (sequence_output,) + encoder_outputs[1:]

    return BaseModelOutput(
        last_hidden_state=sequence_output,
        hidden_states=encoder_outputs.hidden_states,
        attentions=encoder_outputs.attentions,
    )

@add_start_docstrings(
    ,
    SEGFORMER_START_DOCSTRING,
)
src/transformers/models/segformer/modeling_segformer.py
196
@add_start_docstrings( """ SegFormer Model transformer with an image classification head on top (a linear layer on top of the final hidden states) e.g. for ImageNet. """, SEGFORMER_START_DOCSTRING, )
transformers
{ "docstring": "\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import SegformerFeatureExtractor, SegformerModel\n >>> from PIL import Image\n >>> import requests\n\n >>> feature_extractor = SegformerFeatureExtractor.from_pretrained(\"nvidia/mit-b0\")\n >>> model = SegformerModel.from_pretrained(\"nvidia/mit-b0\")\n\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> inputs = feature_extractor(images=image, return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n >>> sequence_output = outputs.last_hidden_state\n ```\n SegFormer Model transformer with an image classification head on top (a linear layer on top of the final hidden\n states) e.g. for ImageNet.\n ", "language": "en", "n_whitespaces": 179, "n_words": 71, "vocab_size": 50 }
67
Python
40
ac224bb0797c1ee6522d814139f3eb0a8947267b
modeling_segformer.py
33,933
40
127
forward
https://github.com/huggingface/transformers.git
[Fix doc examples] Add missing from_pretrained (#15044) * fix doc example - ValueError: Parameter config should be an instance of class `PretrainedConfig` * Update src/transformers/models/segformer/modeling_segformer.py Co-authored-by: NielsRogge <[email protected]> * update Co-authored-by: ydshieh <[email protected]> Co-authored-by: NielsRogge <[email protected]>
246
1
6,173
10
2
17
def logout(self):
    from django.contrib.auth import get_user, logout

    request = HttpRequest()
    if self.session:
        request.session = self.session
        request.user = get_user(request)
    else:
        engine = import_module(settings.SESSION_ENGINE)
        request.session = engine.SessionStore()
    logout(request)
    self.cookies = SimpleCookie()
django/test/client.py
119
django
{ "docstring": "Log out the user by removing the cookies and session object.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
29
Python
23
9c19aff7c7561e3a82978a272ecdaad40dda5c00
client.py
206,351
11
71
logout
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
122
0
51,502
12
1
4
def test_add_prefix_outputs(self) -> None: self._test_add_prefix(rename_outputs=True)
onnx/test/compose_test.py
29
onnx
{ "docstring": "\n Tests prefixing graph outputs only. Relevant node edges should be renamed as well\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 13 }
5
Python
5
83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd
compose_test.py
255,403
5
16
test_add_prefix_outputs
https://github.com/onnx/onnx.git
Use Python type annotations rather than comments (#3962) * These have been supported since Python 3.5. ONNX doesn't support Python < 3.6, so we can use the annotations. Diffs generated by https://pypi.org/project/com2ann/. Signed-off-by: Gary Miguel <[email protected]> * Remove MYPY conditional logic in gen_proto.py It breaks the type annotations and shouldn't be needed. Signed-off-by: Gary Miguel <[email protected]> * Get rid of MYPY bool from more scripts Signed-off-by: Gary Miguel <[email protected]> * move Descriptors class above where its referenced in type annotation Signed-off-by: Gary Miguel <[email protected]> * fixes Signed-off-by: Gary Miguel <[email protected]> * remove extra blank line Signed-off-by: Gary Miguel <[email protected]> * fix type annotations Signed-off-by: Gary Miguel <[email protected]> * fix type annotation in gen_docs Signed-off-by: Gary Miguel <[email protected]> * fix Operators.md Signed-off-by: Gary Miguel <[email protected]> * fix TestCoverage.md Signed-off-by: Gary Miguel <[email protected]> * fix protoc-gen-mypy.py Signed-off-by: Gary Miguel <[email protected]>
19
0
74,743
8
4
10
def coroutine(func):
    warnings.warn('"@coroutine" decorator is deprecated since Python 3.8, use "async def" instead',
                  DeprecationWarning,
                  stacklevel=2)
    if inspect.iscoroutinefunction(func):
        # In Python 3.5 that's all we need to do for coroutines
        # defined with "async def".
        return func

    if inspect.isgeneratorfunction(func):
        coro = func
    else:
python3.10.4/Lib/asyncio/coroutines.py
72
XX-Net
{ "docstring": "Decorator to mark coroutines.\n\n If the coroutine is not yielded from before it is destroyed,\n an error message is logged.\n ", "language": "en", "n_whitespaces": 29, "n_words": 20, "vocab_size": 18 }
42
Python
37
8198943edd73a363c266633e1aa5b2a9e9c9f526
coroutines.py
220,420
19
83
coroutine
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
119
0
55,987
8
1
5
def update_styles(self) -> None: self.post_message_no_wait(messages.RefreshStyles(self))
src/textual/app.py
34
textual
{ "docstring": "Request update of styles.\n\n Should be called whenever CSS classes / pseudo classes change.\n\n ", "language": "en", "n_whitespaces": 28, "n_words": 14, "vocab_size": 13 }
5
Python
5
c611fd84ff3a4f67a1f2a5a38d42fad37215cb9a
app.py
181,962
7
19
update_styles
https://github.com/Textualize/textual.git
docstrings
19
0
43,693
9
2
34
def pinv(a, rcond=1e-15, hermitian=False):
    a, wrap = _makearray(a)
    rcond = asarray(rcond)
    if _is_empty_2d(a):
        m, n = a.shape[-2:]
        res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)
        return wrap(res)
    a = a.conjugate()
    u, s, vt = svd(a, full_matrices=False, hermitian=hermitian)

    # discard small singular values
    cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
    large = s > cutoff
    s = divide(1, s, where=large, out=s)
    s[~large] = 0

    res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
    return wrap(res)

# Determinant

@array_function_dispatch(_unary_dispatcher)
numpy/linalg/linalg.py
289
@array_function_dispatch(_unary_dispatcher)
numpy
{ "docstring": "\n Compute the (Moore-Penrose) pseudo-inverse of a matrix.\n\n Calculate the generalized inverse of a matrix using its\n singular-value decomposition (SVD) and including all\n *large* singular values.\n\n .. versionchanged:: 1.14\n Can now operate on stacks of matrices\n\n Parameters\n ----------\n a : (..., M, N) array_like\n Matrix or stack of matrices to be pseudo-inverted.\n rcond : (...) array_like of float\n Cutoff for small singular values.\n Singular values less than or equal to\n ``rcond * largest_singular_value`` are set to zero.\n Broadcasts against the stack of matrices.\n hermitian : bool, optional\n If True, `a` is assumed to be Hermitian (symmetric if real-valued),\n enabling a more efficient method for finding singular values.\n Defaults to False.\n\n .. versionadded:: 1.17.0\n\n Returns\n -------\n B : (..., N, M) ndarray\n The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so\n is `B`.\n\n Raises\n ------\n LinAlgError\n If the SVD computation does not converge.\n\n See Also\n --------\n scipy.linalg.pinv : Similar function in SciPy.\n scipy.linalg.pinvh : Compute the (Moore-Penrose) pseudo-inverse of a\n Hermitian matrix.\n\n Notes\n -----\n The pseudo-inverse of a matrix A, denoted :math:`A^+`, is\n defined as: \"the matrix that 'solves' [the least-squares problem]\n :math:`Ax = b`,\" i.e., if :math:`\\\\bar{x}` is said solution, then\n :math:`A^+` is that matrix such that :math:`\\\\bar{x} = A^+b`.\n\n It can be shown that if :math:`Q_1 \\\\Sigma Q_2^T = A` is the singular\n value decomposition of A, then\n :math:`A^+ = Q_2 \\\\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are\n orthogonal matrices, :math:`\\\\Sigma` is a diagonal matrix consisting\n of A's so-called singular values, (followed, typically, by\n zeros), and then :math:`\\\\Sigma^+` is simply the diagonal matrix\n consisting of the reciprocals of A's singular values\n (again, followed by zeros). [1]_\n\n References\n ----------\n .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,\n FL, Academic Press, Inc., 1980, pp. 139-142.\n\n Examples\n --------\n The following example checks that ``a * a+ * a == a`` and\n ``a+ * a * a+ == a+``:\n\n >>> a = np.random.randn(9, 6)\n >>> B = np.linalg.pinv(a)\n >>> np.allclose(a, np.dot(a, np.dot(B, a)))\n True\n >>> np.allclose(B, np.dot(B, np.dot(a, B)))\n True\n\n ", "language": "en", "n_whitespaces": 600, "n_words": 331, "vocab_size": 211 }
74
Python
57
3ba8be6f748edc3afedc49a320423093acfb43d4
linalg.py
160,361
15
182
pinv
https://github.com/numpy/numpy.git
DOC:linalg: Remove ref to scipy.linalg.pinv2
132
1
38,611
14
1
21
def test_save_as_continue_false(self):
    post_data = {"_saveasnew": "", "name": "John M", "gender": 1, "age": 42}
    url = reverse(
        "admin:admin_views_person_change",
        args=(self.per1.pk,),
        current_app=site2.name,
    )
    response = self.client.post(url, post_data)
    self.assertEqual(len(Person.objects.filter(name="John M")), 1)
    self.assertEqual(len(Person.objects.filter(id=self.per1.pk)), 1)
    self.assertRedirects(
        response,
        reverse("admin:admin_views_person_changelist", current_app=site2.name),
    )
tests/admin_views/tests.py
203
django
{ "docstring": "\n Saving a new object using \"Save as new\" redirects to the changelist\n instead of the change view when ModelAdmin.save_as_continue=False.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 18 }
34
Python
30
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,747
14
123
test_save_as_continue_false
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
152
0
52,083
14
2
9
def get_forward_related_filter(self, obj): return { "%s__%s" % (self.name, rh_field.name): getattr(obj, rh_field.attname) for _, rh_field in self.related_fields }
django/db/models/fields/related.py
61
django
{ "docstring": "\n Return the keyword arguments that when supplied to\n self.model.object.filter(), would select all instances related through\n this field to the remote obj. This is used to build the querysets\n returned by related descriptors. obj is an instance of\n self.related_field.model.\n ", "language": "en", "n_whitespaces": 81, "n_words": 38, "vocab_size": 32 }
17
Python
17
9c19aff7c7561e3a82978a272ecdaad40dda5c00
related.py
205,602
5
39
get_forward_related_filter
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
60
0
51,153
10
2
19
def test_operator_extra_link_override_plugin(dag_run, task_2, viewer_client):
    response = viewer_client.get(
        f"{ENDPOINT}?dag_id={task_2.dag_id}&task_id={task_2.task_id}"
        f"&execution_date={DEFAULT_DATE}&link_name=airflow",
        follow_redirects=True,
    )

    assert response.status_code == 200
    response_str = response.data
    if isinstance(response.data, bytes):
        response_str = response_str.decode()
    assert json.loads(response_str) == {'url': 'https://airflow.apache.org/1.10.5/', 'error': None}
tests/www/views/test_views_extra_links.py
139
airflow
{ "docstring": "\n This tests checks if Operator Link (AirflowLink) defined in the Dummy2TestOperator\n is overridden by Airflow Plugin (AirflowLink2).\n\n AirflowLink returns 'https://airflow.apache.org/' link\n AirflowLink2 returns 'https://airflow.apache.org/1.10.5/' link\n ", "language": "en", "n_whitespaces": 41, "n_words": 25, "vocab_size": 23 }
31
Python
26
08575ddd8a72f96a3439f73e973ee9958188eb83
test_views_extra_links.py
45,548
11
71
test_operator_extra_link_override_plugin
https://github.com/apache/airflow.git
Change BaseOperatorLink interface to take a ti_key, not a datetime (#21798)
80
0
8,640
12
4
8
def histogram(self, mask=None, extrema=None):
    self.load()
    if mask:
        mask.load()
        return self.im.histogram((0, 0), mask.im)
    if self.mode in ("I", "F"):
        if extrema is None:
            extrema = self.getextrema()
        return self.im.histogram(extrema)
    return self.im.histogram()
src/PIL/Image.py
137
Pillow
{ "docstring": "\n Returns a histogram for the image. The histogram is returned as a\n list of pixel counts, one for each pixel value in the source\n image. Counts are grouped into 256 bins for each band, even if\n the image has more than 8 bits per band. If the image has more\n than one band, the histograms for all bands are concatenated (for\n example, the histogram for an \"RGB\" image contains 768 values).\n\n A bilevel image (mode \"1\") is treated as a greyscale (\"L\") image\n by this method.\n\n If a mask is provided, the method returns a histogram for those\n parts of the image where the mask image is non-zero. The mask\n image must have the same size as the image, and be either a\n bi-level image (mode \"1\") or a greyscale image (\"L\").\n\n :param mask: An optional mask.\n :param extrema: An optional tuple of manually-specified extrema.\n :returns: A list containing pixel counts.\n ", "language": "en", "n_whitespaces": 264, "n_words": 151, "vocab_size": 89 }
28
Python
23
de968dd920eaa3d1a27877059c6bbb9043a9d26b
Image.py
242,493
10
84
histogram
https://github.com/python-pillow/Pillow.git
Document that histogram() uses 256 bins per channel
122
0
69,871
12
3
14
def _error_page_workaround(self, js_enabled, html):
    match = re.search(r'"errorCode":"([^"]*)"', html)
    if match is None:
        return

    error = match.group(1)
    log.webview.error("Load error: {}".format(error))

    if js_enabled:
        return

    self._show_error_page(self.url(), error=error)
qutebrowser/browser/webengine/webenginetab.py
109
qutebrowser
{ "docstring": "Check if we're displaying a Chromium error page.\n\n This gets called if we got a loadFinished(False), so we can display at\n least some error page in situations where Chromium's can't be\n displayed.\n\n WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66643\n ", "language": "en", "n_whitespaces": 70, "n_words": 35, "vocab_size": 31 }
24
Python
20
86b5bed388544d2d445a3dba151e3c3a4c8814b7
webenginetab.py
320,618
9
66
_error_page_workaround
https://github.com/qutebrowser/qutebrowser.git
Partially re-revert _error_page_workaround changes The logging part removed in 438b8b46094890a28db6bac07ff1ae67bbc5ee78 is still needed for some tests, and debugging too.
95
0
117,229
10
5
36
def _get_validity_buffer(self) -> Tuple[PandasBuffer, Any]:
    null, invalid = self.describe_null

    if self.dtype[0] == DtypeKind.STRING:
        # For now, use byte array as the mask.
        # TODO: maybe store as bit array to save space?..
        buf = self._col.to_numpy()

        # Determine the encoding for valid values
        valid = invalid == 0
        invalid = not valid

        mask = np.zeros(shape=(len(buf),), dtype=np.bool8)
        for i, obj in enumerate(buf):
            mask[i] = valid if isinstance(obj, str) else invalid

        # Convert the mask array to a Pandas "buffer" using
        # a NumPy array as the backing store
        buffer = PandasBuffer(mask)

        # Define the dtype of the returned buffer
        dtype = (DtypeKind.BOOL, 8, ArrowCTypes.BOOL, Endianness.NATIVE)

        return buffer, dtype

    try:
        msg = _NO_VALIDITY_BUFFER[null] + " so does not have a separate mask"
    except KeyError:
        # TODO: implement for other bit/byte masks?
        raise NotImplementedError("See self.describe_null")

    raise NoBufferPresent(msg)
pandas/core/exchange/column.py
237
pandas
{ "docstring": "\n Return the buffer containing the mask values indicating missing data and\n the buffer's associated dtype.\n Raises NoBufferPresent if null representation is not a bit or byte mask.\n ", "language": "en", "n_whitespaces": 56, "n_words": 27, "vocab_size": 25 }
133
Python
88
90140f055892a46f473bd26affab88a7f171e394
column.py
166,209
22
147
_get_validity_buffer
https://github.com/pandas-dev/pandas.git
ENH: Implement DataFrame interchange protocol (#46141)
377
0
39,774
14
4
8
def is_installable_dir(path):
    # type: (str) -> bool
    if not os.path.isdir(path):
        return False
    setup_py = os.path.join(path, "setup.py")
    if os.path.isfile(setup_py):
        return True
    pyproject_toml = os.path.join(path, "pyproject.toml")
    if os.path.isfile(pyproject_toml):
        return True
    return False
.venv/lib/python3.8/site-packages/pip/_internal/utils/misc.py
116
transferlearning
{ "docstring": "Is path is a directory containing setup.py or pyproject.toml?", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
30
Python
21
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
misc.py
61,273
10
69
is_installable_dir
https://github.com/jindongwang/transferlearning.git
upd; format
75
0
12,484
9
1
17
def put_cluster_metadata(gcs_client, num_retries) -> None:
    metadata = _generate_cluster_metadata()
    ray._private.utils.internal_kv_put_with_retry(
        gcs_client,
        usage_constant.CLUSTER_METADATA_KEY,
        json.dumps(metadata).encode(),
        namespace=ray_constants.KV_NAMESPACE_CLUSTER,
        num_retries=num_retries,
    )
    return metadata
python/ray/_private/usage/usage_lib.py
82
ray
{ "docstring": "Generate the cluster metadata and store it to GCS.\n\n It is a blocking API.\n\n Params:\n gcs_client (GCSClient): The GCS client to perform KV operation PUT.\n num_retries (int): Max number of times to retry if PUT fails.\n\n Raises:\n gRPC exceptions if PUT fails.\n ", "language": "en", "n_whitespaces": 75, "n_words": 42, "vocab_size": 37 }
17
Python
16
20ab9188c61d91ef0c79cc5d527f17db7c43b604
usage_lib.py
144,436
21
53
put_cluster_metadata
https://github.com/ray-project/ray.git
[Ray Usage Stats] Record cluster metadata + Refactoring. (#22170) This is the first PR to implement usage stats on Ray. Please refer to the file `usage_lib.py` for more details. The full specification is here https://docs.google.com/document/d/1ZT-l9YbGHh-iWRUC91jS-ssQ5Qe2UQ43Lsoc1edCalc/edit#heading=h.17dss3b9evbj. You can see the full PR for phase 1 from here; https://github.com/rkooo567/ray/pull/108/files. The PR is doing some basic refactoring + adding cluster metadata to GCS instead of the version numbers. After this PR, we will add code to enable usage report "off by default".
67
0
33,203
11
1
9
def extra_action_out(self, input_dict, state_batches, model, action_dist):
    # Return value function outputs. VF estimates will hence be added to
    # the SampleBatches produced by the sampler(s) to generate the train
    # batches going into the loss function.
    return {
        SampleBatch.VF_PREDS: model.value_function(),
    }
rllib/policy/torch_mixins.py
44
ray
{ "docstring": "Defines extra fetches per action computation.\n\n Args:\n input_dict (Dict[str, TensorType]): The input dict used for the action\n computing forward pass.\n state_batches (List[TensorType]): List of state tensors (empty for\n non-RNNs).\n model (ModelV2): The Model object of the Policy.\n action_dist (TorchDistributionWrapper): The instantiated distribution\n object, resulting from the model's outputs and the given\n distribution class.\n\n Returns:\n Dict[str, TensorType]: Dict with extra tf fetches to perform per\n action computation.\n ", "language": "en", "n_whitespaces": 217, "n_words": 66, "vocab_size": 52 }
41
Python
35
dea134a4726c46d57567e724bcc7a2de43f5200e
torch_mixins.py
139,719
4
27
extra_action_out
https://github.com/ray-project/ray.git
[RLlib] Clean up Policy mixins. (#24746)
94
0
31,763
9
1
26
async def test_kill_job_actor_in_before_driver_finish(self, job_manager):
    with tempfile.TemporaryDirectory() as tmp_dir:
        pid_file, _, job_id = await _run_hanging_command(job_manager, tmp_dir)
        with open(pid_file, "r") as file:
            pid = int(file.read())
            assert psutil.pid_exists(pid), "driver subprocess should be running"

        actor = job_manager._get_actor_for_job(job_id)
        ray.kill(actor, no_restart=True)
        await async_wait_for_condition_async_predicate(
            check_job_failed, job_manager=job_manager, job_id=job_id
        )

        # Ensure driver subprocess gets cleaned up after job reached
        # termination state
        await async_wait_for_condition(check_subprocess_cleaned, pid=pid)
dashboard/modules/job/tests/test_job_manager.py
168
ray
{ "docstring": "\n Test submitting a long running / blocker driver script, and kill\n the job supervisor actor before script returns and ensure\n\n 1) Job status is correctly marked as failed\n 2) No hanging subprocess from failed job\n ", "language": "en", "n_whitespaces": 71, "n_words": 35, "vocab_size": 32 }
57
Python
49
326b5bd1acc6d3d00ab0546e4ae45da6bed501f7
test_job_manager.py
126,672
12
99
test_kill_job_actor_in_before_driver_finish
https://github.com/ray-project/ray.git
Convert job_manager to be async (#27123) Updates jobs api Updates snapshot api Updates state api Increases jobs api version to 2 Signed-off-by: Alan Guo [email protected] Why are these changes needed? follow-up for #25902 (comment)
215
0
28,224
14
1
8
def mint_data_fixture():
    return json.loads(load_fixture("awair/mint.json"))

@pytest.fixture(name="no_devices", scope="session")
tests/components/awair/conftest.py
54
@pytest.fixture(name="no_devices", scope="session")
core
{ "docstring": "Fixture representing data returned from Awair mint device.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
6
Python
6
ebbff7b60e43f17d65ead811d314602b9daddfc4
conftest.py
303,766
2
15
mint_data_fixture
https://github.com/home-assistant/core.git
Add Awair Local API support (#75535)
11
1
102,575
10
2
4
async def _async_wait_for_pairing_mode(self) -> None:
    assert self._discovery
    device = self._discovery.device
homeassistant/components/snooz/config_flow.py
34
core
{ "docstring": "Process advertisements until pairing mode is detected.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
10
Python
10
7d097d18b0c6041475080b3c400e37b25185faba
config_flow.py
288,775
17
76
_async_wait_for_pairing_mode
https://github.com/home-assistant/core.git
Add support for Snooz BLE devices (#78790) Co-authored-by: J. Nick Koston <[email protected]>
31
0
87,927
8
1
6
def on_chord_header_end(self, chord, **header) -> None: self.on_group_end(chord.tasks, **header)
celery/canvas.py
39
celery
{ "docstring": "Method that is called on сhord header stamping end.\n\n Arguments:\n chord (chord): chord that is stamped.\n headers (Dict): Partial headers that could be merged with existing headers.\n ", "language": "en", "n_whitespaces": 72, "n_words": 27, "vocab_size": 22 }
8
Python
7
1c4ff33bd22cf94e297bd6449a06b5a30c2c1fbc
canvas.py
208,063
8
24
on_chord_header_end
https://github.com/celery/celery.git
Canvas Header Stamping (#7384) * Strip down the header-stamping PR to the basics. * Serialize groups. * Add groups to result backend meta data. * Fix spelling mistake. * Revert changes to canvas.py * Revert changes to app/base.py * Add stamping implementation to canvas.py * Send task to AMQP with groups. * Successfully pass single group to result. * _freeze_gid dict merge fixed * First draft of the visitor API. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * OptionsVisitor created * Fixed canvas.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test for simple test for chord and fixed chord implementation * Changed _IMMUTABLE_OPTIONS * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed list order * Fixed tests (stamp test and chord test), fixed order in groups * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed lint and elements * Changed implementation of stamp API and fix lint * Added documentation to Stamping API. Added chord with groups test * Implemented stamping inside replace and added test for an implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Splitted into subtests * Group stamping rollback * group.id is None fixed * Added integration test * Added integration test * apply_async fixed * Integration test and test_chord fixed * Lint fixed * chord freeze fixed * Minor fixes. * Chain apply_async fixed and tests fixed * lint fixed * Added integration test for chord * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * type -> isinstance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Redo header stamping (#7341) * _freeze_gid dict merge fixed * OptionsVisitor created * Fixed canvas.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test for simple test for chord and fixed chord implementation * Changed _IMMUTABLE_OPTIONS * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed list order * Fixed tests (stamp test and chord test), fixed order in groups * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed lint and elements * Changed implementation of stamp API and fix lint * Added documentation to Stamping API. Added chord with groups test * Implemented stamping inside replace and added test for an implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Splitted into subtests * Group stamping rollback * group.id is None fixed * Added integration test * Added integration test * apply_async fixed * Integration test and test_chord fixed * Lint fixed * chord freeze fixed * Minor fixes. 
* Chain apply_async fixed and tests fixed * lint fixed * Added integration test for chord * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * type -> isinstance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Omer Katz <[email protected]> * Added stamping mechanism * Manual stamping improved * flake8 fixed * Added subtests * Add comma. * Moved groups to stamps * Fixed chord and added test for that * Strip down the header-stamping PR to the basics. * Serialize groups. * Add groups to result backend meta data. * Fix spelling mistake. * Revert changes to canvas.py * Revert changes to app/base.py * Add stamping implementation to canvas.py * Send task to AMQP with groups. * Successfully pass single group to result. * _freeze_gid dict merge fixed * First draft of the visitor API. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * OptionsVisitor created * Fixed canvas.py * Added test for simple test for chord and fixed chord implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changed _IMMUTABLE_OPTIONS * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed chord interface * Fixed list order * Fixed tests (stamp test and chord test), fixed order in groups * Fixed lint and elements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changed implementation of stamp API and fix lint * Added documentation to Stamping API. Added chord with groups test * Implemented stamping inside replace and added test for an implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Added test additonal tests for chord, improved coverage * Splitted into subtests * Group stamping rollback * group.id is None fixed * Added integration test * Added integration test * apply_async fixed * Integration test and test_chord fixed * Lint fixed * chord freeze fixed * Minor fixes. * Chain apply_async fixed and tests fixed * lint fixed * Added integration test for chord * type -> isinstance * Added stamping mechanism * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Manual stamping improved * fail_ci_if_error uncommented * flake8 fixed * Added subtests * Changes * Add comma. * Fixed chord and added test for that * canvas.py fixed * Test chord.py fixed * Fixed stamped_headers * collections import fixed * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * collections import fixed * Update celery/backends/base.py Co-authored-by: Omer Katz <[email protected]> * ampq.py fixed * Refrain from using deprecated import path. * Fix test_complex_chain regression. Whenever we stamp a group we need to freeze it first if it wasn't already frozen. Somewhere along the line, the group id changed because we were freezing twice. This commit places the stamping operation after preparing the chain's steps which fixes the problem somehow. We don't know why yet. 
* Fixed integration tests * Fixed integration tests * Fixed integration tests * Fixed integration tests * Fixed issues with maybe_list. Add documentation * Fixed potential issue with integration tests * Fixed issues with _regen * Fixed issues with _regen * Fixed test_generator issues * Fixed _regen stamping * Fixed _regen stamping * Fixed TimeOut issue * Fixed TimeOut issue * Fixed TimeOut issue * Update docs/userguide/canvas.rst Co-authored-by: Omer Katz <[email protected]> * Fixed Couchbase * Better stamping intro * New GroupVisitor example * Adjust documentation. Co-authored-by: Naomi Elstein <[email protected]> Co-authored-by: Omer Katz <[email protected]> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Asif Saif Uddin <[email protected]> Co-authored-by: Omer Katz <[email protected]>
22
0
52,189
8
3
32
def test_valid_incremental_read_with_slices(mocker):
    slices = [{"1": "1"}, {"2": "2"}]
    stream_output = [{"k1": "v1"}, {"k2": "v2"}, {"k3": "v3"}]
    s1 = MockStream(
        [({"sync_mode": SyncMode.incremental, "stream_slice": s, "stream_state": mocker.ANY}, stream_output) for s in slices], name="s1"
    )
    s2 = MockStream(
        [({"sync_mode": SyncMode.incremental, "stream_slice": s, "stream_state": mocker.ANY}, stream_output) for s in slices], name="s2"
    )
    state = {"cursor": "value"}
    mocker.patch.object(MockStream, "get_updated_state", return_value=state)
    mocker.patch.object(MockStream, "supports_incremental", return_value=True)
    mocker.patch.object(MockStream, "get_json_schema", return_value={})
    mocker.patch.object(MockStream, "stream_slices", return_value=slices)

    src = MockSource(streams=[s1, s2])
    catalog = ConfiguredAirbyteCatalog(streams=[_configured_stream(s1, SyncMode.incremental), _configured_stream(s2, SyncMode.incremental)])

    expected = [
        # stream 1 slice 1
        *_as_records("s1", stream_output),
        _state({"s1": state}),
        # stream 1 slice 2
        *_as_records("s1", stream_output),
        _state({"s1": state}),
        # stream 2 slice 1
        *_as_records("s2", stream_output),
        _state({"s1": state, "s2": state}),
        # stream 2 slice 2
        *_as_records("s2", stream_output),
        _state({"s1": state, "s2": state}),
    ]

    messages = _fix_emitted_at(list(src.read(logger, {}, catalog, state=defaultdict(dict))))

    assert expected == messages
airbyte-cdk/python/unit_tests/sources/test_abstract_source.py
544
airbyte
{ "docstring": "Tests that an incremental read which uses slices outputs each record in the slice followed by a STATE message, for each slice", "language": "en", "n_whitespaces": 21, "n_words": 22, "vocab_size": 20 }
128
Python
74
f83eca58eaf2129d21b5796a301732ab22675130
test_abstract_source.py
3,353
28
326
test_valid_incremental_read_with_slices
https://github.com/airbytehq/airbyte.git
CDK: Fix typing errors (#9037) * fix typing, drop AirbyteLogger * format * bump the version * use logger instead of fixture logger Co-authored-by: Eugene Kulak <[email protected]> Co-authored-by: auganbay <[email protected]>
280
0
455
15
2
8
def require_docker() -> DockerCommand:
    if command := get_docker_command():
        return command

    raise ApplicationError(f'No container runtime detected. Supported commands: {", ".join(DOCKER_COMMANDS)}')

@cache
test/lib/ansible_test/_internal/docker_util.py
58
@cache
ansible
{ "docstring": "Return the docker command to invoke. Raises an exception if docker is not available.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
20
Python
19
3eb0485dd92c88cc92152d3656d94492db44b183
docker_util.py
267,978
5
22
require_docker
https://github.com/ansible/ansible.git
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
35
1
79,253
12
1
3
def skip(self): self._skip = True
sympy/combinatorics/graycode.py
21
sympy
{ "docstring": "\n Skips the bit generation.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import GrayCode\n >>> a = GrayCode(3)\n >>> for i in a.generate_gray():\n ... if i == '010':\n ... a.skip()\n ... print(i)\n ...\n 000\n 001\n 011\n 010\n 111\n 101\n 100\n\n See Also\n ========\n\n generate_gray\n ", "language": "en", "n_whitespaces": 205, "n_words": 41, "vocab_size": 34 }
5
Python
5
498015021131af4dbb07eb110e5badaba8250c7b
graycode.py
196,099
2
11
skip
https://github.com/sympy/sympy.git
Updated import locations
19
0
47,599
7
1
14
def test_dynamic_sampling_rules_should_contain_single_uniform_rule(self):
    with Feature({self.universal_ds_flag: True, self.old_ds_flag: True}):
        response = self.get_response(
            self.org_slug,
            self.proj_slug,
            dynamicSampling=_dyn_sampling_data(multiple_uniform_rules=True),
        )
        assert response.status_code == 400
        assert (
            response.json()["dynamicSampling"]["non_field_errors"][0] == "Uniform rule "
            "must be in the last position only"
        )
tests/sentry/api/endpoints/test_project_details.py
124
sentry
{ "docstring": "\n Tests that ensures you can only have one uniform rule\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
33
Python
30
5462ee11ad11ebb9a50323befcd286816d7898c8
test_project_details.py
87,161
12
74
test_dynamic_sampling_rules_should_contain_single_uniform_rule
https://github.com/getsentry/sentry.git
feat(ds): Support new DS behaviour in project_details endpoint (#40387) Supports new adaptive dynamic sampling behaviour alongside the deprecated dynamic sampling behaviour and achieves that through feature flag differentiation This PR achieve that through the following: - Introducing a new `DynamicSamplingBiasSerializer` which is composed of id representing the bias name and a boolean flag indicating whether that particular flag is active or not - Modifies current existing behavior for both old sampling flag and new sampling flag. Essentially the new setup entails that to be on the old dynamic sampling, the following flags need to be enabled "organizations:server-side-sampling" and "organizations:server-side-sampling-ui", and to be on the new dynamic sampling configurations, you need the following flags to be enabled "organizations:dynamic-sampling-basic" and "organizations:server-side-sampling" P.S. 1: These flags will be replaced "organizations:server-side-sampling-ui" -> "organizations:dynamic-sampling-deprecated" "organizations:server-side-sampling-basic" -> "organizations:dynamic-sampling" Hence, these feature flags need to be updated once this PR lands https://github.com/getsentry/sentry/pull/40388 P.S. 2: If a project is on the new plan and the old plan, the new plan takes precedence - Introduces default biases that are enabled by default and can be overwritten. The motivation to do this is to be able to add new biases that are enabled by default, and both the GET and PUT request honor this list - `GET` and `POST` endpoint does a dictionary update of user's stored biases on the default biases that are hardcoded, and returns them to the UI/ relay. This means that the introduced project option "sentry:dynamic_sampling_biases" might not have all the toggles enabled/disabled through the UI but only the ones that a customer chose to modify Followup: - This new feature flag behaviour needs to be reflected in ProjectConfig computations
177
0
18,235
14
1
16
def test_submessage_event_sent_after_transaction_commits(self) -> None:
    hamlet = self.example_user("hamlet")
    message_id = self.send_stream_message(hamlet, "Denmark")

    with self.tornado_redirected_to_list([], expected_num_events=1):
        with mock.patch("zerver.actions.submessage.send_event") as m:
            m.side_effect = AssertionError(
                "Events should be sent only after the transaction commits."
            )
            do_add_submessage(hamlet.realm, hamlet.id, message_id, "whatever", "whatever")
zerver/tests/test_submessage.py
131
zulip
{ "docstring": "\n Tests that `send_event` is hooked to `transaction.on_commit`. This is important, because\n we don't want to end up holding locks on message rows for too long if the event queue runs\n into a problem.\n ", "language": "en", "n_whitespaces": 62, "n_words": 33, "vocab_size": 31 }
36
Python
33
3a135b04d9e9f84aa2a31d6fc0b1b08e9cf9aeac
test_submessage.py
83,601
14
73
test_submessage_event_sent_after_transaction_commits
https://github.com/zulip/zulip.git
actions: Split out zerver.actions.submessage. Signed-off-by: Anders Kaseorg <[email protected]>
139
0
17,688
13
1
6
def _reverse_pointer(self):
    reverse_octets = str(self).split('.')[::-1]
    return '.'.join(reverse_octets) + '.in-addr.arpa'
python3.10.4/Lib/ipaddress.py
60
XX-Net
{ "docstring": "Return the reverse DNS pointer name for the IPv4 address.\n\n This implements the method described in RFC1035 3.5.\n\n ", "language": "en", "n_whitespaces": 32, "n_words": 18, "vocab_size": 16 }
9
Python
9
8198943edd73a363c266633e1aa5b2a9e9c9f526
ipaddress.py
218,483
3
31
_reverse_pointer
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
30
0
55,338
11
1
2
def configure_architecture_optimizers(self): return None
nni/retiarii/oneshot/pytorch/base_lightning.py
16
nni
{ "docstring": "\n Hook kept for subclasses. A specific NAS method inheriting this base class should return its architecture optimizers here\n if architecture parameters are needed. Note that lr schedulers are not supported now for architecture_optimizers.\n\n Returns\n ----------\n arc_optimizers : List[Optimizer], Optimizer\n Optimizers used by a specific NAS algorithm. Return None if no architecture optimizers are needed.\n ", "language": "en", "n_whitespaces": 108, "n_words": 54, "vocab_size": 44 }
4
Python
4
8b2eb425274cdb4537fbce4a315aec12a378d6db
base_lightning.py
111,759
2
8
configure_architecture_optimizers
https://github.com/microsoft/nni.git
Lightning implementation for retiarii oneshot nas (#4479)
18
0
24,482
6
3
12
def _get_simple_image_viewer(self):
    # Try to render the env, if required.
    if not self._render:
        return None

    try:
        from gym.envs.classic_control.rendering import SimpleImageViewer

        return SimpleImageViewer()
    except (ImportError, ModuleNotFoundError):
        self._render = False  # disable rendering
        logger.warning(
            "Could not import gym.envs.classic_control."
            "rendering! Try `pip install gym[all]`."
        )

    return None
rllib/evaluation/env_runner_v2.py
89
ray
{ "docstring": "Maybe construct a SimpleImageViewer instance for episode rendering.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
44
Python
36
52bb8e47d483082e528fc8595005e0813a46efb8
env_runner_v2.py
124,016
13
51
_get_simple_image_viewer
https://github.com/ray-project/ray.git
[RLlib] EnvRunnerV2 and EpisodeV2 that support Connectors. (#25922)
183
0
27,497
12
1
5
def _update_api_throttle_limit(self): self._api.account.get_insights()
airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/async_job_manager.py
28
airbyte
{ "docstring": "\n Sends <ACCOUNT_ID>/insights GET request with no parameters so it would\n respond with empty list of data so api use \"x-fb-ads-insights-throttle\"\n header to update current insights throttle limit.\n ", "language": "en", "n_whitespaces": 56, "n_words": 27, "vocab_size": 25 }
3
Python
3
a3aae8017a0a40ff2006e2567f71dccb04c997a5
async_job_manager.py
3,757
2
15
_update_api_throttle_limit
https://github.com/airbytehq/airbyte.git
🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805) * Facebook Marketing performance improvement * add comments and little refactoring * fix integration tests with the new config * improve job status handling, limit concurrency to 10 * fix campaign jobs, refactor manager * big refactoring of async jobs, support random order of slices * update source _read_incremental to hook new state logic * fix issues with timeout * remove debugging and clean up, improve retry logic * merge changes from #8234 * fix call super _read_increment * generalize batch execution, add use_batch flag * improve coverage, do some refactoring of spec * update test, remove overrides of source * add split by AdSet * add smaller insights * fix end_date < start_date case * add account_id to PK * add notes * fix new streams * fix reversed incremental stream * update spec.json for SAT * upgrade CDK and bump version Co-authored-by: Dmytro Rezchykov <[email protected]> Co-authored-by: Eugene Kulak <[email protected]>
17
0
545
9
1
9
def test_two_configs_one_default_app(self):
    with self.settings(INSTALLED_APPS=["apps.two_configs_one_default_app"]):
        config = apps.get_app_config("two_configs_one_default_app")
        self.assertIsInstance(config, TwoConfig)
tests/apps/tests.py
63
django
{ "docstring": "\n Load an app that provides two AppConfig classes, one being the default.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
9
Python
9
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
201,113
4
34
test_two_configs_one_default_app
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
41
0
49,879
12
2
7
def variables(self):
    if isinstance(self.model, tf.keras.Model):
        return self.model.variables
    else:
        return self.model.variables()
rllib/policy/eager_tf_policy_v2.py
60
ray
{ "docstring": "Return the list of all savable variables for this policy.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
10
Python
9
bc3a1d35cf6e9a5fd7eef908a8e76aefb80ce6a9
eager_tf_policy_v2.py
139,488
5
36
variables
https://github.com/ray-project/ray.git
[RLlib] Introduce new policy base classes. (#24742)
53
0
31,714
11
3
9
async def _send_renewal_emails(self) -> None:
    expiring_users = await self.store.get_users_expiring_soon()

    if expiring_users:
        for user_id, expiration_ts_ms in expiring_users:
            await self._send_renewal_email(
                user_id=user_id, expiration_ts=expiration_ts_ms
            )
synapse/handlers/account_validity.py
69
synapse
{ "docstring": "Gets the list of users whose account is expiring in the amount of time\n configured in the ``renew_at`` parameter from the ``account_validity``\n configuration, and sends renewal emails to all of these users as long as they\n have an email 3PID attached to their account.\n ", "language": "en", "n_whitespaces": 72, "n_words": 44, "vocab_size": 35 }
21
Python
19
1783156dbcf4164692e66275d1c29857c434995b
account_validity.py
248,011
12
41
_send_renewal_emails
https://github.com/matrix-org/synapse.git
Add some type hints to datastore (#12423) * Add some type hints to datastore * newsfile * change `Collection` to `List` * refactor return type of `select_users_txn` * correct type hint in `stream.py` * Remove `Optional` in `select_users_txn` * remove not needed return type in `__init__` * Revert change in `get_stream_id_for_event_txn` * Remove import from `Literal`
102
0
72,045
13
1
6
def ones(shape, *, dtype=None, meta=None, **kwargs): raise NotImplementedError
dask/array/backends.py
33
dask
{ "docstring": "Create an array of ones\n\n Returns a new array having a specified shape and filled\n with ones.\n ", "language": "en", "n_whitespaces": 38, "n_words": 17, "vocab_size": 15 }
8
Python
8
c4d35f5515191409913827fd4faa3b69a3d7399a
backends.py
157,100
2
21
ones
https://github.com/dask/dask.git
Backend library dispatching for IO in Dask-Array and Dask-DataFrame (#9475)
22
0
36,851
6
5
8
def selective_find(str, char, index, pos):
    l = len(str)
    while 1:
        pos += 1
        if pos == l:
            return (-1, -1)
        c = str[pos]
        if c == char:
            return index+1, pos
        elif c < char:
            index += 1
python3.10.4/Lib/encodings/punycode.py
98
XX-Net
{ "docstring": "Return a pair (index, pos), indicating the next occurrence of\n char in str. index is the position of the character considering\n only ordinals up to and including char, and pos is the position in\n the full string. index/pos is the starting position in the full\n string.", "language": "en", "n_whitespaces": 57, "n_words": 46, "vocab_size": 30 }
37
Python
26
8198943edd73a363c266633e1aa5b2a9e9c9f526
punycode.py
217,168
11
62
selective_find
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
114
0
54,692
11
12
16
def forward(self, input, mask, kv=None, cache=None, head_mask=None, output_attentions=False):
    # Input is (bs, qlen, dim)
    # Mask is (bs, klen) (non-causal) or (bs, klen, klen)
    bs, qlen, dim = input.size()
    if kv is None:
        klen = qlen if cache is None else cache["slen"] + qlen
    else:
        klen = kv.size(1)
    # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
    n_heads = self.n_heads
    dim_per_head = self.dim // n_heads
    mask_reshape = (bs, 1, qlen, klen) if mask.dim() == 3 else (bs, 1, 1, klen)
src/transformers/models/xlm/modeling_xlm.py
161
transformers
{ "docstring": "\n Self-attention (if kv is None) or attention over source sentence (provided by kv).\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 13 }
85
Python
56
d3cb28886ac68beba9a6646b422a4d727b056c0c
modeling_xlm.py
31,401
42
429
forward
https://github.com/huggingface/transformers.git
Not use -1e4 as attn mask (#17306) * Use torch.finfo(self.dtype).min * for GPTNeoX * for Albert * For Splinter * Update src/transformers/models/data2vec/modeling_data2vec_audio.py Co-authored-by: Patrick von Platen <[email protected]> * fix -inf used in Bart-like models * Fix a few remaining -inf * more fix * clean up * For CLIP * For FSMT * clean up * fix test * Add dtype argument and use it for LayoutLMv3 * update FlaxLongT5Attention Co-authored-by: ydshieh <[email protected]> Co-authored-by: Patrick von Platen <[email protected]>
177
0
5,731
12
1
31
def test_userreport(django_cache, default_project, monkeypatch):
    event_id = uuid.uuid4().hex
    start_time = time.time() - 3600
    mgr = EventManager(data={"event_id": event_id, "user": {"email": "[email protected]"}})
    mgr.normalize()
    mgr.save(default_project.id)
    (evtuser,) = EventUser.objects.all()
    assert not evtuser.name
    assert not UserReport.objects.all()
    assert process_userreport(
        {
            "type": "user_report",
            "start_time": start_time,
            "payload": json.dumps(
                {
                    "name": "Hans Gans",
                    "event_id": event_id,
                    "comments": "hello world",
                    "email": "[email protected]",
                }
            ),
            "project_id": default_project.id,
        },
        projects={default_project.id: default_project},
    )
    (report,) = UserReport.objects.all()
    assert report.comments == "hello world"
    (evtuser,) = EventUser.objects.all()
    assert evtuser.name == "Hans Gans"


@pytest.mark.django_db
tests/sentry/ingest/ingest_consumer/test_ingest_consumer_processing.py
321
@pytest.mark.django_db
sentry
{ "docstring": "\n Test that user_report-type kafka messages end up in a user report being\n persisted. We additionally test some logic around upserting data in\n eventuser which is also present in the legacy endpoint.\n ", "language": "en", "n_whitespaces": 44, "n_words": 31, "vocab_size": 29 }
75
Python
56
8384b745769e08ffa2b10e9a546fce5d9d435da9
test_ingest_consumer_processing.py
86,390
29
180
test_userreport
https://github.com/getsentry/sentry.git
fix(tests): More django cache clears in ingest consumer tests (#39481)
301
1
18,110
14
1
4
def readinto(self, b): self._unsupported("readinto")
python3.10.4/Lib/_pyio.py
27
XX-Net
{ "docstring": "Read bytes into a pre-allocated bytes-like object b.\n\n Returns an int representing the number of bytes read (0 for EOF), or\n None if the object is set not to block and has no data to read.\n ", "language": "en", "n_whitespaces": 57, "n_words": 36, "vocab_size": 32 }
4
Python
4
8198943edd73a363c266633e1aa5b2a9e9c9f526
_pyio.py
219,903
2
14
readinto
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
18
0
55,891
8
1
19
def test_recovery_cluster_failure_resume_all(tmp_path, shutdown_only):
    ray.shutdown()
    tmp_path = tmp_path
    subprocess.check_call(["ray", "start", "--head"])
    time.sleep(1)
    workflow_dir = tmp_path / "workflow"
    lock_file = tmp_path / "lock_file"
    lock = FileLock(lock_file)
    lock.acquire()
    proc = run_string_as_driver_nonblocking(
        f
python/ray/workflow/tests/test_recovery.py
132
proc = run_string_as_driver_nonblocking( f"""@ray.remote
ray
{ "docstring": "\nimport time\nimport ray\nfrom ray import workflow\nfrom filelock import FileLock\n\[email protected]", "language": "en", "n_whitespaces": 8, "n_words": 13, "vocab_size": 8 }
29
Python
21
ddd63aba77b0e4da699e358beba37cd907f7cb37
test_recovery.py
123,977
39
146
test_recovery_cluster_failure_resume_all
https://github.com/ray-project/ray.git
[workflow] Major refactoring - new async workflow executor (#25618) * major workflow refactoring
62
2
27,486
9
7
13
def calc(term):
    # This part is for reading and converting arithmetic terms.
    term = term.replace(" ", "")
    term = term.replace("^", "**")
    term = term.replace("=", "")
    term = term.replace("?", "")
    term = term.replace("%", "/100.00")
    term = term.replace("rad", "radians")
    term = term.replace("mod", "%")
    term = term.replace("aval", "abs")

    functions = [
        "sin",
        "cos",
        "tan",
        "pow",
        "cosh",
        "sinh",
        "tanh",
        "sqrt",
        "pi",
        "radians",
        "e",
    ]

    # This part is for reading and converting function expressions.
    term = term.lower()
    for func in functions:
        if func in term:
            withmath = "math." + func
            term = term.replace(func, withmath)

    try:
        # here goes the actual evaluating.
        term = eval(term)
    # here goes to the error cases.
    except ZeroDivisionError:
        print("Can't divide by 0. Please try again.")
    except NameError:
        print("Invalid input. Please try again")
    except AttributeError:
        print("Please check usage method and try again.")
    except TypeError:
        print("please enter inputs of correct datatype ")
    return term
calculator.py
345
Python
{ "docstring": "\n input: term of type str\n output: returns the result of the computed term.\n purpose: This function is the actual calculator and the heart of the application\n ", "language": "en", "n_whitespaces": 39, "n_words": 26, "vocab_size": 20 }
143
Python
93
f0af0c43340763724f139fa68aa1e5a9ffe458b4
calculator.py
22,594
38
182
calc
https://github.com/geekcomputers/Python.git
refactor: clean code Signed-off-by: slowy07 <[email protected]>
359
0
4,373
12
3
24
def csc_median_axis_0(X):
    if not isinstance(X, sp.csc_matrix):
        raise TypeError("Expected matrix of CSC format, got %s" % X.format)

    indptr = X.indptr
    n_samples, n_features = X.shape
    median = np.zeros(n_features)

    for f_ind, (start, end) in enumerate(zip(indptr[:-1], indptr[1:])):
        # Prevent modifying X in place
        data = np.copy(X.data[start:end])
        nz = n_samples - data.size
        median[f_ind] = _get_median(data, nz)

    return median
sklearn/utils/sparsefuncs.py
173
scikit-learn
{ "docstring": "Find the median across axis 0 of a CSC matrix.\n\n It is equivalent to doing np.median(X, axis=0).\n\n Parameters\n ----------\n X : sparse matrix of shape (n_samples, n_features)\n Input data. It should be of CSC format.\n\n Returns\n -------\n median : ndarray of shape (n_features,)\n Median.\n ", "language": "en", "n_whitespaces": 82, "n_words": 44, "vocab_size": 36 }
53
Python
46
15599753b63f10748ffb374aacd37dbb37806a37
sparsefuncs.py
260,971
11
109
csc_median_axis_0
https://github.com/scikit-learn/scikit-learn.git
DOC ensures sklearn.utils.sparsefuncs.csc_median_axis_0 passes numpydoc validation (#24461)
109
0
76,593
12
3
14
def get_block_class(self, name=None, module_path=None):
    meta = type(
        "Meta",
        (self.base_block_class._meta_class,),
        {
            "icon": self.icon,
        },
    )
    cls = type(
        name or "%sChooserBlock" % self.model_name,
        (self.base_block_class,),
        {
            "target_model": self.model,
            "widget": self.widget_class(),
            "Meta": meta,
        },
    )
    if module_path:
        cls.__module__ = module_path
    return cls
wagtail/admin/viewsets/chooser.py
144
wagtail
{ "docstring": "\n Returns a StreamField ChooserBlock class using this chooser.\n\n :param name: Name to give to the class; defaults to the model name with \"ChooserBlock\" appended\n :param module_path: The dotted path of the module where the class can be imported from; used when\n deconstructing the block definition for migration files.\n ", "language": "en", "n_whitespaces": 88, "n_words": 48, "vocab_size": 40 }
39
Python
32
b4bc6818659ae785af39965569ed4bca51f0bf0d
chooser.py
79,167
20
90
get_block_class
https://github.com/wagtail/wagtail.git
Fix DocumentChooserBlock deconstruction for custom document models Fixes #8989. The previous fix #9004 failed for custom document models because ChooserViewset assigns an internal name for the ChooserBlock class based on the model name, and if this is anything other than Document it won't match the name DocumentChooserBlock that it's exposed under in wagtail.documents.blocks. Fix this by replacing the `block_class` property with a `get_block_class` method that lets us specify the class name. As a bonus, user code that defines chooser blocks no longer has to directly hack the `__module__` attribute.
247
0
16,885
12
11
28
def unapply(self, project_state, schema_editor, collect_sql=False):
    # Construct all the intermediate states we need for a reverse migration
    to_run = []
    new_state = project_state
    # Phase 1
    for operation in self.operations:
        # If it's irreversible, error out
        if not operation.reversible:
            raise IrreversibleError(
                "Operation %s in %s is not reversible" % (operation, self)
            )
        # Preserve new state from previous run to not tamper the same state
        # over all operations
        new_state = new_state.clone()
        old_state = new_state.clone()
        operation.state_forwards(self.app_label, new_state)
        to_run.insert(0, (operation, old_state, new_state))

    # Phase 2
    for operation, to_state, from_state in to_run:
        if collect_sql:
            schema_editor.collected_sql.append("--")
            if not operation.reduces_to_sql:
                schema_editor.collected_sql.append(
                    "-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS SQL:"
                )
            schema_editor.collected_sql.append("-- %s" % operation.describe())
            schema_editor.collected_sql.append("--")
            if not operation.reduces_to_sql:
                continue
        atomic_operation = operation.atomic or (
            self.atomic and operation.atomic is not False
        )
        if not schema_editor.atomic_migration and atomic_operation:
            # Force a transaction on a non-transactional-DDL backend or an
            # atomic operation inside a non-atomic migration.
            with atomic(schema_editor.connection.alias):
                operation.database_backwards(
                    self.app_label, schema_editor, from_state, to_state
                )
        else:
            # Normal behaviour
            operation.database_backwards(
                self.app_label, schema_editor, from_state, to_state
            )
    return project_state
django/db/migrations/migration.py
351
django
{ "docstring": "\n Take a project_state representing all migrations prior to this one\n and a schema_editor for a live database and apply the migration\n in a reverse order.\n\n The backwards migration process consists of two phases:\n\n 1. The intermediate states from right before the first until right\n after the last operation inside this migration are preserved.\n 2. The operations are applied in reverse order using the states\n recorded in step 1.\n ", "language": "en", "n_whitespaces": 138, "n_words": 68, "vocab_size": 49 }
172
Python
116
9c19aff7c7561e3a82978a272ecdaad40dda5c00
migration.py
205,312
36
214
unapply
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
767
0
51,088
15
2
5
def do_filter(self) -> bool: return self._active and self._do_filter
plugins/extract/align/_base/processing.py
28
faceswap
{ "docstring": "bool: ``True`` if re-aligning is active and faces which failed the aligner filter test\n should not be re-aligned otherwise ``False``", "language": "en", "n_whitespaces": 26, "n_words": 20, "vocab_size": 20 }
8
Python
8
9e2026f6feba4fc1d60e0d985cbc1ba9c44a4848
processing.py
102,046
4
16
do_filter
https://github.com/deepfakes/faceswap.git
Extract: Implement re-align/2nd pass - implement configurable re-align function in extract - update locales + documentation - re-factor align._base and split to separate modules - move normalization method to plugin parent - bugfix: FAN use zeros for pre-processing crop - lint AlignedFilter
22
0
21,413
7
1
6
def vector_part(self): return Quaternion(0, self.b, self.c, self.d)
sympy/algebras/quaternion.py
36
sympy
{ "docstring": "\n Returns the vector part of the quaternion.\n\n Explanation\n ===========\n\n If q is a quaternion given by q = a + b*i + c*j + d*k where a, b, c and d\n are real numbers then the vector part of q is b*i + c*j + d*k.\n\n Returns\n =======\n\n Quaternion: representing vector part of the quaternion.\n\n Examples\n ========\n\n >>> from sympy.algebras.quaternion import Quaternion\n >>> q = Quaternion(1, 1, 1, 1)\n >>> q.vector_part()\n 0 + 1*i + 1*j + 1*k\n\n >>> q = Quaternion(4, 8, 13, 12)\n >>> q.vector_part()\n 0 + 8*i + 13*j + 12*k\n\n See Also\n ========\n https://en.wikipedia.org/wiki/Versor\n\n ", "language": "en", "n_whitespaces": 246, "n_words": 98, "vocab_size": 59 }
7
Python
7
72b9b01d0ede4543c1d3f10e08a79345c550254a
quaternion.py
196,561
2
23
vector_part
https://github.com/sympy/sympy.git
minor changes
21
0
47,998
8
4
12
def get_results_limit(request):
    config = get_config()
    try:
        limit = int(request.query_params.get('limit', config.PAGINATE_COUNT)) or config.MAX_PAGE_SIZE
    except ValueError:
        limit = config.PAGINATE_COUNT
    if config.MAX_PAGE_SIZE:
        limit = min(limit, config.MAX_PAGE_SIZE)

    return limit
netbox/ipam/api/views.py
99
netbox
{ "docstring": "\n Return the lesser of the specified limit (if any) and the configured MAX_PAGE_SIZE.\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 11 }
25
Python
19
de17a651e6f976e8b7c16b49d4e78f6a6988b870
views.py
265,579
9
59
get_results_limit
https://github.com/netbox-community/netbox.git
Closes #10043: Add support for 'limit' query parameter to available VLANs API endpoint
64
0
78,147
14
1
3
def parse_with_tabs(self) -> "ParserElement":
    self.keepTabs = True
    return self
pipenv/patched/notpip/_vendor/pyparsing/core.py
29
pipenv
{ "docstring": "\n Overrides default behavior to expand ``<TAB>`` s to spaces before parsing the input string.\n Must be called before ``parse_string`` when the input grammar contains elements that\n match ``<TAB>`` characters.\n ", "language": "en", "n_whitespaces": 58, "n_words": 29, "vocab_size": 24 }
9
Python
9
f3166e673fe8d40277b804d35d77dcdb760fc3b3
core.py
20,549
8
15
parse_with_tabs
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
30
0
3,421
7
1
11
def sql_flush(style, connection, reset_sequences=True, allow_cascade=False):
    tables = connection.introspection.django_table_names(
        only_existing=True, include_views=False
    )
    return connection.ops.sql_flush(
        style,
        tables,
        reset_sequences=reset_sequences,
        allow_cascade=allow_cascade,
    )
django/core/management/sql.py
76
django
{ "docstring": "\n Return a list of the SQL statements used to flush the database.\n ", "language": "en", "n_whitespaces": 19, "n_words": 12, "vocab_size": 11 }
18
Python
17
9c19aff7c7561e3a82978a272ecdaad40dda5c00
sql.py
204,710
10
52
sql_flush
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
68
0
50,845
9
2
4
def set_saved_weights_in_checkpoint_flag(config):
    for input_feature in config.get("input_features", []):
        input_feature["saved_weights_in_checkpoint"] = True
ludwig/utils/misc_utils.py
45
ludwig
{ "docstring": "Adds a flag to all input features indicating that the weights are saved in the checkpoint.\n\n Next time the model is loaded we will restore pre-trained encoder weights from ludwig model (and not load from cache\n or model hub).\n ", "language": "en", "n_whitespaces": 48, "n_words": 39, "vocab_size": 33 }
10
Python
10
7a2bfd65e97e81f02e4c8821e2a82a78d5f6ab00
misc_utils.py
6,987
3
25
set_saved_weights_in_checkpoint_flag
https://github.com/ludwig-ai/ludwig.git
Set saved_weights_in_checkpoint immediately after creating model. Also adds test. (#2131) * Set saved_weights_in_checkpoint immediately after creating model. Also adds integration test. * Set saved_weights_in_checkpoint config key on load. Co-authored-by: Daniel Treiman <[email protected]>
23
0
1,097
10
2
55
def test_condinst_maskhead_loss(self):
    s = 256
    img_metas = [{
        'img_shape': (s, s, 3),
        'pad_shape': (s, s, 3),
        'scale_factor': 1,
    }]
    condinst_bboxhead = CondInstBboxHead(
        num_classes=4,
        in_channels=1,
        feat_channels=1,
        stacked_convs=1,
        norm_cfg=None)

    mask_feature_head = _fake_mask_feature_head()
    condinst_maskhead = CondInstMaskHead(
        mask_feature_head=mask_feature_head,
        loss_mask=dict(
            type='DiceLoss',
            use_sigmoid=True,
            activate=True,
            eps=5e-6,
            loss_weight=1.0))

    # Fcos head expects a multiple levels of features per image
    feats = []
    for i in range(len(condinst_bboxhead.strides)):
        feats.append(
            torch.rand(1, 1, s // (2**(i + 3)), s // (2**(i + 3))))
    feats = tuple(feats)
    cls_scores, bbox_preds, centernesses, param_preds =\
        condinst_bboxhead.forward(feats)

    # Test that empty ground truth encourages the network to
    # predict background
    gt_instances = InstanceData()
    gt_instances.bboxes = torch.empty((0, 4))
    gt_instances.labels = torch.LongTensor([])
    gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)

    _ = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds, centernesses, param_preds, [gt_instances], img_metas)
    # When truth is empty then all mask loss
    # should be zero for random inputs
    positive_infos = condinst_bboxhead.get_positive_infos()
    mask_outs = condinst_maskhead.forward(feats, positive_infos)
    empty_gt_mask_losses = condinst_maskhead.loss_by_feat(
        *mask_outs, [gt_instances], img_metas, positive_infos)
    loss_mask = empty_gt_mask_losses['loss_mask']
    self.assertEqual(loss_mask, 0, 'mask loss should be zero')

    # When truth is non-empty then all cls, box loss and centerness loss
    # should be nonzero for random inputs
    gt_instances = InstanceData()
    gt_instances.bboxes = torch.Tensor(
        [[23.6667, 23.8757, 238.6326, 151.8874]])
    gt_instances.labels = torch.LongTensor([2])
    gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)

    _ = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds, centernesses, param_preds, [gt_instances], img_metas)
    positive_infos = condinst_bboxhead.get_positive_infos()
    mask_outs = condinst_maskhead.forward(feats, positive_infos)
    one_gt_mask_losses = condinst_maskhead.loss_by_feat(
        *mask_outs, [gt_instances], img_metas, positive_infos)
    loss_mask = one_gt_mask_losses['loss_mask']
    self.assertGreater(loss_mask, 0, 'mask loss should be nonzero')
tests/test_models/test_dense_heads/test_condinst_head.py
641
mmdetection
{ "docstring": "Tests condinst maskhead loss when truth is empty and non-empty.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
228
Python
134
79c8295801acedee0cbdbf128a01b9fe162646b0
test_condinst_head.py
245,868
56
412
test_condinst_maskhead_loss
https://github.com/open-mmlab/mmdetection.git
[Feature]: Support Condinst (#9223) * [Feature]: support condinst for instance segmentation * update * update * update * fix config name and add test unit * fix squeeze error * add README and chang mask to poly
917
0
70,917
16
8
12
def get_updates_for(self, inputs):
    if inputs is None:
        # Requesting unconditional updates.
        return [u for u in self.updates if u._unconditional_update]

    # Requesting input-conditional updates.
    updates = [u for u in self.updates if not u._unconditional_update]
    inputs = tf.nest.flatten(inputs)
    reachable = tf_utils.get_reachable_from_inputs(inputs, updates)
    return [u for u in updates if u in reachable]
keras/engine/base_layer_v1.py
117
keras
{ "docstring": "Retrieves updates relevant to a specific set of inputs.\n\n Args:\n inputs: Input tensor or list/tuple of input tensors.\n\n Returns:\n List of update ops of the layer that depend on `inputs`.\n ", "language": "en", "n_whitespaces": 69, "n_words": 30, "vocab_size": 27 }
50
Python
27
84afc5193d38057e2e2badf9c889ea87d80d8fbf
base_layer_v1.py
270,918
7
75
get_updates_for
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
121
0
80,599
10
1
6
def _get_free_vram(self) -> List[float]:
    vram = []
    self._log("debug", f"GPU VRAM free: {vram}")
    return vram
lib/gpu_stats/cpu.py
50
faceswap
{ "docstring": " Obtain the amount of RAM that is available, in Megabytes, for the running system.\n\n Returns\n -------\n list\n An empty list for CPU backends\n ", "language": "en", "n_whitespaces": 64, "n_words": 23, "vocab_size": 20 }
14
Python
13
bdbbad4d310fb606b6f412aa81e9f57ccd994e97
cpu.py
100,568
11
26
_get_free_vram
https://github.com/deepfakes/faceswap.git
Refactor lib.gpu_stats (#1218) * inital gpu_stats refactor * Add dummy CPU Backend * Update Sphinx documentation
42
0
20,032
9
2
17
def _ensure_html_response(url, session):
    # type: (str, PipSession) -> None
    scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
    if scheme not in {'http', 'https'}:
        raise _NotHTTP()

    resp = session.head(url, allow_redirects=True)
    raise_for_status(resp)

    _ensure_html_header(resp)
.venv/lib/python3.8/site-packages/pip/_internal/index/collector.py
98
transferlearning
{ "docstring": "Send a HEAD request to the URL, and ensure the response contains HTML.\n\n Raises `_NotHTTP` if the URL is not available for a HEAD request, or\n `_NotHTML` if the content type is not text/html.\n ", "language": "en", "n_whitespaces": 43, "n_words": 34, "vocab_size": 26 }
30
Python
29
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
collector.py
60,716
7
60
_ensure_html_response
https://github.com/jindongwang/transferlearning.git
upd; format
58
0
12,258
9
15
50
def __new__(cls, name, patch, symbols=None, relations={}, **kwargs):
    if not isinstance(name, Str):
        name = Str(name)

    # canonicallize the symbols
    if symbols is None:
        names = kwargs.get('names', None)
        if names is None:
            symbols = Tuple(
                *[Symbol('%s_%s' % (name.name, i), real=True) for i in range(patch.dim)]
            )
        else:
            sympy_deprecation_warning(
                f,
                deprecated_since_version="1.7",
                active_deprecations_target="deprecated-diffgeom-mutable",
            )
            symbols = Tuple(
                *[Symbol(n, real=True) for n in names]
            )
    else:
        syms = []
        for s in symbols:
            if isinstance(s, Symbol):
                syms.append(Symbol(s.name, **s._assumptions.generator))
            elif isinstance(s, str):
                sympy_deprecation_warning(
                    f,
                    deprecated_since_version="1.7",
                    active_deprecations_target="deprecated-diffgeom-mutable",
                )
                syms.append(Symbol(s, real=True))
        symbols = Tuple(*syms)

    # canonicallize the relations
    rel_temp = {}
    for k,v in relations.items():
        s1, s2 = k
        if not isinstance(s1, Str):
            s1 = Str(s1)
        if not isinstance(s2, Str):
            s2 = Str(s2)
        key = Tuple(s1, s2)
        # Old version used Lambda as a value.
        if isinstance(v, Lambda):
            v = (tuple(v.signature), tuple(v.expr))
        else:
            v = (tuple(v[0]), tuple(v[1]))
        rel_temp[key] = v
    relations = Dict(rel_temp)

    # construct the object
    obj = super().__new__(cls, name, patch, symbols, relations)

    # Add deprecated attributes
    obj.transforms = _deprecated_dict( , {})
    obj._names = [str(n) for n in symbols]
    obj.patch.coord_systems.append(obj)  # deprecated
    obj._dummies = [Dummy(str(n)) for n in symbols]  # deprecated
    obj._dummy = Dummy()

    return obj
sympy/diffgeom/diffgeom.py
681
sympy
{ "docstring": "\nThe 'names' argument to CoordSystem is deprecated. Use 'symbols' instead. That\nis, replace\n\n CoordSystem(..., names={names})\n\nwith\n\n CoordSystem(..., symbols=[{', '.join([\"Symbol(\" + repr(n) + \", real=True)\" for n in names])}])\n \n\nPassing a string as the coordinate symbol name to CoordSystem is deprecated.\nPass a Symbol with the appropriate name and assumptions instead.\n\nThat is, replace {s} with Symbol({s!r}, real=True).\n \n CoordSystem.transforms is deprecated. The CoordSystem class is now\n immutable. Use the 'relations' keyword argument to the\n CoordSystems() constructor to specify relations.\n ", "language": "en", "n_whitespaces": 167, "n_words": 78, "vocab_size": 52 }
188
Python
109
f8674bfe4988332e7ce60ceb36b365ce9aff662a
diffgeom.py
197,094
73
399
__new__
https://github.com/sympy/sympy.git
Update the sympy.diffgeom mutability deprecations
923
0
48,334
21
2
12
def GetHeaderGuardCPPVariable(filename):
    # Restores original filename in case that cpplint is invoked from Emacs's
    # flymake.
    filename = re.sub(r'_flymake\.h$', '.h', filename)
    filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)

    fileinfo = FileInfo(filename)
    file_path_from_root = fileinfo.RepositoryName()
    if _root:
        file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
    return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
code/deep/BJMMD/caffe/scripts/cpp_lint.py
145
transferlearning
{ "docstring": "Returns the CPP variable that should be used as a header guard.\n\n Args:\n filename: The name of a C++ header file.\n\n Returns:\n The CPP variable that should be used as a header guard in the\n named file.\n\n ", "language": "en", "n_whitespaces": 49, "n_words": 37, "vocab_size": 23 }
49
Python
38
cc4d0564756ca067516f71718a3d135996525909
cpp_lint.py
60,402
8
85
GetHeaderGuardCPPVariable
https://github.com/jindongwang/transferlearning.git
Balanced joint maximum mean discrepancy for deep transfer learning
61
0
12,130
13
4
18
def husl_palette(n_colors=6, h=.01, s=.9, l=.65, as_cmap=False):  # noqa
    if as_cmap:
        n_colors = 256
    hues = np.linspace(0, 1, int(n_colors) + 1)[:-1]
    hues += h
    hues %= 1
    hues *= 359
    s *= 99
    l *= 99  # noqa
    palette = [_color_to_rgb((h_i, s, l), input="husl") for h_i in hues]
    if as_cmap:
        return mpl.colors.ListedColormap(palette, "hsl")
    else:
        return _ColorPalette(palette)
seaborn/palettes.py
173
seaborn
{ "docstring": "\n Return hues with constant lightness and saturation in the HUSL system.\n\n The hues are evenly sampled along a circular path. The resulting palette will be\n appropriate for categorical or cyclical data.\n\n The `h`, `l`, and `s` values should be between 0 and 1.\n\n This function is similar to :func:`hls_palette`, but it uses a nonlinear color\n space that is more perceptually uniform.\n\n Parameters\n ----------\n n_colors : int\n Number of colors in the palette.\n h : float\n The value of the first hue.\n l : float\n The lightness value.\n s : float\n The saturation intensity.\n as_cmap : bool\n If True, return a matplotlib colormap object.\n\n Returns\n -------\n palette\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n See Also\n --------\n hls_palette : Make a palette using evenly spaced hues in the HSL system.\n\n Examples\n --------\n .. include:: ../docstrings/husl_palette.rst\n\n ", "language": "en", "n_whitespaces": 245, "n_words": 133, "vocab_size": 97 }
55
Python
42
e644793f0ac2b1be178425f20f529121f37f29de
palettes.py
42,252
14
117
husl_palette
https://github.com/mwaskom/seaborn.git
Convert color palette docstrings to notebooks (#3034) * Convert color palette docstrings to notebooks and rerun all with py310 kernel * Add v0.12.1 release notes to index * Improve failure mode when ipywidgets is not involved * Update palettes docstrings * Remove all other doctest-style examples * Remove doctest-oriented testing infrastructure * Mention in release notes * Skip colormap patch test on matplotlib's where it's not relevant * Use more robust approach to mpl backcompat
111
0
7,512
12
1
6
def aix_platform():
    # type: () -> str
    vrmf, bd = _aix_bosmp64()
    return _aix_tag(_aix_vrtl(vrmf), bd)


# extract vrtl from the BUILD_GNU_TYPE as an int
python3.10.4/Lib/_aix_support.py
42
XX-Net
{ "docstring": "\n AIX filesets are identified by four decimal values: V.R.M.F.\n V (version) and R (release) can be retreived using ``uname``\n Since 2007, starting with AIX 5.3 TL7, the M value has been\n included with the fileset bos.mp64 and represents the Technology\n Level (TL) of AIX. The F (Fix) value also increases, but is not\n relevant for comparing releases and binary compatibility.\n For binary compatibility the so-called builddate is needed.\n Again, the builddate of an AIX release is associated with bos.mp64.\n AIX ABI compatibility is described as guaranteed at: https://www.ibm.com/\\\n support/knowledgecenter/en/ssw_aix_72/install/binary_compatability.html\n\n For pep425 purposes the AIX platform tag becomes:\n \"aix-{:1x}{:1d}{:02d}-{:04d}-{}\".format(v, r, tl, builddate, bitsize)\n e.g., \"aix-6107-1415-32\" for AIX 6.1 TL7 bd 1415, 32-bit\n and, \"aix-6107-1415-64\" for AIX 6.1 TL7 bd 1415, 64-bit\n ", "language": "en", "n_whitespaces": 167, "n_words": 120, "vocab_size": 90 }
23
Python
22
8198943edd73a363c266633e1aa5b2a9e9c9f526
_aix_support.py
219,476
3
22
aix_platform
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
34
0
55,599
9
5
24
def __call__(self, *fields):
    orders = (covariant_order(e) + contravariant_order(e) for e in self.args)
    mul = 1/Mul(*(factorial(o) for o in orders))
    perms = permutations(fields)
    perms_par = (Permutation(
        p).signature() for p in permutations(range(len(fields))))
    tensor_prod = TensorProduct(*self.args)
    return mul*Add(*[tensor_prod(*p[0])*p[1] for p in zip(perms, perms_par)])
sympy/diffgeom/diffgeom.py
185
sympy
{ "docstring": "Apply on a list of vector_fields.\n The expression is rewritten internally in terms of tensor products and evaluated.", "language": "en", "n_whitespaces": 24, "n_words": 18, "vocab_size": 17 }
40
Python
29
8fc835bcd86ea080644783a363e47adca6dff3a7
diffgeom.py
200,252
8
117
__call__
https://github.com/sympy/sympy.git
Remove redundant list calls
100
0
49,566
15
26
51
def _check_ordering(cls):
    if cls._meta._ordering_clash:
        return [
            checks.Error(
                "'ordering' and 'order_with_respect_to' cannot be used together.",
                obj=cls,
                id="models.E021",
            ),
        ]

    if cls._meta.order_with_respect_to or not cls._meta.ordering:
        return []

    if not isinstance(cls._meta.ordering, (list, tuple)):
        return [
            checks.Error(
                "'ordering' must be a tuple or list (even if you want to order by only one field).",
                obj=cls,
                id="models.E014",
            )
        ]

    errors = []
    fields = cls._meta.ordering

    # Skip expressions and '?' fields.
    fields = (f for f in fields if isinstance(f, str) and f != "?")

    # Convert "-field" to "field".
    fields = ((f[1:] if f.startswith("-") else f) for f in fields)

    # Separate related fields and non-related fields.
    _fields = []
    related_fields = []
    for f in fields:
        if LOOKUP_SEP in f:
            related_fields.append(f)
        else:
            _fields.append(f)
    fields = _fields

    # Check related fields.
    for field in related_fields:
        _cls = cls
        fld = None
        for part in field.split(LOOKUP_SEP):
            try:
                # pk is an alias that won't be found by opts.get_field.
                if part == "pk":
                    fld = _cls._meta.pk
                else:
                    fld = _cls._meta.get_field(part)
                if fld.is_relation:
                    _cls = fld.path_infos[-1].to_opts.model
                else:
                    _cls = None
            except (FieldDoesNotExist, AttributeError):
                if fld is None or (
                    fld.get_transform(part) is None and fld.get_lookup(part) is None
                ):
                    errors.append(
                        checks.Error(
                            "'ordering' refers to the nonexistent field, "
                            "related field, or lookup '%s'." % field,
                            obj=cls,
                            id="models.E015",
                        )
                    )

    # Skip ordering on pk. This is always a valid order_by field
    # but is an alias and therefore won't be found by opts.get_field.
    fields = {f for f in fields if f != "pk"}

    # Check for invalid or nonexistent fields in ordering.
    invalid_fields = []

    # Any field name that is not present in field_names does not exist.
    # Also, ordering by m2m fields is not allowed.
    opts = cls._meta
    valid_fields = set(
        chain.from_iterable(
            (f.name, f.attname)
            if not (f.auto_created and not f.concrete)
            else (f.field.related_query_name(),)
            for f in chain(opts.fields, opts.related_objects)
        )
    )

    invalid_fields.extend(fields - valid_fields)

    for invalid_field in invalid_fields:
        errors.append(
            checks.Error(
                "'ordering' refers to the nonexistent field, related "
                "field, or lookup '%s'." % invalid_field,
                obj=cls,
                id="models.E015",
            )
        )
    return errors
django/db/models/base.py
673
django
{ "docstring": "\n Check \"ordering\" option -- is it a list of strings and do all fields\n exist?\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 15 }
332
Python
161
9c19aff7c7561e3a82978a272ecdaad40dda5c00
base.py
205,420
78
414
_check_ordering
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
1,568
0
51,123
21
3
17
def cyclic_reduction(self, removed=False):
    word = self.copy()
    g = self.group.identity
    while not word.is_cyclically_reduced():
        exp1 = abs(word.exponent_syllable(0))
        exp2 = abs(word.exponent_syllable(-1))
        exp = min(exp1, exp2)
        start = word[0]**abs(exp)
        end = word[-1]**abs(exp)
        word = start**-1*word*end**-1
        g = g*start
    if removed:
        return word, g
    return word
sympy/combinatorics/free_groups.py
184
sympy
{ "docstring": "Return a cyclically reduced version of the word. Unlike\n `identity_cyclic_reduction`, this will not cyclically permute\n the reduced word - just remove the \"unreduced\" bits on either\n side of it. Compare the examples with those of\n `identity_cyclic_reduction`.\n\n When `removed` is `True`, return a tuple `(word, r)` where\n self `r` is such that before the reduction the word was either\n `r*word*r**-1`.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import free_group\n >>> F, x, y = free_group(\"x, y\")\n >>> (x**2*y**2*x**-1).cyclic_reduction()\n x*y**2\n >>> (x**-3*y**-1*x**5).cyclic_reduction()\n y**-1*x**2\n >>> (x**-3*y**-1*x**5).cyclic_reduction(removed=True)\n (y**-1*x**2, x**-3)\n\n ", "language": "en", "n_whitespaces": 209, "n_words": 83, "vocab_size": 66 }
41
Python
28
498015021131af4dbb07eb110e5badaba8250c7b
free_groups.py
196,069
14
113
cyclic_reduction
https://github.com/sympy/sympy.git
Updated import locations
171
0
47,569
13
1
5
def execute():
    frappe.reload_doc("manufacturing", "doctype", "bom")
    frappe.reload_doc("manufacturing", "doctype", "bom_operation")

    frappe.db.sql(
    )
erpnext/patches/v13_0/set_operation_time_based_on_operating_cost.py
63
erpnext
{ "docstring": "\n\t\tUPDATE\n\t\t\t`tabBOM Operation`\n\t\tSET\n\t\t\ttime_in_mins = (operating_cost * 60) / hour_rate\n\t\tWHERE\n\t\t\ttime_in_mins = 0 AND operating_cost > 0\n\t\t\tAND hour_rate > 0 AND docstatus = 1 AND parenttype = \"BOM\"\n\t", "language": "en", "n_whitespaces": 24, "n_words": 31, "vocab_size": 20 }
10
Python
8
494bd9ef78313436f0424b918f200dab8fc7c20b
set_operation_time_based_on_operating_cost.py
66,794
14
32
execute
https://github.com/frappe/erpnext.git
style: format code with black
5
0
14,337
8
1
10
def predict(self, X):
    # Note: since `predict` does not accept semi-supervised labels as input,
    # `fit(X, y).predict(X) != fit(X, y).transduction_`.
    # Hence, `fit_predict` is not implemented.
    # See https://github.com/scikit-learn/scikit-learn/pull/24898
    probas = self.predict_proba(X)
    return self.classes_[np.argmax(probas, axis=1)].ravel()
sklearn/semi_supervised/_label_propagation.py
62
scikit-learn
{ "docstring": "Perform inductive inference across the model.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data matrix.\n\n Returns\n -------\n y : ndarray of shape (n_samples,)\n Predictions for input data.\n ", "language": "en", "n_whitespaces": 101, "n_words": 30, "vocab_size": 27 }
35
Python
31
2b34dfde2453743fa046312a49cc312a5586ea04
_label_propagation.py
261,698
3
36
predict
https://github.com/scikit-learn/scikit-learn.git
DOC Improve docs of BaseLabelPropagation.transduction_ (#24985)
84
0
76,941
11
1
7
def _array_perimeter(arr):
    # note we use Python's half-open ranges to avoid repeating
    # the corners
    forward = np.s_[0:-1]      # [0 ... -1)
    backward = np.s_[-1:0:-1]  # [-1 ... 0)

    return np.concatenate((
        arr[0, forward],
        arr[forward, -1],
        arr[-1, backward],
        arr[backward, 0],
    ))
lib/matplotlib/cbook/__init__.py
104
matplotlib
{ "docstring": "\n Get the elements on the perimeter of *arr*.\n\n Parameters\n ----------\n arr : ndarray, shape (M, N)\n The input array.\n\n Returns\n -------\n ndarray, shape (2*(M - 1) + 2*(N - 1),)\n The elements on the perimeter of the array::\n\n [arr[0, 0], ..., arr[0, -1], ..., arr[-1, -1], ..., arr[-1, 0], ...]\n\n Examples\n --------\n >>> i, j = np.ogrid[:3, :4]\n >>> a = i*10 + j\n >>> a\n array([[ 0, 1, 2, 3],\n [10, 11, 12, 13],\n [20, 21, 22, 23]])\n >>> _array_perimeter(a)\n array([ 0, 1, 2, 3, 13, 23, 22, 21, 20, 10])\n ", "language": "en", "n_whitespaces": 191, "n_words": 92, "vocab_size": 64 }
40
Python
35
13438f842729df1b04445d44ea83f616d1b85567
__init__.py
110,055
9
69
_array_perimeter
https://github.com/matplotlib/matplotlib.git
Fix some minor docstring typos
95
0
23,900
10
3
27
def _hits_numpy(G, normalized=True):
    import numpy as np

    if len(G) == 0:
        return {}, {}
    adj_ary = nx.to_numpy_array(G)
    # Hub matrix
    H = adj_ary @ adj_ary.T
    e, ev = np.linalg.eig(H)
    h = ev[:, np.argmax(e)]  # eigenvector corresponding to the maximum eigenvalue
    # Authority matrix
    A = adj_ary.T @ adj_ary
    e, ev = np.linalg.eig(A)
    a = ev[:, np.argmax(e)]  # eigenvector corresponding to the maximum eigenvalue
    if normalized:
        h /= h.sum()
        a /= a.sum()
    else:
        h /= h.max()
        a /= a.max()
    hubs = dict(zip(G, map(float, h)))
    authorities = dict(zip(G, map(float, a)))
    return hubs, authorities
networkx/algorithms/link_analysis/hits_alg.py
276
networkx
{ "docstring": "Returns HITS hubs and authorities values for nodes.\n\n The HITS algorithm computes two numbers for a node.\n Authorities estimates the node value based on the incoming links.\n Hubs estimates the node value based on outgoing links.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n normalized : bool (default=True)\n Normalize results by the sum of all of the values.\n\n Returns\n -------\n (hubs,authorities) : two-tuple of dictionaries\n Two dictionaries keyed by node containing the hub and authority\n values.\n\n Examples\n --------\n >>> G = nx.path_graph(4)\n\n The `hubs` and `authorities` are given by the eigenvectors corresponding to the\n maximum eigenvalues of the hubs_matrix and the authority_matrix, respectively.\n\n The ``hubs`` and ``authority`` matrices are computed from the adjancency\n matrix:\n\n >>> adj_ary = nx.to_numpy_array(G)\n >>> hubs_matrix = adj_ary @ adj_ary.T\n >>> authority_matrix = adj_ary.T @ adj_ary\n\n `_hits_numpy` maps the eigenvector corresponding to the maximum eigenvalue\n of the respective matrices to the nodes in `G`:\n\n >>> from networkx.algorithms.link_analysis.hits_alg import _hits_numpy\n >>> hubs, authority = _hits_numpy(G)\n\n Notes\n -----\n The eigenvector calculation uses NumPy's interface to LAPACK.\n\n The HITS algorithm was designed for directed graphs but this\n algorithm does not check if the input graph is directed and will\n execute on undirected graphs.\n\n References\n ----------\n .. [1] A. Langville and C. Meyer,\n \"A survey of eigenvector methods of web information retrieval.\"\n http://citeseer.ist.psu.edu/713792.html\n .. [2] Jon Kleinberg,\n Authoritative sources in a hyperlinked environment\n Journal of the ACM 46 (5): 604-32, 1999.\n doi:10.1145/324133.324140.\n http://www.cs.cornell.edu/home/kleinber/auth.pdf.\n ", "language": "en", "n_whitespaces": 397, "n_words": 233, "vocab_size": 146 }
91
Python
53
e5f1edb82a379ceb6afcf421fa5f6b4cb43cfbaf
hits_alg.py
176,920
20
173
_hits_numpy
https://github.com/networkx/networkx.git
Make HITS numpy and scipy private functions (#5771) * Make HITS numpy and scipy private functions * fix examples with correct imports * remove functions from TOC
179
0
42,172
12
3
14
def _get_dependency_info() -> dict[str, JSONSerializable]:
    deps = [
        "pandas",
        # required
        "numpy",
        "pytz",
        "dateutil",
        # install / build,
        "setuptools",
        "pip",
        "Cython",
        # test
        "pytest",
        "hypothesis",
        # docs
        "sphinx",
        # Other, need a min version
        "blosc",
        "feather",
        "xlsxwriter",
        "lxml.etree",
        "html5lib",
        "pymysql",
        "psycopg2",
        "jinja2",
        # Other, not imported.
        "IPython",
        "pandas_datareader",
    ]
    deps.extend(list(VERSIONS))

    result: dict[str, JSONSerializable] = {}
    for modname in deps:
        mod = import_optional_dependency(modname, errors="ignore")
        result[modname] = get_version(mod) if mod else None
    return result
pandas/util/_print_versions.py
191
pandas
{ "docstring": "\n Returns dependency information as a JSON serializable dictionary.\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
72
Python
61
44b660dc4a07f4fb507c31795ae63dca2e6e9440
_print_versions.py
166,568
32
106
_get_dependency_info
https://github.com/pandas-dev/pandas.git
fix pandas.show_versions() and remove pin for setuptools (#47096)
289
0
39,828
12
4
11
def push(self, actor):
    busy_actors = []
    if self._future_to_actor.values():
        _, busy_actors = zip(*self._future_to_actor.values())
    if actor in self._idle_actors or actor in busy_actors:
        raise ValueError("Actor already belongs to current ActorPool")
    else:
        self._return_actor(actor)
python/ray/util/actor_pool.py
103
ray
{ "docstring": "Pushes a new actor into the current list of idle actors.\n\n Examples:\n >>> @ray.remote # doctest: +SKIP\n >>> class Actor: # doctest: +SKIP\n ... ... # doctest: +SKIP\n >>> a1, b1 = Actor.remote(), Actor.remote() # doctest: +SKIP\n >>> pool = ActorPool([a1]) # doctest: +SKIP\n >>> pool.submit(lambda a, v: a.double.remote(v), 1) # doctest: +SKIP\n >>> print(pool.get_next()) # doctest: +SKIP\n 2\n >>> pool2 = ActorPool([b1]) # doctest: +SKIP\n >>> pool2.push(pool.pop_idle()) # doctest: +SKIP\n ", "language": "en", "n_whitespaces": 199, "n_words": 71, "vocab_size": 37 }
29
Python
24
21f1e8a5c65241ef618df88885c38222550209dd
actor_pool.py
140,502
8
60
push
https://github.com/ray-project/ray.git
[Core] Use newly pushed actor for existing pending tasks (#24980) Newly pushed actors will never be used with existing pending submits, so the worker will not be used to speed up existing tasks. If _return_actor is called at the end of push instead, the actor is pushed to _idle_actors and immediately used if there are pending submits.
97
0
31,978
14
7
16
def concat(cls, axis, left_parts, right_parts):
    # TODO: Possible change is `isinstance(right_parts, list)`
    if type(right_parts) is list:
        # `np.array` with partitions of empty ModinFrame has a shape (0,)
        # but `np.concatenate` can concatenate arrays only if its shapes at
        # specified axis are equals, so filtering empty frames to avoid concat error
        right_parts = [o for o in right_parts if o.size != 0]
        to_concat = (
            [left_parts] + right_parts if left_parts.size != 0 else right_parts
        )
        result = (
            np.concatenate(to_concat, axis=axis) if len(to_concat) else left_parts
        )
    else:
        result = np.append(left_parts, right_parts, axis=axis)
    if axis == 0:
        return cls.rebalance_partitions(result)
    else:
        return result, None
modin/core/dataframe/pandas/partitioning/partition_manager.py
167
modin
{ "docstring": "\n Concatenate the blocks of partitions with another set of blocks.\n\n Parameters\n ----------\n axis : int\n The axis to concatenate to.\n left_parts : np.ndarray\n NumPy array of partitions to concatenate with.\n right_parts : np.ndarray or list\n NumPy array of partitions to be concatenated.\n\n Returns\n -------\n np.ndarray\n A new NumPy array with concatenated partitions.\n list[int] or None\n Row lengths if possible to compute it.\n\n Notes\n -----\n Assumes that the blocks are already the same shape on the\n dimension being concatenated. A ValueError will be thrown if this\n condition is not met.\n ", "language": "en", "n_whitespaces": 257, "n_words": 89, "vocab_size": 61 }
101
Python
76
eee5f435f68786778184c4886ff25d386fce0c4f
partition_manager.py
154,354
15
107
concat
https://github.com/modin-project/modin.git
PERF-#4892: Compute `lengths` in `rebalance_partitions` when possible (#4893) Signed-off-by: Myachev <[email protected]>
294
0
35,945
13
1
8
def _secs2timedelta(secs):
    msec = int(abs(secs - int(secs)) * 100)
    return f"{datetime.timedelta(seconds=int(secs))}.{msec:02d}"
paddlenlp/trainer/trainer_utils.py
70
PaddleNLP
{ "docstring": "\n convert seconds to hh:mm:ss.msec, msecs rounded to 2 decimals\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 8 }
11
Python
11
44a290e94d1becd1f09fddc3d873f9e19c9d6919
trainer_utils.py
323,178
3
25
_secs2timedelta
https://github.com/PaddlePaddle/PaddleNLP.git
[Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761) * add some datasets for finetune. * support fine tune for all tastks. * add trainer prototype. * init verison for paddlenlp trainer. * refine trainer. * update for some details. * support multi-cards training evaluation. * support load from ckpt. * support for export inference model. * first version of trainer. * seq cls support clue. * trainer support for token classification and question answersing tasks. * fix as reviews. Co-authored-by: Zeyu Chen <[email protected]>
20
0
118,399
14
8
21
def _get_module_collection_mode(mode_dict, name, noarchive=False):
    # Default mode: collect into PYZ, unless noarchive is enabled. In that case, collect as pyc.
    mode_flags = _ModuleCollectionMode.PYC if noarchive else _ModuleCollectionMode.PYZ

    # If we have no collection mode settings, end here and now.
    if not mode_dict:
        return mode_flags

    # Search the parent modules/packages in top-down fashion, and take the last given setting. This ensures that
    # a setting given for the top-level package is recursively propagated to all its subpackages and submodules,
    # but also allows individual sub-modules to override the setting again.
    mode = 'pyz'

    name_parts = name.split('.')
    for i in range(len(name_parts)):
        modlevel = ".".join(name_parts[:i + 1])
        modlevel_mode = mode_dict.get(modlevel, None)
        if modlevel_mode is not None:
            mode = modlevel_mode

    # Convert mode string to _ModuleCollectionMode flags
    try:
        mode_flags = _MODULE_COLLECTION_MODES[mode]
    except KeyError:
        raise ValueError(f"Unknown module collection mode for {name!r}: {mode!r}!")

    # noarchive flag being set means that we need to change _ModuleCollectionMode.PYZ into _ModuleCollectionMode.PYC
    if noarchive and _ModuleCollectionMode.PYZ in mode_flags:
        mode_flags ^= _ModuleCollectionMode.PYZ
        mode_flags |= _ModuleCollectionMode.PYC

    return mode_flags
PyInstaller/building/build_main.py
216
pyinstaller
{ "docstring": "\n Determine the module/package collection mode for the given module name, based on the provided collection\n mode settings dictionary.\n ", "language": "en", "n_whitespaces": 28, "n_words": 18, "vocab_size": 14 }
165
Python
106
6e1bfa2de254d8ae302f54dcea0cfefae4dd3585
build_main.py
263,853
19
122
_get_module_collection_mode
https://github.com/pyinstaller/pyinstaller.git
building: more module collection modes, consolidate noarchive codepath Map the module collection mode strings into (combinations of) integer flags that control try basic collection modes: - collect a pyc into PYZ archive - collect a pyc as a data file - collect a py as a data file Consolidate the `noarchive=True` codepath into module collection mode, where "collect a pyc into PYZ archive" flag is swapped for a "collect a pyc as a data file". The new collection mode also implicitly fixes couple of minor annoyances of the `noarchive=True` mode: - the user-writable paths containing python source code are not littered with pyc/pyo files anymore; all pycs are now gathered in build directory - the name of pycs in local build directory are not mangled anymore (was previously the case for pycs that could not be written to their original locations due to lack of permissions) - the pycs have code paths stripped from them, same as in noarchive=False mode
283
0
77,469
13
1
6
def adapt(self, data, batch_size=None, steps=None): super().adapt(data, batch_size=batch_size, steps=steps)
keras/layers/preprocessing/discretization.py
49
keras
{ "docstring": "Computes bin boundaries from quantiles in a input dataset.\n\n Calling `adapt()` on a `Discretization` layer is an alternative to passing\n in a `bin_boundaries` argument during construction. A `Discretization` layer\n should always be either adapted over a dataset or passed `bin_boundaries`.\n\n During `adapt()`, the layer will estimate the quantile boundaries of the\n input dataset. The number of quantiles can be controlled via the `num_bins`\n argument, and the error tolerance for quantile boundaries can be controlled\n via the `epsilon` argument.\n\n In order to make `Discretization` efficient in any distribution context, the\n computed boundaries are kept static with respect to any compiled `tf.Graph`s\n that call the layer. As a consequence, if the layer is adapted a second\n time, any models using the layer should be re-compiled. For more information\n see `tf.keras.layers.experimental.preprocessing.PreprocessingLayer.adapt`.\n\n `adapt()` is meant only as a single machine utility to compute layer state.\n To analyze a dataset that cannot fit on a single machine, see\n [Tensorflow Transform](https://www.tensorflow.org/tfx/transform/get_started)\n for a multi-machine, map-reduce solution.\n\n Arguments:\n data: The data to train on. It can be passed either as a\n `tf.data.Dataset`, or as a numpy array.\n batch_size: Integer or `None`.\n Number of samples per state update.\n If unspecified, `batch_size` will default to 32.\n Do not specify the `batch_size` if your data is in the\n form of datasets, generators, or `keras.utils.Sequence` instances\n (since they generate batches).\n steps: Integer or `None`.\n Total number of steps (batches of samples)\n When training with input tensors such as\n TensorFlow data tensors, the default `None` is equal to\n the number of samples in your dataset divided by\n the batch size, or 1 if that cannot be determined. If x is a\n `tf.data` dataset, and 'steps' is None, the epoch will run until\n the input dataset is exhausted. When passing an infinitely\n repeating dataset, you must specify the `steps` argument. This\n argument is not supported with array inputs.\n ", "language": "en", "n_whitespaces": 653, "n_words": 305, "vocab_size": 175 }
8
Python
8
84afc5193d38057e2e2badf9c889ea87d80d8fbf
discretization.py
272,918
2
32
adapt
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
22
0
81,075
9
1
11
def calculate_bounds_for_mechanism(value_array, min_val_array, max_val_array):
    # TODO: Double check whether the iDPGaussianMechanism class squares its squared_l2_norm values!!
    worst_case_l2_norm = np.sqrt(np.sum(np.square(max_val_array - min_val_array))) * np.ones_like(value_array)
    l2_norm = np.sqrt(np.sum(np.square(value_array))) * np.ones_like(value_array)
    # print(l2_norm.shape, worst_case_l2_norm.shape)
    # print(l2_norm.shape)
    return l2_norm, worst_case_l2_norm
packages/syft/src/syft/core/adp/vectorized_publish.py
113
PySyft
{ "docstring": "Calculates the squared L2 norm values needed to create a Mechanism, and calculate privacy budget + spend If you calculate the privacy budget spend with the worst case bound, you can show this number to the D.S.\n If you calculate it with the regular value (the value computed below when public_only = False, you cannot show the \n privacy budget to the DS because this violates privacy.\n ", "language": "en", "n_whitespaces": 76, "n_words": 66, "vocab_size": 43 }
36
Python
30
56137bacda6fea5a0053c65eb6fd88688f5298cc
vectorized_publish.py
819
8
67
calculate_bounds_for_mechanism
https://github.com/OpenMined/PySyft.git
Implemented working vectorized_publish method into codebase Took 26 minutes
61
0
126
14
1
3
def __call__(self, f):
jina/serve/instrumentation/__init__.py
15
jina
{ "docstring": "function that gets called when this class is used as a decortor\n :param f: function that is decorated\n :return: wrapped function\n ", "language": "en", "n_whitespaces": 42, "n_words": 21, "vocab_size": 17 }
3
Python
3
71e422211fe10930d384f2bf679785d3c415f514
__init__.py
13,277
4
19
__call__
https://github.com/jina-ai/jina.git
feat: record existing Prometheus metrics into OpenTelemetry Histograms (#5275)
10
0
2,593
6
3
11
def _node_redundancy(G, v):
    n = len(G[v])
    overlap = sum(
        1 for (u, w) in combinations(G[v], 2) if (G[u].keys() & G[w].keys()) - {v}
    )
    return (2 * overlap) / (n * (n - 1))
networkx/algorithms/bipartite/redundancy.py
121
networkx
{ "docstring": "Returns the redundancy of the node `v` in the bipartite graph `G`.\n\n If `G` is a graph with `n` nodes, the redundancy of a node is the ratio\n of the \"overlap\" of `v` to the maximum possible overlap of `v`\n according to its degree. The overlap of `v` is the number of pairs of\n neighbors that have mutual neighbors themselves, other than `v`.\n\n `v` must have at least two neighbors in `G`.\n\n ", "language": "en", "n_whitespaces": 90, "n_words": 72, "vocab_size": 41 }
33
Python
29
1f033118f2e0cca12c6e2375708dc92197b62da6
redundancy.py
177,486
6
79
_node_redundancy
https://github.com/networkx/networkx.git
Minor Python 2 cleanup (#6219) Python3 cleanup Use dict.keys() for set operations rather than explicitly creating sets.
55
0
42,392
15
8
22
def add_edges_from(self, ebunch_to_add, **attr):
    for e in ebunch_to_add:
        ne = len(e)
        if ne == 3:
            u, v, dd = e
        elif ne == 2:
            u, v = e
            dd = {}
        else:
            raise NetworkXError(f"Edge tuple {e} must be a 2-tuple or 3-tuple.")
        if u not in self._succ:
            if u is None:
                raise ValueError("None cannot be a node")
            self._succ[u] = self.adjlist_inner_dict_factory()
            self._pred[u] = self.adjlist_inner_dict_factory()
            self._node[u] = self.node_attr_dict_factory()
        if v not in self._succ:
            if v is None:
                raise ValueError("None cannot be a node")
            self._succ[v] = self.adjlist_inner_dict_factory()
            self._pred[v] = self.adjlist_inner_dict_factory()
            self._node[v] = self.node_attr_dict_factory()
        datadict = self._adj[u].get(v, self.edge_attr_dict_factory())
        datadict.update(attr)
        datadict.update(dd)
        self._succ[u][v] = datadict
        self._pred[v][u] = datadict
networkx/classes/digraph.py
350
networkx
{ "docstring": "Add all the edges in ebunch_to_add.\n\n Parameters\n ----------\n ebunch_to_add : container of edges\n Each edge given in the container will be added to the\n graph. The edges must be given as 2-tuples (u, v) or\n 3-tuples (u, v, d) where d is a dictionary containing edge data.\n attr : keyword arguments, optional\n Edge data (or labels or objects) can be assigned using\n keyword arguments.\n\n See Also\n --------\n add_edge : add a single edge\n add_weighted_edges_from : convenient way to add weighted edges\n\n Notes\n -----\n Adding the same edge twice has no effect but any edge data\n will be updated when each duplicate edge is added.\n\n Edge attributes specified in an ebunch take precedence over\n attributes specified via keyword arguments.\n\n When adding edges from an iterator over the graph you are changing,\n a `RuntimeError` can be raised with message:\n `RuntimeError: dictionary changed size during iteration`. This\n happens when the graph's underlying dictionary is modified during\n iteration. To avoid this error, evaluate the iterator into a separate\n object, e.g. by using `list(iterator_of_edges)`, and pass this\n object to `G.add_edges_from`.\n\n Examples\n --------\n >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc\n >>> G.add_edges_from([(0, 1), (1, 2)]) # using a list of edge tuples\n >>> e = zip(range(0, 3), range(1, 4))\n >>> G.add_edges_from(e) # Add the path graph 0-1-2-3\n\n Associate data to edges\n\n >>> G.add_edges_from([(1, 2), (2, 3)], weight=3)\n >>> G.add_edges_from([(3, 4), (1, 4)], label=\"WN2898\")\n\n Evaluate an iterator over a graph if using it to modify the same graph\n\n >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 4)])\n >>> # Grow graph by one new node, adding edges to all existing nodes.\n >>> # wrong way - will raise RuntimeError\n >>> # G.add_edges_from(((5, n) for n in G.nodes))\n >>> # right way - note that there will be no self-edge for node 5\n >>> G.add_edges_from(list((5, n) for n in G.nodes))\n ", "language": "en", "n_whitespaces": 629, "n_words": 305, "vocab_size": 185 }
102
Python
55
979d54acba7c3d372c93d44c6c149700608ce8b0
digraph.py
177,528
27
217
add_edges_from
https://github.com/networkx/networkx.git
doc: update documentation when providing an iterator over current graph to add/remove_edges_from. (#6268) * doc for add_edges_from * doc for digraph * doc for multigraph * multigraph.add_nodes_from returns keylist * update docs for graph - edges * doc update: graph.add_nodes_from * doc update: graph.remove_nodes_from * doc update: graph.add_edges_from * doc update: rewording for graph.add_edges_from * doc update: graph.add_weighted_edges_from rewording * doc update: digraph updated as graph * doc update: digraph minor sync * doc update: multigraph same as graph * Update graph.py * Update digraph.py * Update multigraph.py
455
0
42,422
14
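A minimal usage sketch for the `DiGraph.add_edges_from` record above, showing both the 2-tuple and 3-tuple edge forms; the graph contents are illustrative only.

import networkx as nx

G = nx.DiGraph()
G.add_edges_from([(1, 2), (2, 3)])                       # plain 2-tuples
G.add_edges_from([(3, 4, {"weight": 0.5})], label="WN")  # 3-tuple plus keyword attrs
print(G.edges(data=True))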
5
14
def validate_house_rent_dates(doc):
    if not doc.rented_to_date or not doc.rented_from_date:
        frappe.throw(_("House rented dates required for exemption calculation"))

    if date_diff(doc.rented_to_date, doc.rented_from_date) < 14:
        frappe.throw(_("House rented dates should be atleast 15 days apart"))

    proofs = frappe.db.sql(
        ,
        {
            "employee": doc.employee,
            "payroll_period": doc.payroll_period,
            "from_date": doc.rented_from_date,
            "to_date": doc.rented_to_date,
        },
    )

    if proofs:
        frappe.throw(_("House rent paid days overlapping with {0}").format(proofs[0][0]))
erpnext/regional/india/utils.py
183
erpnext
{ "docstring": "\n select name\n from `tabEmployee Tax Exemption Proof Submission`\n where\n docstatus=1 and employee=%(employee)s and payroll_period=%(payroll_period)s\n and (rented_from_date between %(from_date)s and %(to_date)s or rented_to_date between %(from_date)s and %(to_date)s)\n ", "language": "en", "n_whitespaces": 73, "n_words": 26, "vocab_size": 20 }
53
Python
45
12b7e14fded587abc0f7821e3c3dfbea64498a7d
utils.py
68,939
22
109
validate_house_rent_dates
https://github.com/frappe/erpnext.git
chore: keep back code to be a part of other apps / to be ported later
37
0
14,948
14
3
27
def customer_query(doctype, txt, searchfield, start, page_len, filters):
    conditions = []
    cust_master_name = frappe.defaults.get_user_default("cust_master_name")

    if cust_master_name == "Customer Name":
        fields = ["name", "customer_group", "territory"]
    else:
        fields = ["name", "customer_name", "customer_group", "territory"]

    fields = get_fields("Customer", fields)

    searchfields = frappe.get_meta("Customer").get_search_fields()
    searchfields = " or ".join(field + " like %(txt)s" for field in searchfields)

    return frappe.db.sql(
        .format(
            **{
                "fields": ", ".join(fields),
                "scond": searchfields,
                "mcond": get_match_cond(doctype),
                "fcond": get_filters_cond(doctype, filters, conditions).replace("%", "%%"),
            }
        ),
        {"txt": "%%%s%%" % txt, "_txt": txt.replace("%", ""), "start": start, "page_len": page_len},
    )


# searches for supplier
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
erpnext/controllers/queries.py
322
@frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
erpnext
{ "docstring": "select {fields} from `tabCustomer`\n\t\twhere docstatus < 2\n\t\t\tand ({scond}) and disabled=0\n\t\t\t{fcond} {mcond}\n\t\torder by\n\t\t\tif(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),\n\t\t\tif(locate(%(_txt)s, customer_name), locate(%(_txt)s, customer_name), 99999),\n\t\t\tidx desc,\n\t\t\tname, customer_name\n\t\tlimit %(start)s, %(page_len)s", "language": "en", "n_whitespaces": 23, "n_words": 33, "vocab_size": 27 }
86
Python
69
494bd9ef78313436f0424b918f200dab8fc7c20b
queries.py
65,661
30
172
customer_query
https://github.com/frappe/erpnext.git
style: format code with black
62
1
13,980
16
4
10
def get_default_contact(out, name):
    contact_persons = frappe.db.sql(
        ,
        (name),
        as_dict=1,
    )

    if contact_persons:
        for out.contact_person in contact_persons:
            if out.contact_person.is_primary_contact:
                return out.contact_person

        out.contact_person = contact_persons[0]
        return out.contact_person
erpnext/stock/doctype/delivery_trip/delivery_trip.py
90
erpnext
{ "docstring": "\n\t\t\tSELECT parent,\n\t\t\t\t(SELECT is_primary_contact FROM tabContact c WHERE c.name = dl.parent) AS is_primary_contact\n\t\t\tFROM\n\t\t\t\t`tabDynamic Link` dl\n\t\t\tWHERE\n\t\t\t\tdl.link_doctype=\"Customer\"\n\t\t\t\tAND dl.link_name=%s\n\t\t\t\tAND dl.parenttype = \"Contact\"\n\t\t", "language": "en", "n_whitespaces": 17, "n_words": 25, "vocab_size": 20 }
25
Python
18
494bd9ef78313436f0424b918f200dab8fc7c20b
delivery_trip.py
67,609
21
59
get_default_contact
https://github.com/frappe/erpnext.git
style: format code with black
13
0
14,575
12
10
34
def distance(hass, *args):
    locations = []

    to_process = list(args)

    while to_process:
        value = to_process.pop(0)
        if isinstance(value, str) and not valid_entity_id(value):
            point_state = None
        else:
            point_state = _resolve_state(hass, value)

        if point_state is None:
            # We expect this and next value to be lat&lng
            if not to_process:
                _LOGGER.warning(
                    "Distance:Expected latitude and longitude, got %s", value
                )
                return None

            value_2 = to_process.pop(0)
            latitude = convert(value, float)
            longitude = convert(value_2, float)

            if latitude is None or longitude is None:
                _LOGGER.warning(
                    "Distance:Unable to process latitude and longitude: %s, %s",
                    value,
                    value_2,
                )
                return None

        else:
            if not loc_helper.has_location(point_state):
                _LOGGER.warning(
                    "Distance:State does not contain valid location: %s", point_state
                )
                return None

            latitude = point_state.attributes.get(ATTR_LATITUDE)
            longitude = point_state.attributes.get(ATTR_LONGITUDE)

        locations.append((latitude, longitude))

    if len(locations) == 1:
        return hass.config.distance(*locations[0])

    return hass.config.units.length(
        loc_util.distance(*locations[0] + locations[1]), UnitOfLength.METERS
    )
homeassistant/helpers/template.py
358
core
{ "docstring": "Calculate distance.\n\n Will calculate distance from home to a point or between points.\n Points can be passed in using state objects or lat/lng coordinates.\n ", "language": "en", "n_whitespaces": 33, "n_words": 24, "vocab_size": 23 }
126
Python
72
9f7fd8956f22bd873d14ae89460cdffe6ef6f85d
template.py
297,243
39
223
distance
https://github.com/home-assistant/core.git
Use new unit enums in helpers (#83387)
554
0
96,212
15
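The `distance` helper above ultimately reduces a pair of latitude/longitude points to a great-circle distance in meters. As a rough, standalone sketch of that underlying calculation (a plain haversine implementation, not Home Assistant's own `loc_util.distance`):

from math import asin, cos, radians, sin, sqrt

EARTH_RADIUS_M = 6371000  # mean Earth radius, an approximation

def haversine_m(lat1, lon1, lat2, lon2):
    """Approximate great-circle distance between two lat/lng points in meters."""
    rlat1, rlon1, rlat2, rlon2 = map(radians, (lat1, lon1, lat2, lon2))
    dlat, dlon = rlat2 - rlat1, rlon2 - rlon1
    a = sin(dlat / 2) ** 2 + cos(rlat1) * cos(rlat2) * sin(dlon / 2) ** 2
    return 2 * EARTH_RADIUS_M * asin(sqrt(a))

# Berlin -> Paris, roughly 880 km
print(round(haversine_m(52.52, 13.405, 48.8566, 2.3522) / 1000), "km")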
1
14
def test_filter_with_failing_queryset(self):
    modeladmin = DecadeFilterBookAdminWithFailingQueryset(Book, site)
    request = self.request_factory.get("/", {})
    request.user = self.alfred
    with self.assertRaises(ZeroDivisionError):
        modeladmin.get_changelist_instance(request)
tests/admin_filters/tests.py
83
django
{ "docstring": "\n When a filter's queryset method fails, it fails loudly and\n the corresponding exception doesn't get swallowed (#17828).\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 17 }
16
Python
14
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,127
6
48
test_filter_with_failing_queryset
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
62
0
51,872
9
1
25
def test_background_populate_rooms_creator_column(self) -> None:
    # Insert a room without the creator
    room_id = self._generate_room()
    self.get_success(
        self.store.db_pool.simple_update(
            table="rooms",
            keyvalues={"room_id": room_id},
            updatevalues={"creator": None},
            desc="test",
        )
    )

    # Make sure the test is starting out with a room without a creator
    room_creator_before = self.get_success(
        self.store.db_pool.simple_select_one_onecol(
            table="rooms",
            keyvalues={"room_id": room_id},
            retcol="creator",
            allow_none=True,
        )
    )
    self.assertEqual(room_creator_before, None)

    # Insert and run the background update.
    self.get_success(
        self.store.db_pool.simple_insert(
            "background_updates",
            {
                "update_name": _BackgroundUpdates.POPULATE_ROOMS_CREATOR_COLUMN,
                "progress_json": "{}",
            },
        )
    )

    # ... and tell the DataStore that it hasn't finished all updates yet
    self.store.db_pool.updates._all_done = False

    # Now let's actually drive the updates to completion
    self.wait_for_background_updates()

    # Make sure the background update filled in the room creator
    room_creator_after = self.get_success(
        self.store.db_pool.simple_select_one_onecol(
            table="rooms",
            keyvalues={"room_id": room_id},
            retcol="creator",
            allow_none=True,
        )
    )
    self.assertEqual(room_creator_after, self.user_id)
tests/storage/databases/main/test_room.py
316
synapse
{ "docstring": "Test that the background update to populate the rooms creator column\n works properly.\n ", "language": "en", "n_whitespaces": 27, "n_words": 13, "vocab_size": 12 }
119
Python
73
3ac412b4e2f8c5ba11dc962b8a9d871c1efdce9b
test_room.py
250,085
42
188
test_background_populate_rooms_creator_column
https://github.com/matrix-org/synapse.git
Require types in tests.storage. (#14646) Adds missing type hints to `tests.storage` package and does not allow untyped definitions.
610
0
73,263
14
1
8
def with_content_json(self, content):
    obj = super().with_content_json(content)

    # Ensure other values that are meaningful for the object as a whole (rather than
    # to a specific revision) are preserved
    obj.locked = self.locked
    obj.locked_at = self.locked_at
    obj.locked_by = self.locked_by

    return obj
wagtail/models/__init__.py
70
wagtail
{ "docstring": "\n Returns a new version of the object with field values updated to reflect changes\n in the provided ``content`` (which usually comes from a previously-saved revision).\n\n Certain field values are preserved in order to prevent errors if the returned\n object is saved, such as ``id``. The following field values are also preserved,\n as they are considered to be meaningful to the object as a whole, rather than\n to a specific revision:\n\n * ``locked``\n * ``locked_at``\n * ``locked_by``\n ", "language": "en", "n_whitespaces": 147, "n_words": 76, "vocab_size": 53 }
39
Python
32
1cec004d97f0cfa3d4a8a1e99eea42ae7f651993
__init__.py
79,041
6
41
with_content_json
https://github.com/wagtail/wagtail.git
Extract page locking fields into a mixin
95
0
16,859
10
1
2
def hidesurface(self):
    return self["hidesurface"]
packages/python/plotly/plotly/graph_objs/_surface.py
22
plotly.py
{ "docstring": "\n Determines whether or not a surface is drawn. For example, set\n `hidesurface` to False `contours.x.show` to True and\n `contours.y.show` to True to draw a wire frame plot.\n\n The 'hidesurface' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n ", "language": "en", "n_whitespaces": 107, "n_words": 43, "vocab_size": 35 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_surface.py
228,377
2
11
hidesurface
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
60,050
7
3
15
def _total_vram_required(self) -> float:
    vrams = self._vram_per_phase
    vram_required_count = sum(1 for p in vrams.values() if p > 0)
    logger.debug("VRAM requirements: %s. Plugins requiring VRAM: %s",
                 vrams, vram_required_count)
    retval = (sum(vrams.values()) *
              self._parallel_scaling.get(vram_required_count, self._scaling_fallback))
    logger.debug("Total VRAM required: %s", retval)
    return retval
plugins/extract/pipeline.py
124
faceswap
{ "docstring": " Return vram required for all phases plus the buffer ", "language": "en", "n_whitespaces": 10, "n_words": 9, "vocab_size": 9 }
40
Python
35
13cfb3f39e72e9ca181f173b7b3db2a048db0d08
pipeline.py
101,466
10
76
_total_vram_required
https://github.com/deepfakes/faceswap.git
extract: Add batch processing mode
126
0
20,879
12
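To make the arithmetic in `_total_vram_required` concrete, here is a small self-contained sketch with hypothetical per-phase figures and a hypothetical scaling table; Faceswap's real values will differ.

# Hypothetical numbers, for illustration only
vram_per_phase = {"detect": 4096, "align": 2048, "mask": 0}   # MB per phase
parallel_scaling = {1: 1.0, 2: 0.9, 3: 0.8}
scaling_fallback = 0.75

required_count = sum(1 for v in vram_per_phase.values() if v > 0)  # 2 phases need VRAM
total = sum(vram_per_phase.values()) * parallel_scaling.get(required_count, scaling_fallback)
print(required_count, total)  # 2 5529.6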
1
14
def test_check_perms_grant_test_true(test_file):
    expected = {
        "comment": "",
        "changes": {"grant_perms": {"Users": {"permissions": "read_execute"}}},
        "name": str(test_file),
        "result": None,
    }
    with patch.dict(win_dacl.__opts__, {"test": True}):
        result = win_file.check_perms(
            path=str(test_file),
            grant_perms={"Users": {"perms": "read_execute"}},
            inheritance=None,
        )
    assert result == expected
tests/pytests/functional/modules/win_file/test_check_perms.py
165
salt
{ "docstring": "\n Test setting grant perms on a file with test=True\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
34
Python
31
5550d1823e9cb571740ae9e57b25424cfe6a919e
test_check_perms.py
216,417
14
91
test_check_perms_grant_test_true
https://github.com/saltstack/salt.git
Add changelong
128
0
54,581
16
1
2
def ayref(self):
    return self["ayref"]
packages/python/plotly/plotly/graph_objs/layout/_annotation.py
22
plotly.py
{ "docstring": "\n Indicates in what coordinates the tail of the annotation\n (ax,ay) is specified. If set to a ay axis id (e.g. \"ay\" or\n \"ay2\"), the `ay` position refers to a ay coordinate. If set to\n \"paper\", the `ay` position refers to the distance from the\n bottom of the plotting area in normalized coordinates where 0\n (1) corresponds to the bottom (top). If set to a ay axis ID\n followed by \"domain\" (separated by a space), the position\n behaves like for \"paper\", but refers to the distance in\n fractions of the domain length from the bottom of the domain of\n that axis: e.g., *ay2 domain* refers to the domain of the\n second ay axis and a ay position of 0.5 refers to the point\n between the bottom and the top of the domain of the second ay\n axis. In order for absolute positioning of the arrow to work,\n \"ayref\" must be exactly the same as \"yref\", otherwise \"ayref\"\n will revert to \"pixel\" (explained next). For relative\n positioning, \"ayref\" can be set to \"pixel\", in which case the\n \"ay\" value is specified in pixels relative to \"y\". Absolute\n positioning is useful for trendline annotations which should\n continue to indicate the correct trend when zoomed. Relative\n positioning is useful for specifying the text offset for an\n annotated point.\n\n The 'ayref' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['pixel']\n - A string that matches one of the following regular expressions:\n ['^y([2-9]|[1-9][0-9]+)?( domain)?$']\n\n Returns\n -------\n Any\n ", "language": "en", "n_whitespaces": 481, "n_words": 249, "vocab_size": 133 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_annotation.py
230,876
2
11
ayref
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
62,549
7
1
5
def serialize(learning_rate_schedule):
    return generic_utils.serialize_keras_object(learning_rate_schedule)


@keras_export("keras.optimizers.schedules.deserialize")
keras/optimizers/schedules/learning_rate_schedule.py
35
@keras_export("keras.optimizers.schedules.deserialize")
keras
{ "docstring": "Serializes a `LearningRateSchedule` into a JSON-compatible representation.\n\n Args:\n learning_rate_schedule: The `LearningRateSchedule` object to serialize.\n\n Returns:\n A JSON-serializable dict representing the object's config.\n\n Example:\n\n >>> lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n ... 0.1, decay_steps=100000, decay_rate=0.96, staircase=True)\n >>> tf.keras.optimizers.schedules.serialize(lr_schedule)\n {'class_name': 'ExponentialDecay', 'config': {...}}\n ", "language": "en", "n_whitespaces": 74, "n_words": 38, "vocab_size": 35 }
5
Python
5
84afc5193d38057e2e2badf9c889ea87d80d8fbf
learning_rate_schedule.py
275,644
2
13
serialize
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
10
1
81,437
7
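A short round-trip sketch for the `serialize` record above, assuming the public `tf.keras.optimizers.schedules` entry points that the record's docstring references:

import tensorflow as tf

lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    0.1, decay_steps=100000, decay_rate=0.96, staircase=True)

config = tf.keras.optimizers.schedules.serialize(lr_schedule)   # JSON-compatible dict
restored = tf.keras.optimizers.schedules.deserialize(config)    # back to a schedule object
print(config["class_name"], type(restored).__name__)           # ExponentialDecay ExponentialDecay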
1
16
def get_arguments():
    parser = argparse.ArgumentParser(description="DML_CSR Network")
    parser.add_argument("--batch-size", type=int, default=1,
                        help="Number of images sent to the network in one step.")
    parser.add_argument("--data-dir", type=str, default=DATA_DIRECTORY,
                        help="Path to the directory containing the PASCAL VOC dataset.")
    parser.add_argument("--out-dir", type=str, default=DATA_DIRECTORY,
                        help="Path to the directory containing the PASCAL VOC dataset.")
    parser.add_argument("--dataset", type=str, default='val',
                        help="Path to the file listing the images in the dataset.")
    parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
                        help="The index of the label to ignore during the training.")
    parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
                        help="Number of classes to predict (including background).")
    parser.add_argument("--restore-from", type=str,
                        help="Where restore model parameters from.")
    parser.add_argument("--gpu", type=str, default='7',
                        help="choose gpu device.")
    parser.add_argument("--input-size", type=str, default=INPUT_SIZE,
                        help="Comma-separated string with height and width of images.")
    parser.add_argument("--local_rank", type=int, default=0,
                        help="choose gpu numbers")
    parser.add_argument('--dist-backend', default='nccl', type=str,
                        help='distributed backend')
    parser.add_argument("--model_type", type=int, default=0,
                        help="choose model type")
    return parser.parse_args()
parsing/dml_csr/test.py
389
insightface
{ "docstring": "Parse all the arguments provided from the CLI.\n \n Returns:\n A list of parsed arguments.\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 13 }
122
Python
78
af2d71fab71bfd819daef263f4988f36499c0af2
test.py
9,173
27
233
get_arguments
https://github.com/deepinsight/insightface.git
Create test.py
445
0
1,570
10
2
4
def is_link_local(self):
    return (self.network_address.is_link_local and
            self.broadcast_address.is_link_local)
python3.10.4/Lib/ipaddress.py
34
XX-Net
{ "docstring": "Test if the address is reserved for link-local.\n\n Returns:\n A boolean, True if the address is reserved per RFC 4291.\n\n ", "language": "en", "n_whitespaces": 45, "n_words": 20, "vocab_size": 15 }
6
Python
6
8198943edd73a363c266633e1aa5b2a9e9c9f526
ipaddress.py
218,534
3
20
is_link_local
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
35
0
55,371
9
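A quick sketch of the network-level `is_link_local` property from the standard-library `ipaddress` module; the networks are chosen for illustration.

import ipaddress

print(ipaddress.ip_network("fe80::/10").is_link_local)      # True: the IPv6 link-local block
print(ipaddress.ip_network("2001:db8::/32").is_link_local)  # False: documentation prefix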
1
2
def no_style():
    return make_style("nocolor")
django/core/management/color.py
22
django
{ "docstring": "\n Return a Style object with no color scheme.\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
4
Python
4
9c19aff7c7561e3a82978a272ecdaad40dda5c00
color.py
204,611
2
10
no_style
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
10
0
50,814
8
1
14
def _add_speaker_embedding(x, speaker_embedding):
    # Save the dimensions as human-readable names
    batch_size = x.size()[0]
    text_num_chars = x.size()[1]

    # Start by making a copy of each speaker embedding to match the input text length
    # The output of this has size (batch_size, text_num_chars * speaker_embedding_size)
    speaker_embedding_size = speaker_embedding.size()[1]
    e = speaker_embedding.repeat_interleave(text_num_chars, dim=1)

    # Reshape it and transpose
    e = e.reshape(batch_size, speaker_embedding_size, text_num_chars)
    e = e.transpose(1, 2)

    # Concatenate the tiled speaker embedding with the encoder output
    x = torch.cat((x, e), 2)
    return x
synthesizer/models/tacotron.py
141
MockingBird
{ "docstring": "Add speaker embedding\n This concats the speaker embedding for each char in the encoder output\n Args:\n x (3D tensor with size `[batch_size, text_num_chars, encoder_dims]`): the encoder output\n speaker_embedding (2D tensor `[batch_size, speaker_embedding_size]`): the speaker embedding\n\n Returns:\n 3D tensor with size `[batch_size, text_num_chars, encoder_dims+speaker_embedding_size]`\n ", "language": "en", "n_whitespaces": 108, "n_words": 43, "vocab_size": 27 }
81
Python
59
f17e3b04e1049528e13ae340db3ac8212c56a35d
tacotron.py
161,314
9
88
_add_speaker_embedding
https://github.com/babysor/MockingBird.git
Refactor (#650) * Refactor model * Add description for * update launch json
187
0
38,961
9
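A shape-only sketch of the tiling performed by `_add_speaker_embedding` above, with toy dimensions (batch 2, 5 characters, 8 encoder dims, 3-dim speaker embedding); these sizes are illustrative, not the model's real ones.

import torch

batch_size, text_num_chars, encoder_dims, spk_dim = 2, 5, 8, 3
x = torch.randn(batch_size, text_num_chars, encoder_dims)   # encoder output
spk = torch.randn(batch_size, spk_dim)                      # one embedding per utterance

e = spk.repeat_interleave(text_num_chars, dim=1)            # (2, 15)
e = e.reshape(batch_size, spk_dim, text_num_chars)          # (2, 3, 5)
e = e.transpose(1, 2)                                       # (2, 5, 3): same embedding at every char
out = torch.cat((x, e), 2)
print(out.shape)                                            # torch.Size([2, 5, 11])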
1
2
def locationssrc(self):
    return self["locationssrc"]
packages/python/plotly/plotly/graph_objs/_choropleth.py
22
plotly.py
{ "docstring": "\n Sets the source reference on Chart Studio Cloud for\n `locations`.\n\n The 'locationssrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ", "language": "en", "n_whitespaces": 84, "n_words": 27, "vocab_size": 25 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_choropleth.py
226,442
2
11
locationssrc
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
58,115
7