Dataset column summary (name, dtype, min, max — for "stringlengths" columns the min/max are string lengths):

ast_errors       stringlengths   0     3.2k
d_id             int64           44    121k
id               int64           70    338k
n_whitespaces    int64           3     14k
path             stringlengths   8     134
n_words          int64           4     4.82k
n_identifiers    int64           1     131
random_cut       stringlengths   16    15.8k
commit_message   stringlengths   2     15.3k
fun_name         stringlengths   1     84
commit_id        stringlengths   40    40
repo             stringlengths   3     28
file_name        stringlengths   5     79
ast_levels       int64           6     31
nloc             int64           1     548
url              stringlengths   31    59
complexity       int64           1     66
token_counts     int64           6     2.13k
n_ast_errors     int64           0     28
vocab_size       int64           4     1.11k
n_ast_nodes      int64           15    19.2k
language         stringclasses   1 value
documentation    dict
code             stringlengths   101   62.2k

The records below follow this column order.
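A minimal sketch of how rows with this schema might be loaded and inspected with the Hugging Face `datasets` library; the dataset id "user/dataset-name" and the split name are placeholder assumptions, not taken from this page.

    # Sketch only: load a dataset with the column schema above and inspect one record.
    # "user/dataset-name" is a hypothetical dataset id; substitute the real one.
    from datasets import load_dataset

    ds = load_dataset("user/dataset-name", split="train")  # assumed split name
    row = ds[0]
    print(row["repo"], row["path"], row["fun_name"])       # provenance of the sample
    print(row["documentation"]["docstring"])               # extracted docstring
    print(row["code"])                                     # full function source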
d_id: 56,295 | id: 221,252 | n_whitespaces: 39 | path: python3.10.4/Lib/calendar.py | n_words: 18 | n_identifiers: 10
def monthdays2calendar(self, year, month): days = list(self.itermonthdays2(year, month)) r
add python 3.10.4 for windows
fun_name: monthdays2calendar | commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526 | repo: XX-Net | file_name: calendar.py | ast_levels: 11 | nloc: 3 | url: https://github.com/XX-net/XX-Net.git | complexity: 2 | token_counts: 48 | n_ast_errors: 0 | vocab_size: 18 | n_ast_nodes: 72 | language: Python
{ "docstring": "\n Return a matrix representing a month's calendar.\n Each row represents a week; week entries are\n (day number, weekday number) tuples. Day numbers outside this month\n are zero.\n ", "language": "en", "n_whitespaces": 63, "n_words": 27, "vocab_size": 24 }
def monthdays2calendar(self, year, month): days = list(self.itermonthdays2(year, month)) return [ days[i:i+7] for i in range(0, len(days), 7) ]
d_id: 28,163 | id: 126,397 | n_whitespaces: 55 | path: python/ray/serve/drivers.py | n_words: 23 | n_identifiers: 8
async def predict_with_route(self, route_path, *args, **kwargs): if route_path not in self.dags: raise RayServeExc
[Serve] Support Multiple DAG Entrypoints in DAGDriver (#26573)
fun_name: predict_with_route | commit_id: 410fe1b5ec9e798d6e7ffbb5844e258d08e323b3 | repo: ray | file_name: drivers.py | ast_levels: 11 | nloc: 4 | url: https://github.com/ray-project/ray.git | complexity: 2 | token_counts: 45 | n_ast_errors: 0 | vocab_size: 21 | n_ast_nodes: 76 | language: Python
{ "docstring": "Perform inference directly without HTTP for multi dags.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
async def predict_with_route(self, route_path, *args, **kwargs): if route_path not in self.dags: raise RayServeException(f"{route_path} does not exist in dags routes") return await self.dags[route_path].remote(*args, **kwargs)
d_id: 28,813 | id: 128,796 | n_whitespaces: 133 | path: python/ray/tune/experiment/trial.py | n_words: 23 | n_identifiers: 9
def should_recover(self): return ( self.num_failures < self.max_failures or self.max_failures < 0 or ( self.num_failures == self.max_failures and self.num_restore_failures < int(os.environ.get("TUNE_RESTORE_RETRY_NUM", 0)) )
[tune] Add retry logic for restoring trials. (#29086) This is an advanced setting. Consider the following scenario: Due to scheduling glitches, sometimes a restoring trial may be scheduled onto a dying node. By setting this env var to a positive number, the trial can be restored several times and hopefully one of the times it will not be put on a dying node. This retry behavior won't increment the per trial failure number, which is compared against max_failures. Signed-off-by: xwjiang2010 <[email protected]> Signed-off-by: xwjiang2010 <[email protected]>
fun_name: should_recover | commit_id: f1882f90cf2d91f5d802b7dffd41db5e306d6e6c | repo: ray | file_name: trial.py | ast_levels: 15 | nloc: 10 | url: https://github.com/ray-project/ray.git | complexity: 4 | token_counts: 50 | n_ast_errors: 0 | vocab_size: 15 | n_ast_nodes: 81 | language: Python
{ "docstring": "Returns whether the trial qualifies for retrying.\n\n This is if the trial has not failed more than max_failures. Note this\n may return true even when there is no checkpoint, either because\n `self.checkpoint_freq` is `0` or because the trial failed before\n a checkpoint has been made.\n ", "language": "en", "n_whitespaces": 80, "n_words": 45, "vocab_size": 36 }
def should_recover(self): return ( self.num_failures < self.max_failures or self.max_failures < 0 or ( self.num_failures == self.max_failures and self.num_restore_failures < int(os.environ.get("TUNE_RESTORE_RETRY_NUM", 0)) ) )
d_id: 16,239 | id: 74,216 | n_whitespaces: 75 | path: wagtail/core/tests/test_locale_model.py | n_words: 15 | n_identifiers: 14
def test_change_root_page_locale_on_locale_deletion(self): # change 'real' pages first Page.objects.filter(depth__gt=1).update( locale=Locale.objects.get(language_code="fr") ) self.assertEqual(Page.get_first_root_node().locale.language_code, "en") Locale.objects.get(language_code="en").delete() self.assertEqual(Page.get_first_root_node().locale.language_code, "fr")
Reformat with black
fun_name: test_change_root_page_locale_on_locale_deletion | commit_id: d10f15e55806c6944827d801cd9c2d53f5da4186 | repo: wagtail | file_name: test_locale_model.py | ast_levels: 12 | nloc: 7 | url: https://github.com/wagtail/wagtail.git | complexity: 1 | token_counts: 78 | n_ast_errors: 0 | vocab_size: 14 | n_ast_nodes: 136 | language: Python
{ "docstring": "\n On deleting the locale used for the root page (but no 'real' pages), the\n root page should be reassigned to a new locale (the default one, if possible)\n ", "language": "en", "n_whitespaces": 50, "n_words": 28, "vocab_size": 23 }
def test_change_root_page_locale_on_locale_deletion(self): # change 'real' pages first Page.objects.filter(depth__gt=1).update( locale=Locale.objects.get(language_code="fr") ) self.assertEqual(Page.get_first_root_node().locale.language_code, "en") Locale.objects.get(language_code="en").delete() self.assertEqual(Page.get_first_root_node().locale.language_code, "fr")
d_id: 3,880 | id: 21,496 | n_whitespaces: 1,320 | path: pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py | n_words: 468 | n_identifiers: 47
def _proc_pax(self, tarfile): # Read the header information. buf = tarfile.fileobj.read(self._block(self.size)) # A pax header stores supplemental information for either # the following file (extended) or all following files # (global). if self.type == XGLTYPE: pax_headers = tarfile.pax_headers else: pax_headers = tarfile.pax_headers.copy() # Check if the pax header contains a hdrcharset field. This tells us # the encoding of the path, linkpath, uname and gname fields. Normally, # these fields are UTF-8 encoded but since POSIX.1-2008 tar # implementations are allowed to store them as raw binary strings if # the translation to UTF-8 fails. match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf) if match is not None: pax_headers["hdrcharset"] = match.group(1).decode("utf8") # For the time being, we don't care about anything other than "BINARY". # The only other value that is currently allowed by the standard is # "ISO-IR 10646 2000 UTF-8" in other words UTF-8. hdrcharset = pax_headers.get("hdrcharset") if hdrcharset == "BINARY": encoding = tarfile.encoding else: encoding = "utf8" # Parse pax header information. A record looks like that: # "%d %s=%s\n" % (length, keyword, value). length is the size # of the complete record including the length field itself and # the newline. keyword and value are both UTF-8 encoded strings. regex = re.compile(br"(\d+) ([^=]+)=") pos = 0 while True: match = regex.match(buf, pos) if not match: break length, keyword = match.groups() length = int(length) value = buf[match.end(2)
Vendor in pip 22.1.2
fun_name: _proc_pax | commit_id: c69d55f7c82d5ae2cce542bcfb98d043ca4836a0 | repo: pipenv | file_name: tarfile.py | ast_levels: 15 | nloc: 52 | url: https://github.com/pypa/pipenv.git | complexity: 16 | token_counts: 387 | n_ast_errors: 0 | vocab_size: 249 | n_ast_nodes: 669 | language: Python
{ "docstring": "Process an extended or global header as described in\n POSIX.1-2008.\n ", "language": "en", "n_whitespaces": 27, "n_words": 10, "vocab_size": 10 }
def _proc_pax(self, tarfile): # Read the header information. buf = tarfile.fileobj.read(self._block(self.size)) # A pax header stores supplemental information for either # the following file (extended) or all following files # (global). if self.type == XGLTYPE: pax_headers = tarfile.pax_headers else: pax_headers = tarfile.pax_headers.copy() # Check if the pax header contains a hdrcharset field. This tells us # the encoding of the path, linkpath, uname and gname fields. Normally, # these fields are UTF-8 encoded but since POSIX.1-2008 tar # implementations are allowed to store them as raw binary strings if # the translation to UTF-8 fails. match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf) if match is not None: pax_headers["hdrcharset"] = match.group(1).decode("utf8") # For the time being, we don't care about anything other than "BINARY". # The only other value that is currently allowed by the standard is # "ISO-IR 10646 2000 UTF-8" in other words UTF-8. hdrcharset = pax_headers.get("hdrcharset") if hdrcharset == "BINARY": encoding = tarfile.encoding else: encoding = "utf8" # Parse pax header information. A record looks like that: # "%d %s=%s\n" % (length, keyword, value). length is the size # of the complete record including the length field itself and # the newline. keyword and value are both UTF-8 encoded strings. regex = re.compile(br"(\d+) ([^=]+)=") pos = 0 while True: match = regex.match(buf, pos) if not match: break length, keyword = match.groups() length = int(length) value = buf[match.end(2) + 1:match.start(1) + length - 1] # Normally, we could just use "utf8" as the encoding and "strict" # as the error handler, but we better not take the risk. For # example, GNU tar <= 1.23 is known to store filenames it cannot # translate to UTF-8 as raw strings (unfortunately without a # hdrcharset=BINARY header). # We first try the strict standard encoding, and if that fails we # fall back on the user's encoding and error handler. keyword = self._decode_pax_field(keyword, "utf8", "utf8", tarfile.errors) if keyword in PAX_NAME_FIELDS: value = self._decode_pax_field(value, encoding, tarfile.encoding, tarfile.errors) else: value = self._decode_pax_field(value, "utf8", "utf8", tarfile.errors) pax_headers[keyword] = value pos += length # Fetch the next header. try: next = self.fromtarfile(tarfile) except HeaderError: raise SubsequentHeaderError("missing or bad subsequent header") # Process GNU sparse information. if "GNU.sparse.map" in pax_headers: # GNU extended sparse format version 0.1. self._proc_gnusparse_01(next, pax_headers) elif "GNU.sparse.size" in pax_headers: # GNU extended sparse format version 0.0. self._proc_gnusparse_00(next, pax_headers, buf) elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0": # GNU extended sparse format version 1.0. self._proc_gnusparse_10(next, pax_headers, tarfile) if self.type in (XHDTYPE, SOLARIS_XHDTYPE): # Patch the TarInfo object with the extended header info. next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) next.offset = self.offset if "size" in pax_headers: # If the extended header replaces the size field, # we need to recalculate the offset where the next # header starts. offset = next.offset_data if next.isreg() or next.type not in SUPPORTED_TYPES: offset += next._block(next.size) tarfile.offset = offset return next
d_id: 4,585 | id: 23,379 | n_whitespaces: 134 | path: ppocr/modeling/backbones/rec_efficientb3_pren.py | n_words: 22 | n_identifiers: 9
def get_global_params(): GlobalParams = namedtuple('GlobalParams', [ 'drop_connect_rate', 'width_coefficient', 'depth_coefficient', 'depth_divisor', 'image_size' ]) global_params = GlobalParams( drop_connect_rate=0.3, width_coefficient=1.2, depth_coefficient=1.4, depth_divisor=8,
[Feature] Add PREN Scene Text Recognition Model(Accepted in CVPR2021) (#5563) * [Feature] add PREN scene text recognition model * [Patch] Optimize yml File * [Patch] Save Label/Pred Preprocess Time Cost * [BugFix] Modify Shape Conversion to Fit for Inference Model Exportion * [Patch] ? * [Patch] ? * 啥情况...
fun_name: get_global_params | commit_id: 6e607a0fa1cefbf0388dac86c84debf4781cec48 | repo: PaddleOCR | file_name: rec_efficientb3_pren.py | ast_levels: 10 | nloc: 12 | url: https://github.com/PaddlePaddle/PaddleOCR.git | complexity: 1 | token_counts: 55 | n_ast_errors: 0 | vocab_size: 20 | n_ast_nodes: 83 | language: Python
{ "docstring": "\n The fllowing are efficientnetb3's arch superparams, but to fit for scene \n text recognition task, the resolution(image_size) here is changed \n from 300 to 64.\n ", "language": "en", "n_whitespaces": 54, "n_words": 23, "vocab_size": 22 }
def get_global_params(): GlobalParams = namedtuple('GlobalParams', [ 'drop_connect_rate', 'width_coefficient', 'depth_coefficient', 'depth_divisor', 'image_size' ]) global_params = GlobalParams( drop_connect_rate=0.3, width_coefficient=1.2, depth_coefficient=1.4, depth_divisor=8, image_size=64) return global_params
d_id: 23,349 | id: 108,809 | n_whitespaces: 31 | path: lib/matplotlib/path.py | n_words: 10 | n_identifiers: 8
def _create_closed(cls, vertices): v = _to_unmasked_float_array(vertices)
Add a helper to generate closed paths. Instead of having to manually append an unused vertex that corresponds to the CLOSEPATH code, add a _make_closed helper (private for now) which does that for us.
fun_name: _create_closed | commit_id: e994b58e49bcd98334b220d74540005f62af918d | repo: matplotlib | file_name: path.py | ast_levels: 12 | nloc: 3 | url: https://github.com/matplotlib/matplotlib.git | complexity: 1 | token_counts: 36 | n_ast_errors: 0 | vocab_size: 10 | n_ast_nodes: 57 | language: Python
{ "docstring": "\n Create a closed polygonal path going through *vertices*.\n\n Unlike ``Path(..., closed=True)``, *vertices* should **not** end with\n an entry for the CLOSEPATH; this entry is added by `._create_closed`.\n ", "language": "en", "n_whitespaces": 56, "n_words": 27, "vocab_size": 26 }
def _create_closed(cls, vertices): v = _to_unmasked_float_array(vertices) return cls(np.concatenate([v, v[:1]]), closed=True)
ast_errors: @pytest.mark.parametrize("loss", ALL_LOSSES) @pytest.mark.parametrize("sample_weight", [None, "range"]) @pytest.mark.parametrize("dtype", (np.float32, np.float64)) @pytest.mark.parametrize("order", ("C", "F"))
d_id: 75,678 | id: 259,259 | n_whitespaces: 483 | path: sklearn/_loss/tests/test_loss.py | n_words: 93 | n_identifiers: 34
def test_predict_proba(loss, global_random_seed): n_samples = 20 y_true, raw_prediction = random_y_true_raw_prediction( loss=loss, n_samples=n_samples, y_bound=(-100, 100), raw_bound=(-5, 5), seed=global_random_seed, ) if hasattr(loss, "predict_proba"): proba = loss.predict_proba(raw_prediction) assert proba.shape == (n_samples, loss.n_classes) assert np.sum(proba, axis=1) == approx(1, rel=1e-11)
TST ensure that sklearn/_loss/tests/test_loss.py is seed insensitive (#22847) Co-authored-by: Christian Lorentzen <[email protected]>
fun_name: test_predict_proba | commit_id: 751c5cd05ff545c20ad0b09ac491c07f31e4cd56 | repo: scikit-learn | file_name: test_loss.py | ast_levels: 14 | nloc: 38 | url: https://github.com/scikit-learn/scikit-learn.git | complexity: 4 | token_counts: 248 | n_ast_errors: 1 | vocab_size: 62 | n_ast_nodes: 453 | language: Python
{ "docstring": "Test that predict_proba and gradient_proba work as expected.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def test_predict_proba(loss, global_random_seed): n_samples = 20 y_true, raw_prediction = random_y_true_raw_prediction( loss=loss, n_samples=n_samples, y_bound=(-100, 100), raw_bound=(-5, 5), seed=global_random_seed, ) if hasattr(loss, "predict_proba"): proba = loss.predict_proba(raw_prediction) assert proba.shape == (n_samples, loss.n_classes) assert np.sum(proba, axis=1) == approx(1, rel=1e-11) if hasattr(loss, "gradient_proba"): for grad, proba in ( (None, None), (None, np.empty_like(raw_prediction)), (np.empty_like(raw_prediction), None), (np.empty_like(raw_prediction), np.empty_like(raw_prediction)), ): grad, proba = loss.gradient_proba( y_true=y_true, raw_prediction=raw_prediction, sample_weight=None, gradient_out=grad, proba_out=proba, ) assert proba.shape == (n_samples, loss.n_classes) assert np.sum(proba, axis=1) == approx(1, rel=1e-11) assert_allclose( grad, loss.gradient( y_true=y_true, raw_prediction=raw_prediction, sample_weight=None, gradient_out=None, ), ) @pytest.mark.parametrize("loss", ALL_LOSSES) @pytest.mark.parametrize("sample_weight", [None, "range"]) @pytest.mark.parametrize("dtype", (np.float32, np.float64)) @pytest.mark.parametrize("order", ("C", "F"))
d_id: 13,932 | id: 65,555 | n_whitespaces: 4 | path: erpnext/buying/doctype/supplier_scorecard_variable/supplier_scorecard_variable.py | n_words: 6 | n_identifiers: 4
def get_late_shipments(scorecard): return get
style: format code with black
fun_name: get_late_shipments | commit_id: 494bd9ef78313436f0424b918f200dab8fc7c20b | repo: erpnext | file_name: supplier_scorecard_variable.py | ast_levels: 8 | nloc: 2 | url: https://github.com/frappe/erpnext.git | complexity: 1 | token_counts: 16 | n_ast_errors: 0 | vocab_size: 6 | n_ast_nodes: 29 | language: Python
{ "docstring": "Gets the number of late shipments (counting each item) in the period (based on Purchase Receipts vs POs)", "language": "en", "n_whitespaces": 17, "n_words": 18, "vocab_size": 17 }
def get_late_shipments(scorecard): return get_total_shipments(scorecard) - get_on_time_shipments(scorecard)
d_id: 1,150 | id: 7,172 | n_whitespaces: 121 | path: ludwig/models/base.py | n_words: 38 | n_identifiers: 16
def eval_loss(self, targets, predictions): eval_loss = 0 for of_name, of_obj in self.outp
feat: Added model type GBM (LightGBM tree learner), as an alternative to ECD (#2027)
fun_name: eval_loss | commit_id: aa0c63bf2ed825eb3ca8eff8a002d5ccbe395173 | repo: ludwig | file_name: base.py | ast_levels: 12 | nloc: 10 | url: https://github.com/ludwig-ai/ludwig.git | complexity: 3 | token_counts: 82 | n_ast_errors: 0 | vocab_size: 29 | n_ast_nodes: 132 | language: Python
{ "docstring": "Computes all evaluation losses for the model given targets and predictions.\n\n Args:\n targets: A dictionary of target names to target tensors.\n predictions: A dictionary of output names to output tensors.\n\n Returns:\n A tuple of loss values for eval losses and additional losses.\n ", "language": "en", "n_whitespaces": 96, "n_words": 42, "vocab_size": 29 }
def eval_loss(self, targets, predictions): eval_loss = 0 for of_name, of_obj in self.output_features.items(): of_eval_loss = of_obj.eval_loss(targets[of_name], predictions[of_name]) eval_loss += of_obj.loss["weight"] * of_eval_loss additional_loss = 0 additional_losses = self.losses() if additional_losses: additional_loss = torch.sum(torch.stack(additional_losses)) # other losses return eval_loss, additional_loss
d_id: 35,404 | id: 153,455 | n_whitespaces: 135 | path: modin/db_conn.py | n_words: 40 | n_identifiers: 6
def partition_query(self, query, limit, offset): return ( ( f"SELECT * FROM ({query}) AS _ ORDER BY(SELECT NULL)" + f" OFFSET {offset} ROWS FETCH NEXT {limit} ROWS ONLY" ) if self._dialect_is_microsoft_sql() else f"SELECT * FROM
FEAT-#979: Enable reading from SQL server. (#4279) Co-authored-by: eavidan <[email protected]> Co-authored-by: Devin Petersohn <[email protected]> Signed-off-by: mvashishtha <[email protected]>
fun_name: partition_query | commit_id: 2d40797b2b700d81d4db4a4cd023d563edf6431f | repo: modin | file_name: db_conn.py | ast_levels: 11 | nloc: 9 | url: https://github.com/modin-project/modin.git | complexity: 2 | token_counts: 31 | n_ast_errors: 0 | vocab_size: 31 | n_ast_nodes: 73 | language: Python
{ "docstring": "\n Get a query that partitions the original `query`.\n\n Parameters\n ----------\n query : str\n The SQL query to get a partition.\n limit : int\n The size of the partition.\n offset : int\n Where the partition begins.\n\n Returns\n -------\n str\n ", "language": "en", "n_whitespaces": 142, "n_words": 38, "vocab_size": 27 }
def partition_query(self, query, limit, offset): return ( ( f"SELECT * FROM ({query}) AS _ ORDER BY(SELECT NULL)" + f" OFFSET {offset} ROWS FETCH NEXT {limit} ROWS ONLY" ) if self._dialect_is_microsoft_sql() else f"SELECT * FROM ({query}) LIMIT {limit} OFFSET {offset}" )
d_id: 51,869 | id: 207,118 | n_whitespaces: 339 | path: tests/admin_filters/tests.py | n_words: 122 | n_identifiers: 24
def test_parameter_ends_with__in__or__isnull(self): # When it ends with '__in' ----------------------------------------- modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site) request = self.request_factory.get("/", {"decade__in": "the 90s"}) request.user = self.alfred changelist = modeladmin.get_changelist_instance(request) # Make sure the correct queryset is returned queryset = changelist.get_queryset(request) self.assertEqual(list(queryset), [self.bio_book]) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][0] self.assertEqual(filterspec.title, "publication decade") choices = list(filterspec.choices(changelist)) self.assertEqual(choices[2]["display"], "the 1990's") self.assertIs(choices[2]["selected"], True) self.assertEqual(choices[2]["query_string"], "?decade__in=the+90s") # When it ends with '__isnull' ----------
Refs #33476 -- Reformatted code with Black.
fun_name: test_parameter_ends_with__in__or__isnull | commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | repo: django | file_name: tests.py | ast_levels: 11 | nloc: 25 | url: https://github.com/django/django.git | complexity: 1 | token_counts: 284 | n_ast_errors: 0 | vocab_size: 52 | n_ast_nodes: 475 | language: Python
{ "docstring": "\n A SimpleListFilter's parameter name is not mistaken for a model field\n if it ends with '__isnull' or '__in' (#17091).\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 19 }
def test_parameter_ends_with__in__or__isnull(self): # When it ends with '__in' ----------------------------------------- modeladmin = DecadeFilterBookAdminParameterEndsWith__In(Book, site) request = self.request_factory.get("/", {"decade__in": "the 90s"}) request.user = self.alfred changelist = modeladmin.get_changelist_instance(request) # Make sure the correct queryset is returned queryset = changelist.get_queryset(request) self.assertEqual(list(queryset), [self.bio_book]) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][0] self.assertEqual(filterspec.title, "publication decade") choices = list(filterspec.choices(changelist)) self.assertEqual(choices[2]["display"], "the 1990's") self.assertIs(choices[2]["selected"], True) self.assertEqual(choices[2]["query_string"], "?decade__in=the+90s") # When it ends with '__isnull' --------------------------------------- modeladmin = DecadeFilterBookAdminParameterEndsWith__Isnull(Book, site) request = self.request_factory.get("/", {"decade__isnull": "the 90s"}) request.user = self.alfred changelist = modeladmin.get_changelist_instance(request) # Make sure the correct queryset is returned queryset = changelist.get_queryset(request) self.assertEqual(list(queryset), [self.bio_book]) # Make sure the correct choice is selected filterspec = changelist.get_filters(request)[0][0] self.assertEqual(filterspec.title, "publication decade") choices = list(filterspec.choices(changelist)) self.assertEqual(choices[2]["display"], "the 1990's") self.assertIs(choices[2]["selected"], True) self.assertEqual(choices[2]["query_string"], "?decade__isnull=the+90s")
d_id: 55,384 | id: 218,553 | n_whitespaces: 39 | path: python3.10.4/Lib/ipaddress.py | n_words: 16 | n_identifiers: 5
def v4_int_to_packed(address): try: return address.to_bytes(4, 'big') except OverflowError: raise ValueError("Address negative or too large for I
add python 3.10.4 for windows
fun_name: v4_int_to_packed | commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526 | repo: XX-Net | file_name: ipaddress.py | ast_levels: 11 | nloc: 5 | url: https://github.com/XX-net/XX-Net.git | complexity: 2 | token_counts: 25 | n_ast_errors: 0 | vocab_size: 16 | n_ast_nodes: 47 | language: Python
{ "docstring": "Represent an address as 4 packed bytes in network (big-endian) order.\n\n Args:\n address: An integer representation of an IPv4 IP address.\n\n Returns:\n The integer address packed as 4 bytes in network (big-endian) order.\n\n Raises:\n ValueError: If the integer is negative or too large to be an\n IPv4 IP address.\n\n ", "language": "en", "n_whitespaces": 91, "n_words": 49, "vocab_size": 33 }
def v4_int_to_packed(address): try: return address.to_bytes(4, 'big') except OverflowError: raise ValueError("Address negative or too large for IPv4")
d_id: 71,768 | id: 247,600 | n_whitespaces: 88 | path: tests/handlers/test_directory.py | n_words: 12 | n_identifiers: 14
def test_delete_alias_not_allowed(self) -> None: self._create_alias(self.admin_user) self.get_failure( self.handler.delete_association( create_requester(self.test_us
Add type hints to some tests/handlers files. (#12224)
fun_name: test_delete_alias_not_allowed | commit_id: 5dd949bee6158a8b651db9f2ae417a62c8184bfd | repo: synapse | file_name: test_directory.py | ast_levels: 12 | nloc: 9 | url: https://github.com/matrix-org/synapse.git | complexity: 1 | token_counts: 47 | n_ast_errors: 0 | vocab_size: 12 | n_ast_nodes: 75 | language: Python
{ "docstring": "A user that doesn't meet the expected guidelines cannot delete an alias.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def test_delete_alias_not_allowed(self) -> None: self._create_alias(self.admin_user) self.get_failure( self.handler.delete_association( create_requester(self.test_user), self.room_alias ), synapse.api.errors.AuthError, )
d_id: 48,923 | id: 198,414 | n_whitespaces: 3,401 | path: sympy/integrals/trigonometry.py | n_words: 909 | n_identifiers: 41
def trigintegrate(f, x, conds='piecewise'): pat, a, n, m = _pat_sincos(x) f = f.rewrite('sincos') M = f.match(pat) if M is None: return n, m = M[n], M[m] if n.is_zero and m.is_zero: return x zz = x if n.is_zero else S.Zero a = M[a] if n.is_odd or m.is_odd: u = _u n_, m_ = n.is_odd, m.is_odd # take smallest n or m -- to choose simplest substitution if n_ and m_: # Make sure to choose the positive one # otherwise an incorrect integral can occur. if n < 0 and m > 0: m_ = True n_ = False elif m < 0 and n > 0: n_ = True m_ = False # Both are negative so choose the smallest n or m # in absolute value for simplest substitution. elif (n < 0 and m < 0): n_ = n > m m_ = not (n > m) # Both n and m are odd and positive else: n_ = (n < m) # NB: careful here, one of the m_ = not (n < m) # conditions *must* be true # n m u=C (n-1)/2 m # S(x) * C(x) dx --> -(1-u^2) * u du if n_: ff = -(1 - u**2)**((n - 1)/2) * u**m uu = cos(a*x) # n m u=S n (m-1)/2 # S(x) * C(x) dx --> u * (1-u^2) du elif m_: ff = u**n * (1 - u**2)**((m - 1)/2) uu = sin(a*x) fi = integrate(ff, u) # XXX cyclic deps fx = fi.subs(u, uu) if conds == 'piecewise': return Piecewise((fx / a, Ne(a, 0)), (zz, True)) return fx / a # n & m are both even # # 2k 2m 2l 2l # we transform S (x) * C (x) into terms with only S (x) or C (x) # # example: # 100 4 100 2 2 100 4 2 # S (x) * C (x) = S (x) * (1-S (x)) = S (x) * (1 + S (x) - 2*S (x)) # # 104 102 100 # = S (x) - 2*S (x) + S (x) # 2k # then S is integrated with recursive formula # take largest n or m -- to choose simplest substitution n_ = (Abs(n) > Abs(m)) m_ = (Abs(m) > Abs(n)) res = S.Zero if n_: # 2k 2 k i 2i # C = (1 - S ) = sum(i, (-) * B(k, i) * S ) if m > 0: for i in range(0, m//2 + 1): res += (S.NegativeOne**i * binomial(m//2, i) * _sin_pow_integrate(n + 2*i, x)) elif m == 0: res = _sin_pow_integrate(n, x) else: # m < 0 , |n| > |m| # / # | # | m n # | cos (x) sin (x) dx = # | # | #/ # / # | # -1 m+1 n-1 n - 1 | m+2 n-2 # ________ cos (x) sin (x) + _______ | cos (x) sin (x) dx # | # m + 1 m + 1 | # / res = (Rational(-1, m + 1) * cos(x)**(m + 1) * sin(x)**(n - 1) + Rational(n - 1, m + 1) * trigintegrate(cos(x)**(m + 2)*sin(x)**(n - 2), x)) elif m_: # 2k 2 k i 2i # S = (1 - C ) = sum(i, (-) * B(k, i) * C ) if n > 0: # / / # | | # | m n | -m n # | cos (x)*sin (x) dx or | cos (x) * sin (x) dx # | | # / / # # |m| > |n| ; m, n >0 ; m, n belong to Z - {0} # n
Improve loop performance
fun_name: trigintegrate | commit_id: e94a7b45d7b033ccbd57395dca28b654f875c54c | repo: sympy | file_name: trigonometry.py | ast_levels: 24 | nloc: 78 | url: https://github.com/sympy/sympy.git | complexity: 30 | token_counts: 829 | n_ast_errors: 0 | vocab_size: 266 | n_ast_nodes: 1,394 | language: Python
{ "docstring": "\n Integrate f = Mul(trig) over x.\n\n Examples\n ========\n\n >>> from sympy import sin, cos, tan, sec\n >>> from sympy.integrals.trigonometry import trigintegrate\n >>> from sympy.abc import x\n\n >>> trigintegrate(sin(x)*cos(x), x)\n sin(x)**2/2\n\n >>> trigintegrate(sin(x)**2, x)\n x/2 - sin(x)*cos(x)/2\n\n >>> trigintegrate(tan(x)*sec(x), x)\n 1/cos(x)\n\n >>> trigintegrate(sin(x)*tan(x), x)\n -log(sin(x) - 1)/2 + log(sin(x) + 1)/2 - sin(x)\n\n References\n ==========\n\n .. [1] http://en.wikibooks.org/wiki/Calculus/Integration_techniques\n\n See Also\n ========\n\n sympy.integrals.integrals.Integral.doit\n sympy.integrals.integrals.Integral\n ", "language": "en", "n_whitespaces": 129, "n_words": 62, "vocab_size": 44 }
def trigintegrate(f, x, conds='piecewise'): pat, a, n, m = _pat_sincos(x) f = f.rewrite('sincos') M = f.match(pat) if M is None: return n, m = M[n], M[m] if n.is_zero and m.is_zero: return x zz = x if n.is_zero else S.Zero a = M[a] if n.is_odd or m.is_odd: u = _u n_, m_ = n.is_odd, m.is_odd # take smallest n or m -- to choose simplest substitution if n_ and m_: # Make sure to choose the positive one # otherwise an incorrect integral can occur. if n < 0 and m > 0: m_ = True n_ = False elif m < 0 and n > 0: n_ = True m_ = False # Both are negative so choose the smallest n or m # in absolute value for simplest substitution. elif (n < 0 and m < 0): n_ = n > m m_ = not (n > m) # Both n and m are odd and positive else: n_ = (n < m) # NB: careful here, one of the m_ = not (n < m) # conditions *must* be true # n m u=C (n-1)/2 m # S(x) * C(x) dx --> -(1-u^2) * u du if n_: ff = -(1 - u**2)**((n - 1)/2) * u**m uu = cos(a*x) # n m u=S n (m-1)/2 # S(x) * C(x) dx --> u * (1-u^2) du elif m_: ff = u**n * (1 - u**2)**((m - 1)/2) uu = sin(a*x) fi = integrate(ff, u) # XXX cyclic deps fx = fi.subs(u, uu) if conds == 'piecewise': return Piecewise((fx / a, Ne(a, 0)), (zz, True)) return fx / a # n & m are both even # # 2k 2m 2l 2l # we transform S (x) * C (x) into terms with only S (x) or C (x) # # example: # 100 4 100 2 2 100 4 2 # S (x) * C (x) = S (x) * (1-S (x)) = S (x) * (1 + S (x) - 2*S (x)) # # 104 102 100 # = S (x) - 2*S (x) + S (x) # 2k # then S is integrated with recursive formula # take largest n or m -- to choose simplest substitution n_ = (Abs(n) > Abs(m)) m_ = (Abs(m) > Abs(n)) res = S.Zero if n_: # 2k 2 k i 2i # C = (1 - S ) = sum(i, (-) * B(k, i) * S ) if m > 0: for i in range(0, m//2 + 1): res += (S.NegativeOne**i * binomial(m//2, i) * _sin_pow_integrate(n + 2*i, x)) elif m == 0: res = _sin_pow_integrate(n, x) else: # m < 0 , |n| > |m| # / # | # | m n # | cos (x) sin (x) dx = # | # | #/ # / # | # -1 m+1 n-1 n - 1 | m+2 n-2 # ________ cos (x) sin (x) + _______ | cos (x) sin (x) dx # | # m + 1 m + 1 | # / res = (Rational(-1, m + 1) * cos(x)**(m + 1) * sin(x)**(n - 1) + Rational(n - 1, m + 1) * trigintegrate(cos(x)**(m + 2)*sin(x)**(n - 2), x)) elif m_: # 2k 2 k i 2i # S = (1 - C ) = sum(i, (-) * B(k, i) * C ) if n > 0: # / / # | | # | m n | -m n # | cos (x)*sin (x) dx or | cos (x) * sin (x) dx # | | # / / # # |m| > |n| ; m, n >0 ; m, n belong to Z - {0} # n 2 # sin (x) term is expanded here in terms of cos (x), # and then integrated. # for i in range(0, n//2 + 1): res += (S.NegativeOne**i * binomial(n//2, i) * _cos_pow_integrate(m + 2*i, x)) elif n == 0: # / # | # | 1 # | _ _ _ # | m # | cos (x) # / # res = _cos_pow_integrate(m, x) else: # n < 0 , |m| > |n| # / # | # | m n # | cos (x) sin (x) dx = # | # | #/ # / # | # 1 m-1 n+1 m - 1 | m-2 n+2 # _______ cos (x) sin (x) + _______ | cos (x) sin (x) dx # | # n + 1 n + 1 | # / res = (Rational(1, n + 1) * cos(x)**(m - 1)*sin(x)**(n + 1) + Rational(m - 1, n + 1) * trigintegrate(cos(x)**(m - 2)*sin(x)**(n + 2), x)) else: if m == n: ##Substitute sin(2x)/2 for sin(x)cos(x) and then Integrate. res = integrate((sin(2*x)*S.Half)**m, x) elif (m == -n): if n < 0: # Same as the scheme described above. # the function argument to integrate in the end will # be 1, this cannot be integrated by trigintegrate. # Hence use sympy.integrals.integrate. 
res = (Rational(1, n + 1) * cos(x)**(m - 1) * sin(x)**(n + 1) + Rational(m - 1, n + 1) * integrate(cos(x)**(m - 2) * sin(x)**(n + 2), x)) else: res = (Rational(-1, m + 1) * cos(x)**(m + 1) * sin(x)**(n - 1) + Rational(n - 1, m + 1) * integrate(cos(x)**(m + 2)*sin(x)**(n - 2), x)) if conds == 'piecewise': return Piecewise((res.subs(x, a*x) / a, Ne(a, 0)), (zz, True)) return res.subs(x, a*x) / a
d_id: 77,585 | id: 264,064 | n_whitespaces: 23 | path: PyInstaller/utils/hooks/win32.py | n_words: 11 | n_identifiers: 9
def get_pywin32_module_file_attribute(module_name): from PyInstaller.utils.win32 import winutils module = w
hookutils: win32: port to PyInstaller.isolated framework
fun_name: get_pywin32_module_file_attribute | commit_id: b87832b35dc1866c81ecaf7e502afe48a4e50a82 | repo: pyinstaller | file_name: win32.py | ast_levels: 8 | nloc: 4 | url: https://github.com/pyinstaller/pyinstaller.git | complexity: 1 | token_counts: 26 | n_ast_errors: 0 | vocab_size: 11 | n_ast_nodes: 43 | language: Python
{ "docstring": "\n Get the absolute path of the PyWin32 DLL specific to the PyWin32 module with the passed name.\n\n On import, each PyWin32 module:\n\n * Imports a DLL specific to that module.\n * Overwrites the values of all module attributes with values specific to that DLL. This includes that module's\n `__file__` attribute, which then provides the absolute path of that DLL.\n\n This function safely imports that module in a PyWin32-aware subprocess and returns the value of that module's\n `__file__` attribute.\n\n Parameters\n ----------\n module_name : str\n Fully-qualified name of that module.\n\n Returns\n ----------\n str\n Absolute path of that DLL.\n\n See Also\n ----------\n `PyInstaller.utils.win32.winutils.import_pywin32_module()`\n For further details.\n ", "language": "en", "n_whitespaces": 178, "n_words": 103, "vocab_size": 60 }
def get_pywin32_module_file_attribute(module_name): from PyInstaller.utils.win32 import winutils module = winutils.import_pywin32_module(module_name) return module.__file__
d_id: 19,490 | id: 97,856 | n_whitespaces: 32 | path: src/sentry/pipeline/base.py | n_words: 11 | n_identifiers: 6
def render_warning(self, message): context = {"error": message} return render_to_response("sentry/pipeline-provider-error.html", context, self.request)
ref(py): Split up large file (#32862) Co-authored-by: getsantry[bot] <66042841+getsantry[bot]@users.noreply.github.com>
fun_name: render_warning | commit_id: d246d2b6d3e014270941209e54f2f12e09ad9a81 | repo: sentry | file_name: base.py | ast_levels: 9 | nloc: 3 | url: https://github.com/getsentry/sentry.git | complexity: 1 | token_counts: 26 | n_ast_errors: 0 | vocab_size: 11 | n_ast_nodes: 45 | language: Python
{ "docstring": "For situations when we want to display an error without triggering an issue", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 12 }
def render_warning(self, message): context = {"error": message} return render_to_response("sentry/pipeline-provider-error.html", context, self.request)
d_id: 71,570 | id: 247,283 | n_whitespaces: 284 | path: tests/rest/client/test_rooms.py | n_words: 66 | n_identifiers: 20
def test_context_filter_not_labels(self) -> None: event_id = self._send_labelled_messages_in_room() channel = self.make_request( "GET", "/rooms/%s/context/%s?filter=%s" % (self.room_id, event_id, json.dumps(self.FILTER_NOT_LABELS)), access_token=self.tok, ) self.assertEqual(channel.code, 200, channel.result) events_before = channel.json_body["events_before"] self.assertEqual( len(events_before), 1, [event["content"] for event in events_before] ) self.assertEqual( events_before[0]["content"]["body"], "without label", events_before[0] ) events_after = channel.json_body["events_after"] self.assertEqual( len(events_after), 2, [event["content"] for event in events_after] ) self.assertEqual( events_after[0]["content"]["body"], "with wrong label", events_after[0] ) self.assertEqual( events_after[1]["content"]["body"], "with two wrong labels", events_after[1] )
Add type hints to `tests/rest/client` (#12108) * Add type hints to `tests/rest/client` * newsfile * fix imports * add `test_account.py` * Remove one type hint in `test_report_event.py` * change `on_create_room` to `async` * update new functions in `test_third_party_rules.py` * Add `test_filter.py` * add `test_rooms.py` * change to `assertEquals` to `assertEqual` * lint
fun_name: test_context_filter_not_labels | commit_id: 2ffaf30803f93273a4d8a65c9e6c3110c8433488 | repo: synapse | file_name: test_rooms.py | ast_levels: 13 | nloc: 27 | url: https://github.com/matrix-org/synapse.git | complexity: 3 | token_counts: 189 | n_ast_errors: 0 | vocab_size: 47 | n_ast_nodes: 302 | language: Python
{ "docstring": "Test that we can filter by the absence of a label on a /context request.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 14 }
def test_context_filter_not_labels(self) -> None: event_id = self._send_labelled_messages_in_room() channel = self.make_request( "GET", "/rooms/%s/context/%s?filter=%s" % (self.room_id, event_id, json.dumps(self.FILTER_NOT_LABELS)), access_token=self.tok, ) self.assertEqual(channel.code, 200, channel.result) events_before = channel.json_body["events_before"] self.assertEqual( len(events_before), 1, [event["content"] for event in events_before] ) self.assertEqual( events_before[0]["content"]["body"], "without label", events_before[0] ) events_after = channel.json_body["events_after"] self.assertEqual( len(events_after), 2, [event["content"] for event in events_after] ) self.assertEqual( events_after[0]["content"]["body"], "with wrong label", events_after[0] ) self.assertEqual( events_after[1]["content"]["body"], "with two wrong labels", events_after[1] )
d_id: 42,311 | id: 177,245 | n_whitespaces: 189 | path: networkx/algorithms/isomorphism/vf2pp_helpers/feasibility.py | n_words: 53 | n_identifiers: 11
def _consistent_PT(u, v, graph_params, state_params): G1, G2 = graph_params.G1, graph_params.G2 mapping, reverse_mapping = state_params.mapping, state_params.reverse_mapping for neighbor in G1[u]: if neighbor in mapping: if G1.number_of_edges(u, neighbor) != G2.number_of_edges( v, mapping[neighbor] ): return False for neighbor in G2[v]:
Preliminary VF2++ Implementation (#5788) * Preliminary implementation of the candidate node pair ordering of VF2++ * Removed unused lines of code * Added todos * Added demo and pseudocode for VF2++ * Pointed out a problem with the pseudocode * Initialisation of the VF2++ basis structure * Initialise the GraphMatcher * Remove useless changes * Check labels for the node ordering + demo * Code to verify the ordering * Implement the ISO feasibility check * Implement the IND feasibility * Create State class * Fixed Dan's code for the ordering * Preliminary form of the node ordering * Add visualisation * Use list comprehension for the Ti computation * Remove function * Create Unit Tests * Add labels check + update unit tests * Add pre-computation of G-labels * Remove todo * First implementation of the candidate selection * Initial version of candidate selection * Remove unnecessary files * Merge candidate selection cases into one * Create a function to incrementally update Ti and Ti_out * Unit Test for the Ti updating * Implement the Ti/Ti_out restoring * Finish the restoring of Ti and create unit test * Update test file names * Uncommented test section * Replace redundant loop with for-any * Create unit test for candidate selection using the same label for all nodes * Create unit test for candidate selection using different labels for the nodes * Update feasibility tests without the use of the state class * Create more unit tests for the feasibility checking * Provide explanation for the unit tests * First successful test of the complete ISO VF2++ algorithm (except from the buggy ordering) * Fix bug: when popping a node to climb up the DFS tree we need the previous node ordering (containing the node that we just popped) * Create a separate file for the VF2++ ISO algorithm * Delete file * Remove redundant iteration and memory use * Demo for different labels * Add benchmark for the incremental Ti updating * Remove unnecessary class * Fix bug with the ordering WOOOHOOOOO * Unit tests for the node ordering * Add unit tests for the VF2++ ISO * Fix ordering * Probablly fix logic error in ordering * Reformatted with black * Test precommit * Test precommit * Test pre commit * Testing pre commit * Update networkx/algorithms/isomorphism/tests/VF2++/test_vf2pp.py Co-authored-by: Ross Barnowski <[email protected]> * Add unit tests for vf2++ * Added vf2++ unit test * Added precheck for VF2++ * Add unit tests for the precheck * Updated the benchmarking * Updated the benchmark * Apply hooks * Add documentation for the ordering * Add documentation for the candidate selection * Added documentation for the feasibility * Added documentation for vf2++ * Separate functions for ISO feasibility * Refine unit tests * Apply hooks * Force reformat all files * Remove redundant return statements from VF2__ * Apply hooks * Apply hooks * Format * Minor changes * Add unit tests * Adjusted benchmark * Fix benchmark * Isort * Isort benchmark * Apply optimization in the candidate selection * Track matched node with pointer * Adjust benchmark * Restructure in VF2 function * Make VF2++ EXTREMELY PRETTY * Removed sorting in feasibility rules * Get rid of visited set, check mapping instead * Update networkx/algorithms/isomorphism/tests/VF2++/test_vf2pp.py Co-authored-by: Dan Schult <[email protected]> * Made color assignement deterministic in VF2++ unit tests * Add keyword argument in unit tests * Hoepfully fix pipeline errors * Add vf2++ unit tests for multigraphs * Add Unit tests for Feasibility * Add unit tests for feasibility 
on multi graphs * Finalize feasibility tests for multigraph settings * Update documentation * Remove list comprehension and boost performance * Add unit tests for both graphs and multi graphs, using same labels * Isort * Optimized precheck * Replace loop with any * Optimize multigraph chceck * Transfer except statement * Check order consistency * Cache degrees and labels from the beginning * Delete benchmark to create new * Fix precheck bug * Adjust unit tests * Add benchmark for perofmance comparison between VF2 and VF2++ * Fix Ti computing tests * Hopefully fix isort * Add benchmark for the candidate selection methods * Rename modules: lower case, remove + * Refactor VF2++ arguments * Adjust VF2++ to work with multiple node labels * Add unit tests for multiple labels * Adjust for different number of labels per node * Finish arguments of VF2++ * Add user functions * Exported the two vf2++ functions * Added underscore prefix to private functions and fixed tests * Update networkx/algorithms/isomorphism/vf2pp.py Co-authored-by: Dan Schult <[email protected]> * Update networkx/algorithms/isomorphism/demo.py Co-authored-by: Dan Schult <[email protected]> * Update networkx/algorithms/isomorphism/vf2pp.py Co-authored-by: Dan Schult <[email protected]> * Apply suggested changes * Refactor rst files * Rm unnecessary toctree from isomorphism page. * Autodoc vf2pp module + public functions. * Rm dedicated vf2pp reference article. * Rm extra vf2pp listing from autosummaries. * Add summary of three functions to module docstring. * Make sure docstrings match their functions. * Refactor everything * Format code * Add unit test * Inline process level function in node ordering * Perform intersection first rather than last * Update networkx/algorithms/isomorphism/vf2pp_helpers/candidates.py Co-authored-by: Dan Schult <[email protected]> * Replace return statement with multiple operations and make it more readable * Update networkx/algorithms/isomorphism/vf2pp_helpers/feasibility.py Co-authored-by: Dan Schult <[email protected]> * Fix multigraph bug in update_Tinout * Abstract the argmax function * Add unit test for first case of candidate selection * Create unit test for all candidate selection cases * Remove re-definition of namedtuple parameters * Update doc/reference/algorithms/isomorphism.rst Co-authored-by: Ross Barnowski <[email protected]> * Update networkx/algorithms/__init__.py Co-authored-by: Ross Barnowski <[email protected]> * Delete benchmark file * Add demo file * Create util file containing the helper functions, common across all unit tests * Fix CI/CD * Make unit tests for Ti updating specific * Remove util functions from vf2pp tests * Remove utils functions from multivf2pp tests * Remove utils functions from candidate tests * Remove utils functions from ordering checks * Remove utils functions from Ti tests * Add example in docstring * Remove unused utils functions * Separate initialization of vf2pp * Inline functions and add new abstract function for pushing to stack * Inline push to stack * Add commentsa * Separate precheck functions * Replace method with existing networkx function * Include label initialization inside parameter initializer function * Rename Tiout to Titilde * Update networkx/algorithms/isomorphism/tests/vf2pp/test_Ti_computing.py Co-authored-by: Ross Barnowski <[email protected]> * Use canonical setitem for dictionary insertions * Update networkx/algorithms/isomorphism/tests/vf2pp/test_precheck.py Co-authored-by: Ross Barnowski <[email protected]> * Remove variable 
assignement * Merge unit tests of vf2pp for graphs and multigraphs into the same file * Update networkx/algorithms/isomorphism/vf2pp.py Co-authored-by: Ross Barnowski <[email protected]> * Update networkx/algorithms/isomorphism/vf2pp.py Co-authored-by: Ross Barnowski <[email protected]> * Update networkx/algorithms/isomorphism/vf2pp.py Co-authored-by: Ross Barnowski <[email protected]> * Change variable name * Update networkx/algorithms/isomorphism/vf2pp.py Co-authored-by: Ross Barnowski <[email protected]> * Re-write ordering unit tests * Rename vf2pp solver * Update networkx/algorithms/isomorphism/vf2pp_helpers/feasibility.py Co-authored-by: Dan Schult <[email protected]> * Replace abstractified argmax function with two loops for readability * Apply final changes * Fix mistake * Update ref guide to reflect new fn names. * Update docstrings * Fix line length in module docstring * Copy updated parameter section to all 3 public fns. * Add Yields section to all_isomorphisms fn. Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Dan Schult <[email protected]>
fun_name: _consistent_PT | commit_id: bffcd74649fb95a57fb834846eb3c7d9693c55b8 | repo: networkx | file_name: feasibility.py | ast_levels: 13 | nloc: 16 | url: https://github.com/networkx/networkx.git | complexity: 7 | token_counts: 110 | n_ast_errors: 0 | vocab_size: 32 | n_ast_nodes: 164 | language: Python
{ "docstring": "Checks the consistency of extending the mapping using the current node pair.\n\n Parameters\n ----------\n u, v: Graph node\n The two candidate nodes being examined.\n\n graph_params: namedtuple\n Contains all the Graph-related parameters:\n\n G1,G2: NetworkX Graph or MultiGraph instances.\n The two graphs to check for isomorphism or monomorphism\n\n G1_labels,G2_labels: dict\n The label of every node in G1 and G2 respectively\n\n state_params: namedtuple\n Contains all the State-related parameters:\n\n mapping: dict\n The mapping as extended so far. Maps nodes of G1 to nodes of G2\n\n reverse_mapping: dict\n The reverse mapping as extended so far. Maps nodes from G2 to nodes of G1. It's basically \"mapping\" reversed\n\n T1, T2: set\n Ti contains uncovered neighbors of covered nodes from Gi, i.e. nodes that are not in the mapping, but are\n neighbors of nodes that are.\n\n T1_out, T2_out: set\n Ti_out contains all the nodes from Gi, that are neither in the mapping nor in Ti\n\n Returns\n -------\n True if the pair passes all the consistency checks successfully. False otherwise.\n ", "language": "en", "n_whitespaces": 329, "n_words": 162, "vocab_size": 94 }
def _consistent_PT(u, v, graph_params, state_params): G1, G2 = graph_params.G1, graph_params.G2 mapping, reverse_mapping = state_params.mapping, state_params.reverse_mapping for neighbor in G1[u]: if neighbor in mapping: if G1.number_of_edges(u, neighbor) != G2.number_of_edges( v, mapping[neighbor] ): return False for neighbor in G2[v]: if neighbor in reverse_mapping: if G1.number_of_edges(u, reverse_mapping[neighbor]) != G2.number_of_edges( v, neighbor ): return False return True
d_id: 14,662 | id: 67,919 | n_whitespaces: 13 | path: erpnext/stock/report/stock_balance/stock_balance.py | n_words: 23 | n_identifiers: 13
def get_variant_values_for(items): attribute_map = {} for attr in frappe.db.sql( % ", ".join(["%s"] * len(items)), tuple(items), as_dict=1, ): attribute_map.setdefault(attr["parent"], {}) attribute_map[attr["parent"]].
style: format code with black
fun_name: get_variant_values_for | commit_id: 494bd9ef78313436f0424b918f200dab8fc7c20b | repo: erpnext | file_name: stock_balance.py | ast_levels: 13 | nloc: 13 | url: https://github.com/frappe/erpnext.git | complexity: 2 | token_counts: 82 | n_ast_errors: 0 | vocab_size: 22 | n_ast_nodes: 140 | language: Python
{ "docstring": "Returns variant values for items.select parent, attribute, attribute_value\n\t\tfrom `tabItem Variant Attribute` where parent in (%s)\n\t\t", "language": "en", "n_whitespaces": 14, "n_words": 16, "vocab_size": 16 }
def get_variant_values_for(items): attribute_map = {} for attr in frappe.db.sql( % ", ".join(["%s"] * len(items)), tuple(items), as_dict=1, ): attribute_map.setdefault(attr["parent"], {}) attribute_map[attr["parent"]].update({attr["attribute"]: attr["attribute_value"]}) return attribute_map
d_id: 17,037 | id: 80,233 | n_whitespaces: 216 | path: wagtail/snippets/tests/test_locking.py | n_words: 63 | n_identifiers: 17
def test_edit_post_locked_by_self(self): # Lock the snippet self.lock_snippet(self.user) # Try to edit the snippet response = self.client.post( self.get_url("edit"), {"text": "Edited while locked"}, follow=True, )
Add tests for locking snippets
fun_name: test_edit_post_locked_by_self | commit_id: 10dbbddaf35607e4257f50dd960520a1268dd225 | repo: wagtail | file_name: test_locking.py | ast_levels: 11 | nloc: 14 | url: https://github.com/wagtail/wagtail.git | complexity: 1 | token_counts: 77 | n_ast_errors: 0 | vocab_size: 45 | n_ast_nodes: 142 | language: Python
{ "docstring": "A user can edit a snippet that is locked by themselves.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def test_edit_post_locked_by_self(self): # Lock the snippet self.lock_snippet(self.user) # Try to edit the snippet response = self.client.post( self.get_url("edit"), {"text": "Edited while locked"}, follow=True, ) self.refresh_snippet() # Should not show error message self.assertNotContains( response, f"The {self.model_name} could not be saved as it is locked", ) # Check that the snippet is still locked self.assertTrue(self.snippet.locked) # Check that the snippet is edited self.assertEqual(self.snippet.text, "Edited while locked")
d_id: 1,143 | id: 7,165 | n_whitespaces: 64 | path: ludwig/models/base.py | n_words: 18 | n_identifiers: 13
def update_metrics(self, targets, predictions):
feat: Added model type GBM (LightGBM tree learner), as an alternative to ECD (#2027)
fun_name: update_metrics | commit_id: aa0c63bf2ed825eb3ca8eff8a002d5ccbe395173 | repo: ludwig | file_name: base.py | ast_levels: 10 | nloc: 6 | url: https://github.com/ludwig-ai/ludwig.git | complexity: 2 | token_counts: 65 | n_ast_errors: 0 | vocab_size: 18 | n_ast_nodes: 101 | language: Python
{ "docstring": "Updates the model's metrics given targets and predictions.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def update_metrics(self, targets, predictions): for of_name, of_obj in self.output_features.items(): of_obj.update_metrics(targets[of_name], predictions[of_name]) eval_loss, additional_losses = self.eval_loss(targets, predictions) self.eval_loss_metric.update(eval_loss) self.eval_additional_losses_metrics.update(additional_losses)
d_id: 22,772 | id: 107,483 | n_whitespaces: 111 | path: lib/matplotlib/axis.py | n_words: 31 | n_identifiers: 8
def tick_right(self): label = True if 'label1On' in self._m
DOC: More cleanup axes -> Axes
fun_name: tick_right | commit_id: f156db08eee54d285ab0fb4e031e48d078ba6aa3 | repo: matplotlib | file_name: axis.py | ast_levels: 12 | nloc: 7 | url: https://github.com/matplotlib/matplotlib.git | complexity: 3 | token_counts: 51 | n_ast_errors: 0 | vocab_size: 26 | n_ast_nodes: 94 | language: Python
{ "docstring": "\n Move ticks and ticklabels (if present) to the right of the Axes.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 11 }
def tick_right(self): label = True if 'label1On' in self._major_tick_kw: label = (self._major_tick_kw['label1On'] or self._major_tick_kw['label2On']) self.set_ticks_position('right') # if labels were turned off before this was called # leave them off self.set_tick_params(which='both', labelright=label)
d_id: 117,451 | id: 320,942 | n_whitespaces: 23 | path: tests/unit/mainwindow/test_messageview.py | n_words: 11 | n_identifiers: 8
def test_show_message_twice(view, info1, info2, count): view.show_message(info1) view.show_message(info2) assert len(view._messages) == count
Only replace the exact same message If we have a error message followed by an info message with the same text, they should both be shown, not replaced automatically.
fun_name: test_show_message_twice | commit_id: 676e01677183825d19107d3b2fbf1bb2c0684ede | repo: qutebrowser | file_name: test_messageview.py | ast_levels: 9 | nloc: 4 | url: https://github.com/qutebrowser/qutebrowser.git | complexity: 1 | token_counts: 33 | n_ast_errors: 0 | vocab_size: 11 | n_ast_nodes: 53 | language: Python
{ "docstring": "Show the exact same message twice -> only one should be shown.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def test_show_message_twice(view, info1, info2, count): view.show_message(info1) view.show_message(info2) assert len(view._messages) == count
d_id: 70,858 | id: 245,712 | n_whitespaces: 275 | path: mmdet/models/task_modules/assigners/iou2d_calculator.py | n_words: 94 | n_identifiers: 17
def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False): bboxes1 = get_box_tensor(bboxes1) bboxes2 = get_box_tensor(bboxes2) assert bboxes1.size(-1) in [0, 4, 5] assert bboxes2.size(-1) in [0, 4, 5] if bboxes2.size(-1) == 5: bboxes2 = bboxes2[..., :4] if bboxes1.size(-1) == 5: bboxes1 = bboxes1[..., :4] if self.dtype == 'fp16': # change tensor type to save cpu and cuda memory and keep speed bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype) bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype) overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned) if not overlaps.is_cuda and overlaps.dtype == torch.float16: # resume cpu float32
[Refactor] Refactor anchor head and base head with boxlist (#8625) * Refactor anchor head * Update * Update * Update * Add a series of boxes tools * Fix box type to support n x box_dim boxes * revert box type changes * Add docstring * refactor retina_head * Update * Update * Fix comments * modify docstring of coder and ioucalculator * Replace with_boxlist with use_box_type
fun_name: __call__ | commit_id: d915740fa8228cf57741b27d9e5d66e358456b8e | repo: mmdetection | file_name: iou2d_calculator.py | ast_levels: 12 | nloc: 17 | url: https://github.com/open-mmlab/mmdetection.git | complexity: 6 | token_counts: 183 | n_ast_errors: 0 | vocab_size: 54 | n_ast_nodes: 279 | language: Python
{ "docstring": "Calculate IoU between 2D bboxes.\n\n Args:\n bboxes1 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4)\n in <x1, y1, x2, y2> format, or shape (m, 5) in <x1, y1, x2,\n y2, score> format.\n bboxes2 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4)\n in <x1, y1, x2, y2> format, shape (m, 5) in <x1, y1, x2, y2,\n score> format, or be empty. If ``is_aligned `` is ``True``,\n then m and n must be equal.\n mode (str): \"iou\" (intersection over union), \"iof\" (intersection\n over foreground), or \"giou\" (generalized intersection over\n union).\n is_aligned (bool, optional): If True, then m and n must be equal.\n Default False.\n\n Returns:\n Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)\n ", "language": "en", "n_whitespaces": 311, "n_words": 115, "vocab_size": 64 }
def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False): bboxes1 = get_box_tensor(bboxes1) bboxes2 = get_box_tensor(bboxes2) assert bboxes1.size(-1) in [0, 4, 5] assert bboxes2.size(-1) in [0, 4, 5] if bboxes2.size(-1) == 5: bboxes2 = bboxes2[..., :4] if bboxes1.size(-1) == 5: bboxes1 = bboxes1[..., :4] if self.dtype == 'fp16': # change tensor type to save cpu and cuda memory and keep speed bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype) bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype) overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned) if not overlaps.is_cuda and overlaps.dtype == torch.float16: # resume cpu float32 overlaps = overlaps.float() return overlaps return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
d_id: 21,037 | id: 101,629 | n_whitespaces: 20 | path: tools/sort/sort_methods_aligned.py | n_words: 6 | n_identifiers: 6
def binning(self) -> List[List[str]]: return self._binning_linear_threshold(multiplier=100)
Overhaul sort: - Standardize image data reading and writing - Optimize loading (just one pass required) - Make all sort groups binnable (to greater or lesser results) - Add sort by pitch - Deprecate multiple options - linting, docs + locales
fun_name: binning | commit_id: 98d01760e469fd2108eed8d0b0a1ba6297c3177c | repo: faceswap | file_name: sort_methods_aligned.py | ast_levels: 8 | nloc: 9 | url: https://github.com/deepfakes/faceswap.git | complexity: 1 | token_counts: 23 | n_ast_errors: 0 | vocab_size: 6 | n_ast_nodes: 38 | language: Python
{ "docstring": " Create bins to split linearly from the lowest to the highest sample value\n\n Returns\n -------\n list\n List of bins of filenames\n ", "language": "en", "n_whitespaces": 61, "n_words": 21, "vocab_size": 17 }
def binning(self) -> List[List[str]]: return self._binning_linear_threshold(multiplier=100)
d_id: 54,062 | id: 215,636 | n_whitespaces: 21 | path: salt/transport/base.py | n_words: 7 | n_identifiers: 6
def connect(self, publish_port, connect_callback=None, disconnect_callback=None): raise NotImpleme
Add NotImplimentedError to stubs
fun_name: connect | commit_id: 8683fed190f0ac807ab3f87e0e66808f7dbc130c | repo: salt | file_name: base.py | ast_levels: 6 | nloc: 2 | url: https://github.com/saltstack/salt.git | complexity: 1 | token_counts: 18 | n_ast_errors: 0 | vocab_size: 7 | n_ast_nodes: 28 | language: Python
{ "docstring": "\n Create a network connection to the the PublishServer or broker.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 9 }
def connect(self, publish_port, connect_callback=None, disconnect_callback=None): raise NotImplementedError
d_id: 47,586 | id: 196,086 | n_whitespaces: 79 | path: sympy/combinatorics/free_groups.py | n_words: 18 | n_identifiers: 6
def contains(self, g): if not isinstance(g, FreeGroupElement): return False elif self != g.group: return False else: ret
Updated import locations
fun_name: contains | commit_id: 498015021131af4dbb07eb110e5badaba8250c7b | repo: sympy | file_name: free_groups.py | ast_levels: 8 | nloc: 7 | url: https://github.com/sympy/sympy.git | complexity: 3 | token_counts: 32 | n_ast_errors: 0 | vocab_size: 15 | n_ast_nodes: 53 | language: Python
{ "docstring": "Tests if Free Group element ``g`` belong to self, ``G``.\n\n In mathematical terms any linear combination of generators\n of a Free Group is contained in it.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import free_group\n >>> f, x, y, z = free_group(\"x y z\")\n >>> f.contains(x**3*y**2)\n True\n\n ", "language": "en", "n_whitespaces": 108, "n_words": 45, "vocab_size": 40 }
def contains(self, g): if not isinstance(g, FreeGroupElement): return False elif self != g.group: return False else: return True
d_id: 14,170 | id: 66,339 | n_whitespaces: 16 | path: erpnext/loan_management/doctype/loan_security_unpledge/loan_security_unpledge.py | n_words: 34 | n_identifiers: 14
def get_pledged_security_qty(loan): current_pledges = {} unpledges = frappe._dict( frappe.db.sql( , (loan), ) ) pledges = frappe._dict( frappe.db.sql( , (loan), ) ) for security, qt
style: format code with black
fun_name: get_pledged_security_qty | commit_id: 494bd9ef78313436f0424b918f200dab8fc7c20b | repo: erpnext | file_name: loan_security_unpledge.py | ast_levels: 11 | nloc: 32 | url: https://github.com/frappe/erpnext.git | complexity: 2 | token_counts: 85 | n_ast_errors: 0 | vocab_size: 24 | n_ast_nodes: 131 | language: Python
{ "docstring": "\n\t\tSELECT u.loan_security, sum(u.qty) as qty\n\t\tFROM `tabLoan Security Unpledge` up, `tabUnpledge` u\n\t\tWHERE up.loan = %s\n\t\tAND u.parent = up.name\n\t\tAND up.status = 'Approved'\n\t\tGROUP BY u.loan_security\n\t\n\t\tSELECT p.loan_security, sum(p.qty) as qty\n\t\tFROM `tabLoan Security Pledge` lp, `tabPledge`p\n\t\tWHERE lp.loan = %s\n\t\tAND p.parent = lp.name\n\t\tAND lp.status = 'Pledged'\n\t\tGROUP BY p.loan_security\n\t", "language": "en", "n_whitespaces": 41, "n_words": 53, "vocab_size": 35 }
def get_pledged_security_qty(loan): current_pledges = {} unpledges = frappe._dict( frappe.db.sql( , (loan), ) ) pledges = frappe._dict( frappe.db.sql( , (loan), ) ) for security, qty in pledges.items(): current_pledges.setdefault(security, qty) current_pledges[security] -= unpledges.get(security, 0.0) return current_pledges
d_id: 51,234 | id: 205,838 | n_whitespaces: 436 | path: django/db/models/sql/compiler.py | n_words: 101 | n_identifiers: 25
def get_select(self): select = [] klass_info = None annotations = {} select_idx = 0 for alias, (sql, params) in self.query.extra_select.items(): annotations[alias] = select_idx select.append((RawSQL(sql, params), alias)) select_idx += 1 assert not (self.query.select and self.query.default_cols) if self.query.default_cols: cols = self.get_default_columns() else: # self.query.select is a special case. These columns never go to # any model. cols = self.query.select if cols: select_list = [] for col in cols: select_list.append(select_idx) select.append((col, None)) select_idx += 1 klass_info = { "model": self.query.model, "select_fields": select_list, } for alias, annotation in self.query.annotation_select.items(): annotations[alias] = select_idx select.appe
Refs #33476 -- Reformatted code with Black.
fun_name: get_select | commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | repo: django | file_name: compiler.py | ast_levels: 12 | nloc: 49 | url: https://github.com/django/django.git | complexity: 11 | token_counts: 311 | n_ast_errors: 0 | vocab_size: 65 | n_ast_nodes: 327 | language: Python
{ "docstring": "\n Return three values:\n - a list of 3-tuples of (expression, (sql, params), alias)\n - a klass_info structure,\n - a dictionary of annotations\n\n The (sql, params) is what the expression will produce, and alias is the\n \"AS alias\" for the column (possibly None).\n\n The klass_info structure contains the following information:\n - The base model of the query.\n - Which columns for that model are present in the query (by\n position of the select clause).\n - related_klass_infos: [f, klass_info] to descent into\n\n The annotations is a dictionary of {'attname': column position} values.\n ", "language": "en", "n_whitespaces": 184, "n_words": 90, "vocab_size": 59 }
def get_select(self): select = [] klass_info = None annotations = {} select_idx = 0 for alias, (sql, params) in self.query.extra_select.items(): annotations[alias] = select_idx select.append((RawSQL(sql, params), alias)) select_idx += 1 assert not (self.query.select and self.query.default_cols) if self.query.default_cols: cols = self.get_default_columns() else: # self.query.select is a special case. These columns never go to # any model. cols = self.query.select if cols: select_list = [] for col in cols: select_list.append(select_idx) select.append((col, None)) select_idx += 1 klass_info = { "model": self.query.model, "select_fields": select_list, } for alias, annotation in self.query.annotation_select.items(): annotations[alias] = select_idx select.append((annotation, alias)) select_idx += 1 if self.query.select_related: related_klass_infos = self.get_related_selections(select) klass_info["related_klass_infos"] = related_klass_infos
77,467
263,850
81
PyInstaller/depend/imphookapi.py
20
7
def set_module_collection_mode(self, name, mode): if name is None: name = self.__name__ if mode is None: self._module_collection_mode.pop(name) else:
building & hooks: implement module collection mode setting Implement a mechanism for controlling the collection mode of modules and packages, with granularity ranging from top-level packages to individual sub-modules. Therefore, the hooks can now specify whether the hooked package should be collected as byte-compiled .pyc modules into embedded PYZ archive (the default behavior), or as source .py files collected as external data files (without corresponding modules in the PYZ archive). The latter option should let us avoid unnecessary .pyc module collection when the source files are required by the code, or work around the situations where having a .pyc module in PYZ archive causes issues due to FrozenImporter's incompatibility with sys.path manipulation that some packages attempt to perform. This feature adds a new optional global hook variable, called `module_collection_mode`. The value can be either a string ("py" or "pyc") or a dictionary of module names and setting strings. In the case of a string, the setting affects the hooked module or a package, and is applied recursively to all sub-packages and sub-modules, unless another hook overrides it. The dictionary setting allows a hook to specify different settings for the package and it subpackages, or even different settings for other packages. A corresponding `set_module_collection_mode` method has been added to the `hook_api` object for adjusting the collection mode from within the `hook()` function. The `Analysis` object can now also be passed a dictionary via an optional `module_collection_mode` argument; the corresponding settings are applied last, which allows advanced users to both supplement and override the settings made by the hooks.
set_module_collection_mode
5b2ab7067ba954bd7950a79ed31e5ee177ff3f43
pyinstaller
imphookapi.py
11
7
https://github.com/pyinstaller/pyinstaller.git
3
43
0
14
70
Python
{ "docstring": "\"\n Set the package/module collection mode for the specified module\n name. If `name` is `None`, the hooked module/package name is used.\n Valid values for `mode` are: `'pyc'`, `'py'`, and `None`.\n ", "language": "en", "n_whitespaces": 58, "n_words": 30, "vocab_size": 26 }
def set_module_collection_mode(self, name, mode): if name is None: name = self.__name__ if mode is None: self._module_collection_mode.pop(name) else: self._module_collection_mode[name] = mode
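A minimal hook sketch, assuming a hypothetical hook file for a package named mypkg; per the commit notes above, the hook() callback receives the hook_api object and may adjust the collection mode there:

# hook-mypkg.py (hypothetical hook module)
def hook(hook_api):
    # Collect the hooked package as source .py files rather than .pyc modules in the PYZ archive.
    hook_api.set_module_collection_mode(None, "py")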
56,817
222,933
55
python3.10.4/Lib/distutils/file_util.py
18
8
def write_file (filename, contents): f = open(filename, "w") try: for line in contents: f.writ
add python 3.10.4 for windows
write_file
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
file_util.py
13
7
https://github.com/XX-net/XX-Net.git
3
38
0
18
69
Python
{ "docstring": "Create a file with the specified name and write 'contents' (a\n sequence of strings without line terminators) to it.\n ", "language": "en", "n_whitespaces": 25, "n_words": 19, "vocab_size": 19 }
def write_file (filename, contents): f = open(filename, "w") try: for line in contents: f.write(line + "\n") finally: f.close()
11,792
58,650
41
tests/orion/schemas/test_core.py
13
8
async def test_flow_run_policy_is_backwards_compatible(self): empty_new_policy = schemas.core.FlowRunPolicy() # should not raise an error self.OldFlowRu
Revert breaking schema change (#6521) * Revert breaking schema change * Add new retry properties on policies; deprecate prior ones * Add tests for schema compat * Use root_validator to populate properties from deprecated
test_flow_run_policy_is_backwards_compatible
82c78fe8b65117dc5fe89365acb62e7aa902f8ba
prefect
test_core.py
10
3
https://github.com/PrefectHQ/prefect.git
1
26
0
13
49
Python
{ "docstring": "\n In version 2.1.1 and prior, the FlowRunPolicy schema required two properties,\n `max_retries` and `retry_delay_seconds`. These properties are deprecated.\n\n This test ensures old clients can load new FlowRunPolicySchemas. It can be removed\n when the corresponding properties are removed.\n ", "language": "en", "n_whitespaces": 73, "n_words": 37, "vocab_size": 32 }
async def test_flow_run_policy_is_backwards_compatible(self): empty_new_policy = schemas.core.FlowRunPolicy() # should not raise an error self.OldFlowRunPolicy(**empty_new_policy.dict())
44,041
183,062
33
tests/css/test_help_text.py
18
5
def test_help_text_examples_are_contextualized(): rendered_inline = render(spacing_invalid_value("padding", "inline")) assert "widget.styles.padding" in rendered_inline rendered_css = render(spacing_invalid_value("padding", "css")) assert "padding:" in rendered_css
Testing for help text
test_help_text_examples_are_contextualized
91783b7c1e06a45e93fd89dbdb6aa3d1a9c2e990
textual
test_help_text.py
11
5
https://github.com/Textualize/textual.git
1
35
0
12
70
Python
{ "docstring": "Ensure that if the user is using CSS, they see CSS-specific examples\n and if they're using inline styles they see inline-specific examples.", "language": "en", "n_whitespaces": 24, "n_words": 22, "vocab_size": 18 }
def test_help_text_examples_are_contextualized(): rendered_inline = render(spacing_invalid_value("padding", "inline")) assert "widget.styles.padding" in rendered_inline rendered_css = render(spacing_invalid_value("padding", "css")) assert "padding:" in rendered_css
@pytest.mark.parametrize("patch", [True, False]) @pytest.mark.parametrize("connection_strategy", ["eager", "lazy"])
73,885
251,912
245
test/mitmproxy/proxy/layers/test_modes.py
80
30
def test_reverse_proxy(tctx, keep_host_header): server = Placeholder(Server) tctx.options.mode = "reverse:http://localhost:8000" tctx.options.connection_strategy = "lazy" tctx.options.keep_host_header = keep_host_header assert ( Playbook(modes.ReverseProxy(tctx), hooks=False) >> DataReceived( tctx.client, b"GET /foo HTTP/1.1\r\n" b"Host: example.com\r\n\r\n" ) << NextLayerHook(Placeholder(NextLayer)) >> reply_next_layer(lambda ctx: http.HttpLayer(ctx, HTTPMode.transparent)) << OpenConnection(server) >> reply(None) << SendData( server, b"GET /foo HTTP/1.1\r\n" b"Host: " + (b"example.com" if keep_host_header else b"localhost:8000") + b"\r\n\r\n", ) >> DataReceived(server, b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") << SendData(tctx.client, b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") ) assert server().address == ("localhost", 8000) @pytest.mark.parametrize("patch", [True, False]) @pytest.mark.parametrize("connection_strategy", ["eager", "lazy"])
make it black!
test_reverse_proxy
b3587b52b25077f68116b9852b041d33e7fc6601
mitmproxy
test_modes.py
18
25
https://github.com/mitmproxy/mitmproxy.git
2
160
1
58
319
Python
{ "docstring": "Test mitmproxy in reverse proxy mode.\n\n - make sure that we connect to the right host\n - make sure that we respect keep_host_header\n - make sure that we include non-standard ports in the host header (#4280)\n ", "language": "en", "n_whitespaces": 48, "n_words": 36, "vocab_size": 23 }
def test_reverse_proxy(tctx, keep_host_header): server = Placeholder(Server) tctx.options.mode = "reverse:http://localhost:8000" tctx.options.connection_strategy = "lazy" tctx.options.keep_host_header = keep_host_header assert ( Playbook(modes.ReverseProxy(tctx), hooks=False) >> DataReceived( tctx.client, b"GET /foo HTTP/1.1\r\n" b"Host: example.com\r\n\r\n" ) << NextLayerHook(Placeholder(NextLayer)) >> reply_next_layer(lambda ctx: http.HttpLayer(ctx, HTTPMode.transparent)) << OpenConnection(server) >> reply(None) << SendData( server, b"GET /foo HTTP/1.1\r\n" b"Host: " + (b"example.com" if keep_host_header else b"localhost:8000") + b"\r\n\r\n", ) >> DataReceived(server, b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") << SendData(tctx.client, b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") ) assert server().address == ("localhost", 8000) @pytest.mark.parametrize("patch", [True, False]) @pytest.mark.parametrize("connection_strategy", ["eager", "lazy"])
50,337
203,366
226
django/contrib/admin/checks.py
36
16
def _check_filter_horizontal(self, obj): if not isinstance(obj.filter_horizontal, (list, tuple)): return must_be( "a list or tuple", option="filter_horizontal", obj=obj, id="admin.E018" ) else: return list( chain.from_iterable( self._check_filter_item( obj, field_name, "filter_horizontal[%d]" % index
Refs #33476 -- Reformatted code with Black.
_check_filter_horizontal
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
checks.py
16
14
https://github.com/django/django.git
3
74
0
32
119
Python
{ "docstring": "Check that filter_horizontal is a sequence of field names.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def _check_filter_horizontal(self, obj): if not isinstance(obj.filter_horizontal, (list, tuple)): return must_be( "a list or tuple", option="filter_horizontal", obj=obj, id="admin.E018" ) else: return list( chain.from_iterable( self._check_filter_item( obj, field_name, "filter_horizontal[%d]" % index ) for index, field_name in enumerate(obj.filter_horizontal) ) )
4,523
23,163
49
ppocr/data/imaug/fce_aug.py
24
9
def poly_intersection(poly_det, poly_gt): assert isinstance(poly_det, plg.Polygon) assert isinstance(poly_gt, plg.Polygon) poly_inter = poly_det & poly_gt if len(poly_inter) == 0: return 0, poly_in
add fcenet
poly_intersection
9f62b610dea6161627200ed85d92e19b1923279a
PaddleOCR
fce_aug.py
8
7
https://github.com/PaddlePaddle/PaddleOCR.git
2
51
0
19
81
Python
{ "docstring": "Calculate the intersection area between two polygon.\n\n Args:\n poly_det (Polygon): A polygon predicted by detector.\n poly_gt (Polygon): A gt polygon.\n\n Returns:\n intersection_area (float): The intersection area between two polygons.\n ", "language": "en", "n_whitespaces": 59, "n_words": 29, "vocab_size": 22 }
def poly_intersection(poly_det, poly_gt): assert isinstance(poly_det, plg.Polygon) assert isinstance(poly_gt, plg.Polygon) poly_inter = poly_det & poly_gt if len(poly_inter) == 0: return 0, poly_inter return poly_inter.area(), poly_inter
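A minimal usage sketch, assuming the Polygon3 package is installed and imported as plg, as in the module above:

import Polygon as plg

square = plg.Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
shifted = plg.Polygon([(1, 1), (3, 1), (3, 3), (1, 3)])
area, inter = poly_intersection(square, shifted)
# the overlap is the unit square from (1, 1) to (2, 2), so area == 1.0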
73,544
250,781
26
mitmproxy/dns.py
12
10
def size(self) -> int: return sum(len(x.data) for x in [*self.answers, *self.authorities, *self.additionals])
[dns] first commit
size
8c700ec6e45fc69379eec230da1bd840854ac20e
mitmproxy
dns.py
11
3
https://github.com/mitmproxy/mitmproxy.git
2
37
0
12
59
Python
{ "docstring": "Returns the cumulative data size of all resource record sections.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
def size(self) -> int: return sum(len(x.data) for x in [*self.answers, *self.authorities, *self.additionals])
28,234
126,691
340
dashboard/modules/snapshot/snapshot_head.py
49
18
async def get_job_submission_info(self): jobs = {} fetched_jobs = await self._job_info_client.get_all_jobs() for ( job_submission_id, job_info, ) in fetched_jobs.items(): if job_info is not None: entry = { "jo
Convert job_manager to be async (#27123) Updates jobs api Updates snapshot api Updates state api Increases jobs api version to 2 Signed-off-by: Alan Guo [email protected] Why are these changes needed? follow-up for #25902 (comment)
get_job_submission_info
326b5bd1acc6d3d00ab0546e4ae45da6bed501f7
ray
snapshot_head.py
13
21
https://github.com/ray-project/ray.git
3
104
0
43
173
Python
{ "docstring": "Info for Ray job submission. Here a job can have 0 or many drivers.", "language": "en", "n_whitespaces": 14, "n_words": 14, "vocab_size": 13 }
async def get_job_submission_info(self): jobs = {} fetched_jobs = await self._job_info_client.get_all_jobs() for ( job_submission_id, job_info, ) in fetched_jobs.items(): if job_info is not None: entry = { "job_submission_id": job_submission_id, "status": job_info.status, "message": job_info.message, "error_type": job_info.error_type, "start_time": job_info.start_time, "end_time": job_info.end_time, "metadata": job_info.metadata, "runtime_env": job_info.runtime_env, "entrypoint": job_info.entrypoint, } jobs[job_submission_id] = entry return jobs
48,473
197,330
156
sympy/ntheory/qs.py
65
11
def _gen_sieve_array(M, factor_base): sieve_array =
Remove abbreviations in documentation
_gen_sieve_array
65be461082dda54c8748922f9c29a19af1279fe1
sympy
qs.py
13
12
https://github.com/sympy/sympy.git
6
112
0
45
171
Python
{ "docstring": "Sieve Stage of the Quadratic Sieve. For every prime in the factor_base\n that does not divide the coefficient `a` we add log_p over the sieve_array\n such that ``-M <= soln1 + i*p <= M`` and ``-M <= soln2 + i*p <= M`` where `i`\n is an integer. When p = 2 then log_p is only added using\n ``-M <= soln1 + i*p <= M``.\n\n Parameters\n ==========\n\n M : sieve interval\n factor_base : factor_base primes\n ", "language": "en", "n_whitespaces": 104, "n_words": 74, "vocab_size": 52 }
def _gen_sieve_array(M, factor_base): sieve_array = [0]*(2*M + 1) for factor in factor_base: if factor.soln1 is None: #The prime does not divides a continue for idx in range((M + factor.soln1) % factor.prime, 2*M, factor.prime): sieve_array[idx] += factor.log_p if factor.prime == 2: continue #if prime is 2 then sieve only with soln_1_p for idx in range((M + factor.soln2) % factor.prime, 2*M, factor.prime): sieve_array[idx] += factor.log_p return sieve_array
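As a small worked check of the indexing above: with M = 5, prime p = 3 and soln1 = 1, the loop starts at (M + soln1) % p = 0 and steps by 3 over range(0, 10), so log_p is added at indices 0, 3, 6 and 9 of the length-11 sieve array; those slots correspond to x = idx - M = -5, -2, 1 and 4, each of which is congruent to soln1 = 1 modulo 3, which is exactly the set of residues the docstring describes on [-M, M].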
1,906
10,748
122
setup.py
46
9
def rescue_docarray(): try: import docarray as docarray __docarray_version__ = docarray.__version__ except AttributeError: # Being here means docarray is not installed correctly, attempt to reinstall it # as recommended by pip https://pip.pypa.io/en/latest/user_guide/#using-pip-from-your-program import subprocess subprocess.check_call( [sys.executable, '-m', 'pip', 'uninstall', '--yes', 'docarray']
fix: rescue docarray in setup (#4203)
rescue_docarray
1f2c86359246e00eae7cba081d9e952cb64c9aea
jina
setup.py
12
10
https://github.com/jina-ai/jina.git
2
59
0
39
110
Python
{ "docstring": "Upgrading from 2.x to 3.x is broken (https://github.com/jina-ai/jina/issues/4194)\n This function checks if docarray is broken and if so attempts to rescue it\n ", "language": "en", "n_whitespaces": 28, "n_words": 22, "vocab_size": 18 }
def rescue_docarray(): try: import docarray as docarray __docarray_version__ = docarray.__version__ except AttributeError: # Being here means docarray is not installed correctly, attempt to reinstall it # as recommended by pip https://pip.pypa.io/en/latest/user_guide/#using-pip-from-your-program import subprocess subprocess.check_call( [sys.executable, '-m', 'pip', 'uninstall', '--yes', 'docarray'] ) subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'docarray'])
75,313
258,597
161
sklearn/kernel_approximation.py
55
15
def transform(self, X): msg = ( "%(name)s is not fitted. Call f
DOC Fix docstring for AdditiveChi2Sampler (#22138)
transform
ff85a34c95a9d8de13805be55f1a72f1b7ee2a42
scikit-learn
kernel_approximation.py
10
11
https://github.com/scikit-learn/scikit-learn.git
2
68
0
47
118
Python
{ "docstring": "Apply approximate feature map to X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Returns\n -------\n X_new : {ndarray, sparse matrix}, \\\n shape = (n_samples, n_features * (2*sample_steps - 1))\n Whether the return value is an array or sparse matrix depends on\n the type of the input X.\n ", "language": "en", "n_whitespaces": 173, "n_words": 66, "vocab_size": 50 }
def transform(self, X): msg = ( "%(name)s is not fitted. Call fit to set the parameters before" " calling transform" ) check_is_fitted(self, msg=msg) X = self._validate_data(X, accept_sparse="csr", reset=False) check_non_negative(X, "X in AdditiveChi2Sampler.transform") sparse = sp.issparse(X) # zeroth component # 1/cosh = sech # cosh(0) = 1.0 transf = self._transform_sparse if sparse else self._transform_dense return transf(X)
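A minimal usage sketch, assuming scikit-learn and NumPy are available; the output width follows the n_features * (2*sample_steps - 1) rule from the docstring:

import numpy as np
from sklearn.kernel_approximation import AdditiveChi2Sampler

X = np.random.RandomState(0).rand(4, 3)      # non-negative, as transform() requires
chi2 = AdditiveChi2Sampler(sample_steps=2)
X_new = chi2.fit(X).transform(X)
print(X_new.shape)                           # (4, 9): 3 features * (2*2 - 1)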
12,561
61,417
53
.venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py
14
4
def get_repository_root(cls, location): # type: (str) -> Optional[str] if cls.is_repository_directory(location): return location return None
upd; format
get_repository_root
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
transferlearning
versioncontrol.py
7
4
https://github.com/jindongwang/transferlearning.git
2
20
0
13
35
Python
{ "docstring": "\n Return the \"root\" (top-level) directory controlled by the vcs,\n or `None` if the directory is not in any.\n\n It is meant to be overridden to implement smarter detection\n mechanisms for specific vcs.\n\n This can do more than is_repository_directory() alone. For\n example, the Git override checks that Git is actually available.\n ", "language": "en", "n_whitespaces": 100, "n_words": 50, "vocab_size": 42 }
def get_repository_root(cls, location): # type: (str) -> Optional[str] if cls.is_repository_directory(location): return location return None
517
3,695
41
airbyte-integrations/connectors/source-hubspot/unit_tests/test_client.py
26
17
def test_it_should_not_read_quotes_stream_if_it_does_not_exist_in_client(oauth_config, configured_catalog): source = SourceHubspot() all_records = list(source.read(logger, config=oauth_config, catalog=configured_catalog, state=None)) records = [record for record in all_records if reco
Source Hubspot: fix "quotes" key error exception (#10055) * check if stream exists in source * check if stream exists in source, added comment * test skipping reading quotes stream * format code * airbyte-cdk version * added __init__.py to unit_tests * fix importing airbyte models * bump the version * update spec and def yamls Co-authored-by: auganbay <[email protected]>
test_it_should_not_read_quotes_stream_if_it_does_not_exist_in_client
b22efc03a18c5545c12cf8a0462dea7505aec410
airbyte
test_client.py
11
5
https://github.com/airbytehq/airbyte.git
3
56
0
22
87
Python
{ "docstring": "\n If 'quotes' stream is not in the client, it should skip it.\n ", "language": "en", "n_whitespaces": 19, "n_words": 12, "vocab_size": 12 }
def test_it_should_not_read_quotes_stream_if_it_does_not_exist_in_client(oauth_config, configured_catalog): source = SourceHubspot() all_records = list(source.read(logger, config=oauth_config, catalog=configured_catalog, state=None)) records = [record for record in all_records if record.type == Type.RECORD] assert not records
29,221
130,300
553
python/ray/_private/utils.py
226
17
def get_conda_env_dir(env_name): conda_prefix = os.environ.get("CONDA_PREFIX") if conda_prefix is None: # The caller is neither in a conda env or in (base) env. This is rare # because by default, new terminals start in (base), but we can still # support this case. conda_exe = os.environ.get("CONDA_EXE") if conda_exe is None: raise ValueError( "Cannot find environment variables set by conda. " "Please verify conda is installed." ) # Example: CONDA_EXE=$HOME/anaconda3/bin/python # Strip out /bin/python by going up two parent directories. conda_prefix = str(Path(conda_exe).parent.parent) # There are two cases: # 1. We are in a conda (base) env: CONDA_DEFAULT_ENV=base and # CONDA_PREFIX=$HOME/anaconda3 # 2. We are in a user-created conda env: CONDA_DEFAULT_ENV=$env_name and # CONDA_PREFIX=$HOME/anaconda3/envs/$current_env_name if os.environ.get("CONDA_DEFAULT_ENV") == "base": # Caller's curent environment is (base). # Not recommended by conda, but we can still support it. if env_name == "base": # Desired environment is (base), located at e.g. $HOME/anaconda3 env_dir = conda_prefix else: # Des
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
get_conda_env_dir
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
ray
utils.py
14
26
https://github.com/ray-project/ray.git
6
142
0
130
268
Python
{ "docstring": "Find and validate the conda directory for a given conda environment.\n\n For example, given the environment name `tf1`, this function checks\n the existence of the corresponding conda directory, e.g.\n `/Users/scaly/anaconda3/envs/tf1`, and returns it.\n ", "language": "en", "n_whitespaces": 45, "n_words": 33, "vocab_size": 26 }
def get_conda_env_dir(env_name): conda_prefix = os.environ.get("CONDA_PREFIX") if conda_prefix is None: # The caller is neither in a conda env or in (base) env. This is rare # because by default, new terminals start in (base), but we can still # support this case. conda_exe = os.environ.get("CONDA_EXE") if conda_exe is None: raise ValueError( "Cannot find environment variables set by conda. " "Please verify conda is installed." ) # Example: CONDA_EXE=$HOME/anaconda3/bin/python # Strip out /bin/python by going up two parent directories. conda_prefix = str(Path(conda_exe).parent.parent) # There are two cases: # 1. We are in a conda (base) env: CONDA_DEFAULT_ENV=base and # CONDA_PREFIX=$HOME/anaconda3 # 2. We are in a user-created conda env: CONDA_DEFAULT_ENV=$env_name and # CONDA_PREFIX=$HOME/anaconda3/envs/$current_env_name if os.environ.get("CONDA_DEFAULT_ENV") == "base": # Caller's curent environment is (base). # Not recommended by conda, but we can still support it. if env_name == "base": # Desired environment is (base), located at e.g. $HOME/anaconda3 env_dir = conda_prefix else: # Desired environment is user-created, e.g. # $HOME/anaconda3/envs/$env_name env_dir = os.path.join(conda_prefix, "envs", env_name) else: # Now `conda_prefix` should be something like # $HOME/anaconda3/envs/$current_env_name # We want to replace the last component with the desired env name. conda_envs_dir = os.path.split(conda_prefix)[0] env_dir = os.path.join(conda_envs_dir, env_name) if not os.path.isdir(env_dir): raise ValueError( "conda env " + env_name + " not found in conda envs directory. Run `conda env list` to " + "verify the name is correct." ) return env_dir
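A usage sketch, assuming a user-created environment named tf1 exists under the active Anaconda installation:

env_dir = get_conda_env_dir("tf1")
# e.g. /home/user/anaconda3/envs/tf1; a ValueError is raised if no such env directory is found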
110,215
311,550
147
tests/components/homekit_controller/test_switch.py
39
14
async def test_switch_change_outlet_state(hass, utcnow): helper = await setup_test_component(hass, create_switch_service) await hass.services.async_call( "switch", "turn_on", {"entity_id": "switch.testdevice"}, blocking=True ) helper.async_assert_service_values( ServicesTypes.OUTLET, { CharacteristicsTypes.ON: 1, }, ) await hass.services.async_call( "switch", "turn_off", {"entity_id": "switch.testdevice"}, blocking=True ) helper.async_assert_service_values( ServicesTypes.OUTLET, {
Improve homekit_controller tests (#65266)
test_switch_change_outlet_state
58b8c30221a6f6e5acbbe98b7e3298b03fb741f5
core
test_switch.py
11
20
https://github.com/home-assistant/core.git
1
95
0
24
158
Python
{ "docstring": "Test that we can turn a HomeKit outlet on and off again.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
async def test_switch_change_outlet_state(hass, utcnow): helper = await setup_test_component(hass, create_switch_service) await hass.services.async_call( "switch", "turn_on", {"entity_id": "switch.testdevice"}, blocking=True ) helper.async_assert_service_values( ServicesTypes.OUTLET, { CharacteristicsTypes.ON: 1, }, ) await hass.services.async_call( "switch", "turn_off", {"entity_id": "switch.testdevice"}, blocking=True ) helper.async_assert_service_values( ServicesTypes.OUTLET, { CharacteristicsTypes.ON: 0, }, )
22,743
107,428
460
lib/matplotlib/axis.py
111
30
def _update_label_position(self, renderer): if not self._autolabelpos: return # get bounding boxes for this axis and any siblings # that have been set by `fig.align_xlabels()` bboxes, bboxes2 = self._get_tick_boxes_siblings(renderer=renderer) x, y = self.label.get_position() if self.label_position == 'bottom': try: spine = self.axes.spines['bottom'] spinebbox = spine.get_window_extent() except KeyError: # use axes if spine doesn't
FIX: use window_extent instead
_update_label_position
c0a78bdff86d7b02b8a23d373b64c72297f935d5
matplotlib
axis.py
16
27
https://github.com/matplotlib/matplotlib.git
5
191
0
65
315
Python
{ "docstring": "\n Update the label position based on the bounding box enclosing\n all the ticklabels and axis spine\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 14 }
def _update_label_position(self, renderer): if not self._autolabelpos: return # get bounding boxes for this axis and any siblings # that have been set by `fig.align_xlabels()` bboxes, bboxes2 = self._get_tick_boxes_siblings(renderer=renderer) x, y = self.label.get_position() if self.label_position == 'bottom': try: spine = self.axes.spines['bottom'] spinebbox = spine.get_window_extent() except KeyError: # use axes if spine doesn't exist spinebbox = self.axes.bbox bbox = mtransforms.Bbox.union(bboxes + [spinebbox]) bottom = bbox.y0 self.label.set_position( (x, bottom - self.labelpad * self.figure.dpi / 72) ) else: try: spine = self.axes.spines['top'] spinebbox = spine.get_window_extent() except KeyError: # use axes if spine doesn't exist spinebbox = self.axes.bbox bbox = mtransforms.Bbox.union(bboxes2 + [spinebbox]) top = bbox.y1 self.label.set_position( (x, top + self.labelpad * self.figure.dpi / 72) )
4,547
23,225
47
ppocr/postprocess/fce_postprocess.py
26
12
def poly_union(poly_det, poly_gt): assert isinstance(poly_det, plg.Polygon) assert isinstance(poly_gt, plg.Polygon) area_det = poly_det.area() area_gt = poly_gt.area() area_inters, _ = poly_intersection(poly_det, poly_gt) return area_det + area_gt - area_inters
add fcenet
poly_union
9f62b610dea6161627200ed85d92e19b1923279a
PaddleOCR
fce_postprocess.py
8
7
https://github.com/PaddlePaddle/PaddleOCR.git
1
56
0
20
89
Python
{ "docstring": "Calculate the union area between two polygon.\n\n Args:\n poly_det (Polygon): A polygon predicted by detector.\n poly_gt (Polygon): A gt polygon.\n\n Returns:\n union_area (float): The union area between two polygons.\n ", "language": "en", "n_whitespaces": 59, "n_words": 29, "vocab_size": 22 }
def poly_union(poly_det, poly_gt): assert isinstance(poly_det, plg.Polygon) assert isinstance(poly_gt, plg.Polygon) area_det = poly_det.area() area_gt = poly_gt.area() area_inters, _ = poly_intersection(poly_det, poly_gt) return area_det + area_gt - area_inters
73,483
250,506
198
tests/config/test_tls.py
48
24
def test_whitelist_idna_result(self) -> None: config: JsonDict = { "federation_certificate_verification_whitelist": [ "example.com", "*.xn--eckwd4c7c.xn--zckzah", ] } t = TestConfig() t.tls.read_
Add missing type hints to tests.config. (#14681)
test_whitelist_idna_result
3aeca2588b79111a48a6083c88efc4d68a2cea19
synapse
test_tls.py
11
19
https://github.com/matrix-org/synapse.git
1
110
0
38
187
Python
{ "docstring": "\n The federation certificate whitelist will match on IDNA encoded names.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
def test_whitelist_idna_result(self) -> None: config: JsonDict = { "federation_certificate_verification_whitelist": [ "example.com", "*.xn--eckwd4c7c.xn--zckzah", ] } t = TestConfig() t.tls.read_config(config, config_dir_path="", data_dir_path="") cf = FederationPolicyForHTTPS(cast(HomeServerConfig, t)) # Not in the whitelist opts = cf.get_options(b"notexample.com") assert isinstance(opts, SSLClientConnectionCreator) self.assertTrue(opts._verifier._verify_certs) # Caught by the wildcard opts = cf.get_options(idna.encode("テスト.ドメイン.テスト")) assert isinstance(opts, SSLClientConnectionCreator) self.assertFalse(opts._verifier._verify_certs)
70,340
244,348
41
mmdet/models/dense_heads/dense_test_mixins.py
13
7
def simple_test_rpn(self, x, img_metas): rpn_outs = self(x) proposal_list = self.get_results(*rpn_outs, img_metas=img_metas) r
[Refactor] Refactor dense head outputs to InstanceResults.
simple_test_rpn
9a3bf7660e6ced54672741095f96df07919f9ba7
mmdetection
dense_test_mixins.py
9
4
https://github.com/open-mmlab/mmdetection.git
1
31
0
11
50
Python
{ "docstring": "Test without augmentation, only for ``RPNHead`` and its variants,\n e.g., ``GARPNHead``, etc.\n\n Args:\n x (tuple[Tensor]): Features from the upstream network, each is\n a 4D-tensor.\n img_metas (list[dict]): Meta info of each image.\n\n Returns:\n list[Tensor]: Proposals of each image, each item has shape (n, 5),\n where 5 represent (tl_x, tl_y, br_x, br_y, score).\n ", "language": "en", "n_whitespaces": 142, "n_words": 51, "vocab_size": 47 }
def simple_test_rpn(self, x, img_metas): rpn_outs = self(x) proposal_list = self.get_results(*rpn_outs, img_metas=img_metas) return proposal_list
22,775
107,486
250
lib/matplotlib/axis.py
85
25
def _get_tick_boxes_siblings(self, renderer): # Get the Grouper keeping track of x or y label groups for this figure. axis_names = [ name for name, axis in self.axes._get_axis_map().items() if name in self.figure._align_label_groups and axis is self] if len(axis_names) != 1: return
DOC: More cleanup axes -> Axes
_get_tick_boxes_siblings
f156db08eee54d285ab0fb4e031e48d078ba6aa3
matplotlib
axis.py
13
17
https://github.com/matplotlib/matplotlib.git
6
133
0
64
219
Python
{ "docstring": "\n Get the bounding boxes for this `.axis` and its siblings\n as set by `.Figure.align_xlabels` or `.Figure.align_ylabels`.\n\n By default it just gets bboxes for self.\n ", "language": "en", "n_whitespaces": 54, "n_words": 24, "vocab_size": 23 }
def _get_tick_boxes_siblings(self, renderer): # Get the Grouper keeping track of x or y label groups for this figure. axis_names = [ name for name, axis in self.axes._get_axis_map().items() if name in self.figure._align_label_groups and axis is self] if len(axis_names) != 1: return [], [] axis_name, = axis_names grouper = self.figure._align_label_groups[axis_name] bboxes = [] bboxes2 = [] # If we want to align labels from other Axes: for ax in grouper.get_siblings(self.axes): axis = getattr(ax, f"{axis_name}axis") ticks_to_draw = axis._update_ticks() tlb, tlb2 = axis._get_ticklabel_bboxes(ticks_to_draw, renderer) bboxes.extend(tlb) bboxes2.extend(tlb2) return bboxes, bboxes2
81,746
276,832
20
keras/utils/generic_utils.py
10
3
def default(method): method._is_de
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
default
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
generic_utils.py
7
3
https://github.com/keras-team/keras.git
1
13
0
10
25
Python
{ "docstring": "Decorates a method to detect overrides in subclasses.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def default(method): method._is_default = True # pylint: disable=protected-access return method
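A minimal sketch of how the marker is typically consumed, assuming a hypothetical base class whose caller checks the attribute:

class Base:
    @default
    def build(self):
        pass

class Child(Base):
    def build(self):   # override: the marker attribute is not set on this function
        pass

print(getattr(Base.build, "_is_default", False))    # True
print(getattr(Child.build, "_is_default", False))   # False -> detected as an override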
26,270
118,528
38
lib/tests/streamlit/camera_input_test.py
10
9
def test_help_tooltip(self): st.camera_input("the label", help="help_label") c = self.get_delta_from_queue().new_element.camera_input self.assert
Feature/camera image input (#4038) * Camera_input widget Co-authored-by: willhuang1997 <[email protected]> Co-authored-by: Henrikh Kantuni <[email protected]> Co-authored-by: William Huang <[email protected]> Co-authored-by: Vincent Donato <[email protected]>
test_help_tooltip
33855278eaf8599b2bec1ddefa5eebb592e55e25
streamlit
camera_input_test.py
10
4
https://github.com/streamlit/streamlit.git
1
37
0
10
67
Python
{ "docstring": "Test that it can be called using a string for type parameter.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
def test_help_tooltip(self): st.camera_input("the label", help="help_label") c = self.get_delta_from_queue().new_element.camera_input self.assertEqual(c.help, "help_label")
107,643
308,917
253
tests/components/nest/test_camera_sdm.py
96
33
async def test_multiple_event_images(hass, auth): subscriber = await async_setup_camera(hass, DEVICE_TRAITS, auth=auth) assert len(hass.states.async_all()) == 1 assert hass.states.get("camera.my_camera") event_timestamp = utcnow() await subscriber.async_receive_event( make_motion_event(event_session_id="event-session-1", timestamp=event_timestamp) ) await hass.async_block_till_done() auth.responses = [ # Fake response from API that returns url image aiohttp.web.json_response(GENERATE_IMAGE_URL_RESPONSE), # Fake response for the image content fetch aiohttp.web.Response(body=IMAGE_BYTES_FROM_EVENT), # Image is refetched after being cleared by expiration alarm aiohttp.web.json_response(GENERATE_IMAGE_URL_RESPONSE), aiohttp.web.Response(body=b"updated image bytes"), ] image = await async_get_image(hass) assert image.content == IMAGE_BYTES_FROM_EVENT next_event_timestamp = event_timestamp + datetime.timedelta(seconds=25) await subscriber.async_receive_ev
Delete nest event image fetching and use same APIs as media player (#62789)
test_multiple_event_images
4203e1b0640e16fbda55672c5be089431face880
core
test_camera_sdm.py
12
28
https://github.com/home-assistant/core.git
1
183
0
63
301
Python
{ "docstring": "Test fallback for an event event image that has been cleaned up on expiration.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
async def test_multiple_event_images(hass, auth): subscriber = await async_setup_camera(hass, DEVICE_TRAITS, auth=auth) assert len(hass.states.async_all()) == 1 assert hass.states.get("camera.my_camera") event_timestamp = utcnow() await subscriber.async_receive_event( make_motion_event(event_session_id="event-session-1", timestamp=event_timestamp) ) await hass.async_block_till_done() auth.responses = [ # Fake response from API that returns url image aiohttp.web.json_response(GENERATE_IMAGE_URL_RESPONSE), # Fake response for the image content fetch aiohttp.web.Response(body=IMAGE_BYTES_FROM_EVENT), # Image is refetched after being cleared by expiration alarm aiohttp.web.json_response(GENERATE_IMAGE_URL_RESPONSE), aiohttp.web.Response(body=b"updated image bytes"), ] image = await async_get_image(hass) assert image.content == IMAGE_BYTES_FROM_EVENT next_event_timestamp = event_timestamp + datetime.timedelta(seconds=25) await subscriber.async_receive_event( make_motion_event( event_id="updated-event-id", event_session_id="event-session-2", timestamp=next_event_timestamp, ) ) await hass.async_block_till_done() image = await async_get_image(hass) assert image.content == b"updated image bytes"
72,668
249,161
399
tests/rest/admin/test_room.py
87
26
def test_delete_same_room_twice(self) -> None: body = {"new_room_user_id": self.admin_user} # first call to delete room # and do not wait for finish the task first_channel = self.make_request( "DELETE", self.url.encode("ascii"), content=body, access_token=self.admin_user_tok, await_result=False, ) # second call to delete room second_channel = self.make_request( "DELETE", self.url.encode("ascii"), content=body, access_token=s
Use literals in place of `HTTPStatus` constants in tests (#13469)
test_delete_same_room_twice
c97042f7eef3748e17c90e48a4122389a89c4735
synapse
test_room.py
11
32
https://github.com/matrix-org/synapse.git
1
176
0
61
290
Python
{ "docstring": "Test that the call for delete a room at second time gives an exception.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
def test_delete_same_room_twice(self) -> None: body = {"new_room_user_id": self.admin_user} # first call to delete room # and do not wait for finish the task first_channel = self.make_request( "DELETE", self.url.encode("ascii"), content=body, access_token=self.admin_user_tok, await_result=False, ) # second call to delete room second_channel = self.make_request( "DELETE", self.url.encode("ascii"), content=body, access_token=self.admin_user_tok, ) self.assertEqual( HTTPStatus.BAD_REQUEST, second_channel.code, msg=second_channel.json_body ) self.assertEqual(Codes.UNKNOWN, second_channel.json_body["errcode"]) self.assertEqual( f"History purge already in progress for {self.room_id}", second_channel.json_body["error"], ) # get result of first call first_channel.await_result() self.assertEqual(200, first_channel.code, msg=first_channel.json_body) self.assertIn("delete_id", first_channel.json_body) # check status after finish the task self._test_result( first_channel.json_body["delete_id"], self.other_user, expect_new_room=True, )
110,313
311,654
261
homeassistant/components/august/__init__.py
44
13
async def _async_refresh_device_detail_by_ids(self, device_ids_list): for device_id in device_ids_list: try: await self._async_refresh_device_detail_by_id(device_id) except asyncio.TimeoutError: _LOGGER.warning( "Timed out calling august api during refresh of device: %s", device_id, ) except (ClientResponseError, CannotConnect) as err: _LOGGER.warning( "Error from august api during refres
Improve reliability of august setup with recent api changes (#65314)
_async_refresh_device_detail_by_ids
fab9c4aa20b4c2549691d0aa5066798a0259e803
core
__init__.py
13
15
https://github.com/home-assistant/core.git
4
58
0
33
96
Python
{ "docstring": "Refresh each device in sequence.\n\n This used to be a gather but it was less reliable with august's\n recent api changes.\n\n The august api has been timing out for some devices so\n we want the ones that it isn't timing out for to keep working.\n ", "language": "en", "n_whitespaces": 80, "n_words": 45, "vocab_size": 39 }
async def _async_refresh_device_detail_by_ids(self, device_ids_list): for device_id in device_ids_list: try: await self._async_refresh_device_detail_by_id(device_id) except asyncio.TimeoutError: _LOGGER.warning( "Timed out calling august api during refresh of device: %s", device_id, ) except (ClientResponseError, CannotConnect) as err: _LOGGER.warning( "Error from august api during refresh of device: %s", device_id, exc_info=err, )
6,841
37,631
146
src/transformers/models/yolos/feature_extraction_yolos.py
86
33
def masks_to_boxes(masks): if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1)
Add YOLOS (#16848) * First draft * Add YolosForObjectDetection * Make forward pass work * Add mid position embeddings * Add interpolation of position encodings * Add expected values * Add YOLOS to tests * Add integration test * Support tiny model as well * Support all models in conversion script * Remove mid_pe_size attribute * Make more tests pass * Add model to README and fix config * Add copied from statements * Rename base_model_prefix to vit * Add missing YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP * Apply suggestions from code review * Apply more suggestions from code review * Convert remaining checkpoints * Improve docstrings * Add YolosFeatureExtractor * Add feature extractor to docs * Add corresponding tests * Fix style * Fix docs * Apply suggestion from code review * Fix bad rebase * Fix some more bad rebase * Fix missing character * Improve docs and variable names Co-authored-by: Niels Rogge <[email protected]>
masks_to_boxes
1ac698744c4dbdf1495d303246d08ffacdf4f5b8
transformers
feature_extraction_yolos.py
14
18
https://github.com/huggingface/transformers.git
2
289
0
56
442
Python
{ "docstring": "\n Compute the bounding boxes around the provided panoptic segmentation masks.\n\n The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.\n\n Returns a [N, 4] tensor, with the boxes in corner (xyxy) format.\n ", "language": "en", "n_whitespaces": 57, "n_words": 44, "vocab_size": 37 }
def masks_to_boxes(masks): if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) # Copied from transformers.models.detr.feature_extraction_detr.rgb_to_id
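A small worked example for the function above, assuming only NumPy:

import numpy as np

masks = np.zeros((1, 5, 5))
masks[0, 1:3, 1:4] = 1              # one mask covering rows 1-2 and columns 1-3
print(masks_to_boxes(masks))        # [[1. 1. 3. 2.]] -> (x_min, y_min, x_max, y_max)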
@_wraps(np.roots, lax_description="""\ Unlike the numpy version of this function, the JAX version returns the roots in a complex array regardless of the values of the roots. Additionally, the jax version of this function adds the ``strip_zeros`` function which must be set to False for the function to be compatible with JIT and other JAX transformations. With ``strip_zeros=False``, if your coefficients have leading zeros, the roots will be padded with NaN values: >>> coeffs = jnp.array([0, 1, 2]) # The default behavior matches numpy and strips leading zeros: >>> jnp.roots(coeffs) DeviceArray([-2.+0.j], dtype=complex64) # With strip_zeros=False, extra roots are set to NaN: >>> jnp.roots(coeffs, strip_zeros=False) DeviceArray([-2. +0.j, nan+nanj], dtype=complex64) """, extra_params=""" strip_zeros : bool, default=True If set to True, then leading zeros in the coefficients will be stripped, similar to :func:`numpy.roots`. If set to False, leading zeros will not be stripped, and undefined roots will be represented by NaN values in the function output. ``strip_zeros`` must be set to ``False`` for the function to be compatible with :func:`jax.jit` and other JAX transformations. """)
27,011
121,009
73
jax/_src/numpy/polynomial.py
68
18
def _roots_with_zeros(p, num_leading_zeros): # Avoid lapack errors when p is all zero p = _where(len(p) == num_leading_zeros, 1.0, p) # Roll any leading zeros to the end & compute the roots roots = _roots_no_zeros(roll(p, -num_leading_zeros)) # Sort zero roots to the end. roots = lax.sort_key_val(roots == 0, roots)[1] # Set roots associated with num_leading_zeros to NaN
jnp.roots: better support for computation under JIT
_roots_with_zeros
f6476f7a03f8390627c1a8e2a2ec8702d8a320e5
jax
polynomial.py
11
5
https://github.com/google/jax.git
1
80
1
51
147
Python
{ "docstring": "\\\nUnlike the numpy version of this function, the JAX version returns the roots in\na complex array regardless of the values of the roots. Additionally, the jax\nversion of this function adds the ``strip_zeros`` function which must be set to\nFalse for the function to be compatible with JIT and other JAX transformations.\nWith ``strip_zeros=False``, if your coefficients have leading zeros, the\nroots will be padded with NaN values:\n\n>>> coeffs = jnp.array([0, 1, 2])\n\n# The default behavior matches numpy and strips leading zeros:\n>>> jnp.roots(coeffs)\nDeviceArray([-2.+0.j], dtype=complex64)\n\n# With strip_zeros=False, extra roots are set to NaN:\n>>> jnp.roots(coeffs, strip_zeros=False)\nDeviceArray([-2. +0.j, nan+nanj], dtype=complex64)\n\nstrip_zeros : bool, default=True\n If set to True, then leading zeros in the coefficients will be stripped, similar\n to :func:`numpy.roots`. If set to False, leading zeros will not be stripped, and\n undefined roots will be represented by NaN values in the function output.\n ``strip_zeros`` must be set to ``False`` for the function to be compatible with\n :func:`jax.jit` and other JAX transformations.\n", "language": "en", "n_whitespaces": 167, "n_words": 167, "vocab_size": 92 }
def _roots_with_zeros(p, num_leading_zeros): # Avoid lapack errors when p is all zero p = _where(len(p) == num_leading_zeros, 1.0, p) # Roll any leading zeros to the end & compute the roots roots = _roots_no_zeros(roll(p, -num_leading_zeros)) # Sort zero roots to the end. roots = lax.sort_key_val(roots == 0, roots)[1] # Set roots associated with num_leading_zeros to NaN return _where(arange(roots.size) < roots.size - num_leading_zeros, roots, complex(np.nan, np.nan)) @_wraps(np.roots, lax_description=, extra_params=)
2,178
12,072
85
jina/orchestrate/flow/base.py
16
9
def port_monitoring(self) -> int: if GATEWAY_NAME in self._deployment_nodes: return self[GATEWAY_NAME].args.port_monitoring else: return self._common_kwargs.get( 'port_monitoring', __default_port_monitoring__ )
feat: expose prometheus metrics (#4526)
port_monitoring
8dc2999a588c46deca60b3f0d5c1b6278a6e165c
jina
base.py
11
10
https://github.com/jina-ai/jina.git
2
37
0
15
62
Python
{ "docstring": "Return if the monitoring is enabled\n .. # noqa: DAR201\n ", "language": "en", "n_whitespaces": 24, "n_words": 10, "vocab_size": 10 }
def port_monitoring(self) -> int: if GATEWAY_NAME in self._deployment_nodes: return self[GATEWAY_NAME].args.port_monitoring else: return self._common_kwargs.get( 'port_monitoring', __default_port_monitoring__ )
51,655
206,720
391
django/utils/module_loading.py
117
18
def autodiscover_modules(*args, **kwargs): from django.apps import apps register_to = kwargs.get("register_to") for app_config in apps.get_app_configs(): for module_to_search in args: # Attempt to import the app's module. try: if register_to: before_import_registry = copy.copy(register_to._registry) import_module("%s.%s" % (app_config.name, module_to_search)) except Exception: # Reset the registry to the state before the last import # as this import will have to reoccur on the next request and # this could raise NotRegistered and AlreadyRegistered # exceptions (see #8245). if register_to: register_to._registry = before_import_registry # Decide whether to bubble up this error. If the app just # doesn't have the module in
Refs #33476 -- Reformatted code with Black.
autodiscover_modules
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
module_loading.py
17
14
https://github.com/django/django.git
7
87
0
78
152
Python
{ "docstring": "\n Auto-discover INSTALLED_APPS modules and fail silently when\n not present. This forces an import on them to register any admin bits they\n may want.\n\n You may provide a register_to keyword parameter as a way to access a\n registry. This register_to object must have a _registry instance variable\n to access it.\n ", "language": "en", "n_whitespaces": 71, "n_words": 49, "vocab_size": 40 }
def autodiscover_modules(*args, **kwargs): from django.apps import apps register_to = kwargs.get("register_to") for app_config in apps.get_app_configs(): for module_to_search in args: # Attempt to import the app's module. try: if register_to: before_import_registry = copy.copy(register_to._registry) import_module("%s.%s" % (app_config.name, module_to_search)) except Exception: # Reset the registry to the state before the last import # as this import will have to reoccur on the next request and # this could raise NotRegistered and AlreadyRegistered # exceptions (see #8245). if register_to: register_to._registry = before_import_registry # Decide whether to bubble up this error. If the app just # doesn't have the module in question, we can ignore the error # attempting to import it, otherwise we want it to bubble up. if module_has_submodule(app_config.module, module_to_search): raise
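A usage sketch mirroring how an admin-style registry is typically wired up (illustrative; django.contrib.admin exposes an equivalent autodiscover helper):

from django.utils.module_loading import autodiscover_modules
from django.contrib import admin

def autodiscover():
    # import every installed app's admin module, registering models against admin.site
    autodiscover_modules("admin", register_to=admin.site)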
56,137
220,829
13
python3.10.4/Lib/asyncio/tasks.py
7
4
def ensure_future(coro_or_future, *, loop=None): return _ensure_future(coro_or_future, loop=loop)
add python 3.10.4 for windows
ensure_future
8198943edd73a363c266633e1aa5b2a9e9c9f526
XX-Net
tasks.py
8
2
https://github.com/XX-net/XX-Net.git
1
21
0
7
34
Python
{ "docstring": "Wrap a coroutine or an awaitable in a future.\n\n If the argument is a Future, it is returned directly.\n ", "language": "en", "n_whitespaces": 25, "n_words": 19, "vocab_size": 16 }
def ensure_future(coro_or_future, *, loop=None): return _ensure_future(coro_or_future, loop=loop)
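A minimal sketch of the common call pattern:

import asyncio

async def work():
    return 42

async def main():
    fut = asyncio.ensure_future(work())   # schedules the coroutine as a Task
    print(await fut)                      # 42

asyncio.run(main())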
69,846
242,362
101
src/PIL/Image.py
25
9
def getpalette(self, rawmode="RGB"): self.load() try: mode = self.im.getpalettemode() except ValueError: return None # no palette if rawmode is None: rawmode = mode
Allow rawmode None to return the palette in the current mode
getpalette
6be87277f71948bc7e4b945c46660cac3e5ce919
Pillow
Image.py
11
9
https://github.com/python-pillow/Pillow.git
3
53
0
21
91
Python
{ "docstring": "\n Returns the image palette as a list.\n\n :param rawmode: The mode in which to return the palette. ``None`` will\n return the palette in its current mode.\n :returns: A list of color values [r, g, b, ...], or None if the\n image has no palette.\n ", "language": "en", "n_whitespaces": 93, "n_words": 44, "vocab_size": 36 }
def getpalette(self, rawmode="RGB"): self.load() try: mode = self.im.getpalettemode() except ValueError: return None # no palette if rawmode is None: rawmode = mode return list(self.im.getpalette(mode, rawmode))
2,630
13,404
33
jina/types/request/data.py
8
6
def last_executor(self): if len(self.proto_wo
feat: pass `docs_map` to Executor (#5366)
last_executor
ad96553b064b9c17d626f6fcb78e4a45987be2c3
jina
data.py
11
3
https://github.com/jina-ai/jina.git
2
30
0
8
50
Python
{ "docstring": "\n Returns the name of the last Executor that has processed this Request\n\n :return: the name of the last Executor that processed this Request\n ", "language": "en", "n_whitespaces": 45, "n_words": 23, "vocab_size": 12 }
def last_executor(self): if len(self.proto_wo_data.routes) > 0: return self.proto_wo_data.routes[-1].executor
81,689
276,584
256
keras/tests/temporal_sample_weights_correctness_test.py
58
18
def custom_generator_multi_io_temporal(self, sample_weights=None): batch_size = 3 num_samples = 3 iteration = 0 while True: batch_index = iteration * batch_size % num_samples iteration += 1 start = batch_index end = start + batch_size x = [self.x[start:end], self.x[start:end]] y = [self.y1[start:end], self.y2[start:end]] if sample_weights: sw = tf.nest.map_structure( lambda w: w[start:end], sample_weights ) else:
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
custom_generator_multi_io_temporal
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
temporal_sample_weights_correctness_test.py
15
18
https://github.com/keras-team/keras.git
3
116
0
39
180
Python
{ "docstring": "Generator for getting data for temporal multi io model.\n\n Args:\n sample_weights: List of sample_weights.\n\n Yields:\n Tuple of inputs, label, sample weights data.\n ", "language": "en", "n_whitespaces": 61, "n_words": 22, "vocab_size": 20 }
def custom_generator_multi_io_temporal(self, sample_weights=None): batch_size = 3 num_samples = 3 iteration = 0 while True: batch_index = iteration * batch_size % num_samples iteration += 1 start = batch_index end = start + batch_size x = [self.x[start:end], self.x[start:end]] y = [self.y1[start:end], self.y2[start:end]] if sample_weights: sw = tf.nest.map_structure( lambda w: w[start:end], sample_weights ) else: sw = None yield x, y, sw
78,920
267,484
379
test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/main.py
61
18
def _just_docs(self): try: for child in self.ast.body: if not isinstance(child, as
ansible-test - Allow docstring in docs-only module
_just_docs
5b3557f8ba5c176eb7d2de21b3a4da3dcab3bada
ansible
main.py
17
16
https://github.com/ansible/ansible.git
11
107
0
45
171
Python
{ "docstring": "Module can contain just docs and from __future__ boilerplate\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
def _just_docs(self): try: for child in self.ast.body: if not isinstance(child, ast.Assign): # allow string constant expressions (these are docstrings) if isinstance(child, ast.Expr) and isinstance(child.value, ast.Constant) and isinstance(child.value.value, str): continue # allowed from __future__ imports if isinstance(child, ast.ImportFrom) and child.module == '__future__': for future_import in child.names: if future_import.name not in self.ACCEPTLIST_FUTURE_IMPORTS: break else: continue return False return True except AttributeError: return False
17,587
83,043
40
zerver/openapi/openapi.py
12
5
def response_validator(self) -> RequestValidator: self.check_reload() assert s
openapi: Use openapi_core ResponseValidator to validate responses. Signed-off-by: Anders Kaseorg <[email protected]>
response_validator
031f4596ab1737a237b1c099e792fe627a937ff7
zulip
openapi.py
7
8
https://github.com/zulip/zulip.git
1
24
0
11
41
Python
{ "docstring": "Reload the OpenAPI file if it has been modified after the last time\n it was read, and then return the openapi_core validator object. Similar\n to preceding functions. Used for proper access to OpenAPI objects.\n ", "language": "en", "n_whitespaces": 55, "n_words": 34, "vocab_size": 29 }
def response_validator(self) -> RequestValidator: self.check_reload() assert self._response_validator is not None return self._response_validator
@_wraps(np.polyfit, lax_description=_POLYFIT_DOC) @partial(jit, static_argnames=('deg', 'rcond', 'full', 'cov'))
26,694
119,828
164
jax/_src/numpy/polynomial.py
116
25
def roots(p, *, strip_zeros=True): # ported from https://github.com/numpy/numpy/blob/v1.17.0/numpy/lib/polynomial.py#L168-L251 p = atleast_1d(p) if p.ndim != 1: raise Valu
lax_numpy: move poly functions into numpy.polynomial
roots
603bb3c5ca288674579211e64fa47c6b2b0fb7a6
jax
polynomial.py
15
20
https://github.com/google/jax.git
6
133
1
74
274
Python
{ "docstring": "\\\nUnlike NumPy's implementation of polyfit, :py:func:`jax.numpy.polyfit` will not warn on rank reduction, which indicates an ill conditioned matrix\nAlso, it works best on rcond <= 10e-3 values.\n", "language": "en", "n_whitespaces": 25, "n_words": 28, "vocab_size": 27 }
def roots(p, *, strip_zeros=True): # ported from https://github.com/numpy/numpy/blob/v1.17.0/numpy/lib/polynomial.py#L168-L251 p = atleast_1d(p) if p.ndim != 1: raise ValueError("Input must be a rank-1 array.") # strip_zeros=False is unsafe because leading zeros aren't removed if not strip_zeros: if p.size > 1: return _roots_no_zeros(p) else: return array([]) if all(p == 0): return array([]) # factor out trivial roots start, end = _nonzero_range(p) # number of trailing zeros = number of roots at 0 trailing_zeros = p.size - end # strip leading and trailing zeros p = p[start:end] if p.size < 2: return zeros(trailing_zeros, p.dtype) else: roots = _roots_no_zeros(p) # combine roots and zero roots roots = hstack((roots, zeros(trailing_zeros, p.dtype))) return roots _POLYFIT_DOC = @_wraps(np.polyfit, lax_description=_POLYFIT_DOC) @partial(jit, static_argnames=('deg', 'rcond', 'full', 'cov'))
28,957
129,465
31
python/ray/tune/trainable.py
10
10
def _storage_path(self, local_path): rel_local_path =
[tune] only sync up and sync down checkpoint folder for cloud checkpoint. (#21658) By default, ~/ray_results/exp_name/trial_name/checkpoint_name. Instead of the whole trial checkpoint (~/ray_results/exp_name/trial_name/) directory. Stuff like progress.csv, result.json, params.pkl, params.json, events.out etc are coming from driver process. This could also enable us to de-couple sync up and delete - they don't have to wait for each other to finish.
_storage_path
0abcd5eea529fc84c4398620f2808087e4d8c6b6
ray
trainable.py
9
3
https://github.com/ray-project/ray.git
1
35
0
10
55
Python
{ "docstring": "Converts a `local_path` to be based off of\n `self.remote_checkpoint_dir`.", "language": "en", "n_whitespaces": 15, "n_words": 9, "vocab_size": 9 }
def _storage_path(self, local_path): rel_local_path = os.path.relpath(local_path, self.logdir) return os.path.join(self.remote_checkpoint_dir, rel_local_path)
1,588
9,373
22
reconstruction/ostec/external/stylegan2/dnnlib/submission/run_context.py
8
5
def get_time_since_last_update(self) -> float: return time.time() - self.last_upda
initialize ostec
get_time_since_last_update
7375ee364e0df2a417f92593e09557f1b2a3575a
insightface
run_context.py
8
3
https://github.com/deepinsight/insightface.git
1
18
0
8
32
Python
{ "docstring": "How much time has passed since the last call to update.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
def get_time_since_last_update(self) -> float: return time.time() - self.last_update_time
1,643
9,622
1,136
reconstruction/ostec/external/graphonomy/FaceHairMask/deeplab_xception.py
192
19
def train_fixbn(self, mode=True, freeze_bn=True, freeze_bn_affine=False): r super(DeepLabv3_plus, self).train(mode) if freeze_bn: print("Freezing Mean/Var of BatchNorm2D.") if freeze_bn_affine: print("Freezing Weight/Bias of BatchNorm2D.") if freeze_bn: for m in self.xception_features.modules(): if isinstance(m, nn.BatchNorm2d): m.eval() if freeze_bn_affine: m.weight.requires_grad = False m.bias.requires_grad = False # for m in self.aspp1.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.aspp2.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.aspp3.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.aspp4.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.global_avg_pool.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.
Graphonomy Face/Hair Segmentation added
train_fixbn
2e5d23ee0e7fc1fdd7ad2e615fd651655aeb0f5b
insightface
deeplab_xception.py
16
23
https://github.com/deepinsight/insightface.git
7
90
0
35
188
Python
{ "docstring": "Sets the module in training mode.\n\n This has any effect only on certain modules. See documentations of\n particular modules for details of their behaviors in training/evaluation\n mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,\n etc.\n\n Returns:\n Module: self\n ", "language": "en", "n_whitespaces": 91, "n_words": 38, "vocab_size": 36 }
def train_fixbn(self, mode=True, freeze_bn=True, freeze_bn_affine=False): r super(DeepLabv3_plus, self).train(mode) if freeze_bn: print("Freezing Mean/Var of BatchNorm2D.") if freeze_bn_affine: print("Freezing Weight/Bias of BatchNorm2D.") if freeze_bn: for m in self.xception_features.modules(): if isinstance(m, nn.BatchNorm2d): m.eval() if freeze_bn_affine: m.weight.requires_grad = False m.bias.requires_grad = False # for m in self.aspp1.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.aspp2.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.aspp3.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.aspp4.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.global_avg_pool.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.concat_projection_bn1.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False # for m in self.feature_projection_bn1.modules(): # if isinstance(m, nn.BatchNorm2d): # m.eval() # if freeze_bn_affine: # m.weight.requires_grad = False # m.bias.requires_grad = False
70,831
245,586
91
tests/test_models/test_backbones/test_resnet.py
34
14
def assert_params_all_zeros(module) -> bool: weight_data = module.weight.data is_weight_zero = weight_data.allclose( weight_data.new_zeros(weight_data.size())) if hasattr(module, 'bias') and module.bias is not None: bias_data = module.bias.data is_bias_zero = bias_data.allclose( bias_data.new_zeros(bias_data.size())) else: is_bias_zero = True ret
[Fix] Fix UT and remove delete mmcv ops. (#8623) * Remove get_root_logger * Fix UT * Update
assert_params_all_zeros
73a12e6508d4ba0331b84b1313027a511ba26fe3
mmdetection
test_resnet.py
14
19
https://github.com/open-mmlab/mmdetection.git
4
80
0
26
133
Python
{ "docstring": "Check if the parameters of the module is all zeros.\n\n Args:\n module (nn.Module): The module to be checked.\n\n Returns:\n bool: Whether the parameters of the module is all zeros.\n ", "language": "en", "n_whitespaces": 52, "n_words": 29, "vocab_size": 18 }
def assert_params_all_zeros(module) -> bool: weight_data = module.weight.data is_weight_zero = weight_data.allclose( weight_data.new_zeros(weight_data.size())) if hasattr(module, 'bias') and module.bias is not None: bias_data = module.bias.data is_bias_zero = bias_data.allclose( bias_data.new_zeros(bias_data.size())) else: is_bias_zero = True return is_weight_zero and is_bias_zero
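For context, a minimal usage sketch of the `assert_params_all_zeros` helper shown above, assuming PyTorch is installed and the helper is in scope; the layer and shapes below are hypothetical, not taken from the original test suite:

import torch.nn as nn

# A hypothetical conv layer whose parameters we zero out and then check.
conv = nn.Conv2d(3, 8, kernel_size=3, bias=True)
nn.init.zeros_(conv.weight)
nn.init.zeros_(conv.bias)
assert assert_params_all_zeros(conv)        # True: weight and bias are all zeros

nn.init.ones_(conv.weight)
assert not assert_params_all_zeros(conv)    # False once the weights are non-zero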
52,741
209,587
188
scapy/contrib/automotive/scanner/executor.py
40
16
def cleanup_state(self): # type: () -> None for f in self.cleanup_functions: if not callable(f): continue try: if not f(self.socket, self.configuration): log_automotive.info(
Add Automotive Logger for all debug outputs of the automotive layer
cleanup_state
495b21f2867e48286767085c8cf2918e4092e9dc
scapy
executor.py
15
11
https://github.com/secdev/scapy.git
5
73
0
38
123
Python
{ "docstring": "\n Executes all collected cleanup functions from a traversed path\n :return: None\n ", "language": "en", "n_whitespaces": 33, "n_words": 11, "vocab_size": 11 }
def cleanup_state(self): # type: () -> None for f in self.cleanup_functions: if not callable(f): continue try: if not f(self.socket, self.configuration): log_automotive.info( "Cleanup function %s failed", repr(f)) except (OSError, ValueError, Scapy_Exception) as e: log_automotive.critical("Exception during cleanup: %s", e) self.cleanup_functions = list()
15,998
73,261
226
wagtail/contrib/modeladmin/tests/test_simple_modeladmin.py
55
17
def test_model_with_two_tabbed_panels_only(self): Publisher.settings_panels = [FieldPanel("name")] Publisher.promote_panels = [FieldPanel("headquartered_in")] warning_1 = checks.Warning( "Publisher.promo
Reformat with black
test_model_with_two_tabbed_panels_only
d10f15e55806c6944827d801cd9c2d53f5da4186
wagtail
test_simple_modeladmin.py
10
26
https://github.com/wagtail/wagtail.git
1
102
0
37
178
Python
{ "docstring": "Ensure that Publisher uses `panels` instead of `promote_panels`\\\nor set up an `edit_handler` if you want a tabbed editing interface.\nThere are no default tabs on non-Page models so there will be no\\\n Promote tab for the promote_panels to render in.Ensure that Publisher uses `panels` instead of `settings_panels`\\\nor set up an `edit_handler` if you want a tabbed editing interface.\nThere are no default tabs on non-Page models so there will be no\\\n Settings tab for the settings_panels to render in.", "language": "en", "n_whitespaces": 76, "n_words": 81, "vocab_size": 45 }
def test_model_with_two_tabbed_panels_only(self): Publisher.settings_panels = [FieldPanel("name")] Publisher.promote_panels = [FieldPanel("headquartered_in")] warning_1 = checks.Warning( "Publisher.promote_panels will have no effect on modeladmin editing", hint=, obj=Publisher, id="wagtailadmin.W002", ) warning_2 = checks.Warning( "Publisher.settings_panels will have no effect on modeladmin editing", hint=, obj=Publisher, id="wagtailadmin.W002", ) checks_results = self.get_checks_result() self.assertIn(warning_1, checks_results) self.assertIn(warning_2, checks_results) # clean up for future checks delattr(Publisher, "settings_panels") delattr(Publisher, "promote_panels")
81,181
274,158
25
keras/layers/serialization.py
9
7
def get_builtin_layer(class_name): if not hasattr(LOCAL, "ALL_OBJECTS"): populate_deserializable_objects() return L
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
get_builtin_layer
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
serialization.py
9
4
https://github.com/keras-team/keras.git
2
27
0
9
49
Python
{ "docstring": "Returns class if `class_name` is registered, else returns None.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def get_builtin_layer(class_name): if not hasattr(LOCAL, "ALL_OBJECTS"): populate_deserializable_objects() return LOCAL.ALL_OBJECTS.get(class_name)
47,617
196,117
491
sympy/combinatorics/perm_groups.py
90
28
def abelian_invariants(self): if self.is_trivial: return [] gns = self.generators inv = [] G = self H = G.derived_subgroup() Hgens = H.generators for p in primefactors(G.order()): ranks = [] while True: pows = [] for g in gns:
Updated import locations
abelian_invariants
498015021131af4dbb07eb110e5badaba8250c7b
sympy
perm_groups.py
16
31
https://github.com/sympy/sympy.git
11
181
0
53
300
Python
{ "docstring": "\n Returns the abelian invariants for the given group.\n Let ``G`` be a nontrivial finite abelian group. Then G is isomorphic to\n the direct product of finitely many nontrivial cyclic groups of\n prime-power order.\n\n Explanation\n ===========\n\n The prime-powers that occur as the orders of the factors are uniquely\n determined by G. More precisely, the primes that occur in the orders of the\n factors in any such decomposition of ``G`` are exactly the primes that divide\n ``|G|`` and for any such prime ``p``, if the orders of the factors that are\n p-groups in one such decomposition of ``G`` are ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``,\n then the orders of the factors that are p-groups in any such decomposition of ``G``\n are ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``.\n\n The uniquely determined integers ``p^{t_1} >= p^{t_2} >= ... p^{t_r}``, taken\n for all primes that divide ``|G|`` are called the invariants of the nontrivial\n group ``G`` as suggested in ([14], p. 542).\n\n Notes\n =====\n\n We adopt the convention that the invariants of a trivial group are [].\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation, PermutationGroup\n >>> a = Permutation([0, 2, 1])\n >>> b = Permutation([1, 0, 2])\n >>> G = PermutationGroup([a, b])\n >>> G.abelian_invariants()\n [2]\n >>> from sympy.combinatorics import CyclicGroup\n >>> G = CyclicGroup(7)\n >>> G.abelian_invariants()\n [7]\n\n ", "language": "en", "n_whitespaces": 437, "n_words": 212, "vocab_size": 103 }
def abelian_invariants(self): if self.is_trivial: return [] gns = self.generators inv = [] G = self H = G.derived_subgroup() Hgens = H.generators for p in primefactors(G.order()): ranks = [] while True: pows = [] for g in gns: elm = g**p if not H.contains(elm): pows.append(elm) K = PermutationGroup(Hgens + pows) if pows else H r = G.order()//K.order() G = K gns = pows if r == 1: break ranks.append(multiplicity(p, r)) if ranks: pows = [1]*ranks[0] for i in ranks: for j in range(0, i): pows[j] = pows[j]*p inv.extend(pows) inv.sort() return inv
80,751
271,364
652
keras/engine/functional_utils.py
292
35
def clone_graph_nodes(inputs, outputs): nodes_to_clone = find_nodes_by_inputs_and_outputs(inputs, outputs) cloned_inputs = [] cloned_outputs = [] # We not only need to create copies of Nodes (mimic the calls), also need to # clone keras_tensors to avoid the override of _keras_history attached on the # keras_tensor. The following dict is used to track any keras tensor we cloned # The key is the string ID of the original keras tensor, and value is the # cloned keras_tensor instance. kt_id_mapping = {} for kt_input in tf.nest.flatten(inputs): if kt_input.
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
clone_graph_nodes
84afc5193d38057e2e2badf9c889ea87d80d8fbf
keras
functional_utils.py
14
34
https://github.com/keras-team/keras.git
5
221
0
161
364
Python
{ "docstring": "Clone the `Node` between the inputs and output tensors.\n\n This function is used to create a new functional model from any intermediate\n keras tensors. The clone of the nodes mimic the behavior of reconstructing the\n functional graph network by re-executing all the __call__ methods. The cloned\n nodes will be appended to the layers.\n\n Note that a new tf.keras.Inputs will be created for any items in the `inputs`\n\n Args:\n inputs: A nested structure of keras_tensors.\n outputs: A nested structure of keras_tensors.\n\n Returns:\n A pair of inputs and outputs, with cloned keras_tensors. They can be used to\n create a new functional model.\n ", "language": "en", "n_whitespaces": 144, "n_words": 100, "vocab_size": 63 }
def clone_graph_nodes(inputs, outputs): nodes_to_clone = find_nodes_by_inputs_and_outputs(inputs, outputs) cloned_inputs = [] cloned_outputs = [] # We not only need to create copies of Nodes (mimic the calls), also need to # clone keras_tensors to avoid the override of _keras_history attached on the # keras_tensor. The following dict is used to track any keras tensor we cloned # The key is the string ID of the original keras tensor, and value is the # cloned keras_tensor instance. kt_id_mapping = {} for kt_input in tf.nest.flatten(inputs): if kt_input.node.is_input: # For any existing keras_tensor from tf.keras.Input, we leave them as is. cloned_inputs.append(kt_input) kt_id_mapping[id(kt_input)] = kt_input else: # We need to create a new tf.keras.Input for any intermediate keras_tensor cpy = _clone_keras_tensor(kt_input) cloned_input = input_layer_module.Input(tensor=cpy) cloned_inputs.append(cloned_input) kt_id_mapping[id(kt_input)] = cloned_input cloned_inputs = tf.nest.pack_sequence_as(inputs, cloned_inputs) for kt_output in tf.nest.flatten(outputs): cpy = _clone_keras_tensor(kt_output) # We reuse the _keras_history here, which contains the old information. It # is used in the Node constructor to check if the tensor "is_keras_tensor()" # The history will be override by the Node constructor anyway for the # corresponding layer output anyway. cpy._keras_history = ( kt_output._keras_history ) # pylint: disable=protected-access cloned_outputs.append(cpy) kt_id_mapping[id(kt_output)] = cpy cloned_outputs = tf.nest.pack_sequence_as(outputs, cloned_outputs) for node in nodes_to_clone: # Clone any keras_tensors to avoid override of _keras_history # Or reuse an existing keras_tensor if it has already been cloned. output_copy = clone_keras_tensors(node.output_tensors, kt_id_mapping) call_args_copy = clone_keras_tensors(node.call_args, kt_id_mapping) call_kwargs_copy = clone_keras_tensors(node.call_kwargs, kt_id_mapping) # Creating new nodes based on the existing node information. # Node wires itself to inbound and outbound layers. # The Node constructor actually updates this layer's self._inbound_nodes, # sets _keras_history on the outputs, and adds itself to the # `_outbound_nodes` of the layers that produced the inputs to this # layer call. node_module.Node( node.layer, call_args=call_args_copy, call_kwargs=call_kwargs_copy, outputs=output_copy, ) return cloned_inputs, cloned_outputs
49,276
199,471
83
sympy/physics/mechanics/rigidbody.py
26
14
def parallel_axis(self, point, frame=None): # circular import issue from sympy.physics.mechanics.functions import inertia_of_point_mass if frame is None: frame = self.frame return self.central_inertia.express(frame) + inertia_of_point_mass( sel
Add optional frame argument to parallel axis method
parallel_axis
801e149d69d5f88919a735f8b55b6024f97c6950
sympy
rigidbody.py
11
6
https://github.com/sympy/sympy.git
2
59
0
24
90
Python
{ "docstring": "Returns the inertia dyadic of the body with respect to another\n point.\n\n Parameters\n ==========\n\n point : sympy.physics.vector.Point\n The point to express the inertia dyadic about.\n frame : sympy.physics.vector.ReferenceFrame\n The reference frame used to construct the dyadic.\n\n Returns\n =======\n\n inertia : sympy.physics.vector.Dyadic\n The inertia dyadic of the rigid body expressed about the provided\n point.\n\n ", "language": "en", "n_whitespaces": 160, "n_words": 53, "vocab_size": 31 }
def parallel_axis(self, point, frame=None): # circular import issue from sympy.physics.mechanics.functions import inertia_of_point_mass if frame is None: frame = self.frame return self.central_inertia.express(frame) + inertia_of_point_mass( self.mass, self.masscenter.pos_from(point), frame)
75,568
259,103
38
sklearn/utils/tests/test_class_weight.py
26
5
def test_class_weight_does_not_contains_more_classses(): tree = DecisionTreeClassifier(class_weight={0: 1, 1: 10, 2: 20}) # Does not raise tree.fit([[0, 0, 1], [1, 0, 1], [1, 2, 0]], [0, 0, 1])
FIX Support extra class_weights in compute_class_weight (#22595)
test_class_weight_does_not_contains_more_classses
3605c140af992b6ac52f04f1689c58509cc0b5b2
scikit-learn
test_class_weight.py
11
3
https://github.com/scikit-learn/scikit-learn.git
1
63
0
22
89
Python
{ "docstring": "Check that class_weight can contain more labels than in y.\n\n Non-regression test for #22413\n ", "language": "en", "n_whitespaces": 20, "n_words": 14, "vocab_size": 14 }
def test_class_weight_does_not_contains_more_classses(): tree = DecisionTreeClassifier(class_weight={0: 1, 1: 10, 2: 20}) # Does not raise tree.fit([[0, 0, 1], [1, 0, 1], [1, 2, 0]], [0, 0, 1])
78,532
266,720
429
test/lib/ansible_test/_internal/cli/argparsing/parsers.py
89
28
def parse(self, state): # type: (ParserState) -> str if state.mode == ParserMode.PARSE: path = AnyParser().
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
parse
a06fa496d3f837cca3c437ab6e9858525633d147
ansible
parsers.py
22
21
https://github.com/ansible/ansible.git
9
145
0
65
285
Python
{ "docstring": "Parse the input from the given state and return the result.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 9 }
def parse(self, state): # type: (ParserState) -> str if state.mode == ParserMode.PARSE: path = AnyParser().parse(state) if not os.path.isfile(path): raise ParserError(f'Not a file: {path}') else: path = '' with state.delimit(PATH_DELIMITER, required=False) as boundary: # type: ParserBoundary while boundary.ready: directory = path or '.' try: with os.scandir(directory) as scan: # type: t.Iterator[os.DirEntry] choices = [f'{item.name}{PATH_DELIMITER}' if item.is_dir() else item.name for item in scan] except OSError: choices = [] if not path: choices.append(PATH_DELIMITER) # allow absolute paths choices.append('../') # suggest relative paths part = RelativePathNameParser(choices).parse(state) path += f'{part}{boundary.match or ""}' return path
43,751
182,133
94
src/textual/view.py
31
4
def layout(self) -> Layout: # self.log("I", self._inline_styles) # self.log("C", self._css_styles) # self.log("S", self.styles) assert self.s
implement inline styles
layout
c90cdd4ec8a10c689fee83a6a71e025393dcb38d
textual
view.py
7
7
https://github.com/Textualize/textual.git
1
20
0
21
46
Python
{ "docstring": "Convenience property for accessing ``self.styles.layout``.\n\n Returns: The Layout associated with this view\n Convenience property setter for setting ``view.styles.layout``.\n # Args:\n # new_value:\n\n # Returns:\n # None\n # ", "language": "en", "n_whitespaces": 84, "n_words": 27, "vocab_size": 19 }
def layout(self) -> Layout: # self.log("I", self._inline_styles) # self.log("C", self._css_styles) # self.log("S", self.styles) assert self.styles.layout return self.styles.layout # @layout.setter # def layout(self, new_value: Layout) -> None: # # self.styles.layout = new_value
53,965
215,425
355
salt/transport/rabbitmq.py
64
15
def timeout_message(self, message): future = self.send_future_map.pop(message, None) # In a race condition the message might have been sent by the time # we're timing it out. Make sure the future is not None if future is not None: del self.send_timeout_map[message] if future.attempts < future.tries: future.attempts += 1 log.info( "SaltReqTimeoutError, retrying. (%s/%s)", future.attempts, future.tries, ) self.send( message, timeout=future.timeout, tries=future.tries,
Start to add base class defs
timeout_message
ab4803984bce4a4de7cc10910e7310c4babf557e
salt
rabbitmq.py
15
19
https://github.com/saltstack/salt.git
3
96
0
54
152
Python
{ "docstring": "\n Handle a message timeout by removing it from the sending queue\n and informing the caller\n\n :raises: SaltReqTimeoutError\n ", "language": "en", "n_whitespaces": 46, "n_words": 17, "vocab_size": 16 }
def timeout_message(self, message): future = self.send_future_map.pop(message, None) # In a race condition the message might have been sent by the time # we're timing it out. Make sure the future is not None if future is not None: del self.send_timeout_map[message] if future.attempts < future.tries: future.attempts += 1 log.info( "SaltReqTimeoutError, retrying. (%s/%s)", future.attempts, future.tries, ) self.send( message, timeout=future.timeout, tries=future.tries, future=future, ) else: future.set_exception(SaltReqTimeoutError("Message timed out"))
768
5,410
467
airbyte-integrations/connectors/source-freshdesk/unit_tests/test_300_page.py
152
22
def test_not_all_records(self, requests_mock, authenticator, config, responses): expected_output = [ {"id": 1, "updated_at": "2018-01-02T00:00:00Z"}, {"id": 2, "updated_at": "2018-02-02T00:00:00Z"}, {"id": 2, "updated_at": "2018-02-02T00:00:00Z"}, # duplicate {"id": 3, "updated_at": "2018-03-02T00:00:00Z"}, {"id": 3, "updated_at": "2018-03-02T00:00:00Z"}, # duplicate {"id": 4, "updated_at": "2019-01-03T00:00:00Z"}, {"id": 4, "updated_at": "2019-01-03T00:00:00Z"}, # duplicate {"id": 5, "updated_at": "2019-02-03T00:00:00Z"}, {"id": 5, "updated_at": "2019-02-03T00:00:00Z"}, # duplicate {"id": 6, "updated_at": "2019-03-03T00:00:00Z"}, ] # INT value of page number where the switch state should be triggered. # in this test case values from: 1 - 4, assuming we want to switch state on this page. ticket_paginate_limit = 2 # This parameter mocks the "per_page" parameter in the API Ca
🎉 Source Freshdesk: Migrated to latest CDK (#12334)
test_not_all_records
9d1cd42ff9f3118e2312ea9c94ad647f1baaad73
airbyte
test_300_page.py
13
27
https://github.com/airbytehq/airbyte.git
2
201
0
90
360
Python
{ "docstring": "\n TEST 1 - not all records are retrieved\n\n During test1 the tickets_stream changes the state of parameters on page: 2,\n by updating the params:\n `params[\"order_by\"] = \"updated_at\"`\n `params[\"updated_since\"] = last_record`\n continues to fetch records from the source, using new cycle, and so on.\n\n NOTE:\n After switch of the state on ticket_paginate_limit = 2, is this example, we will experience the\n records duplication, because of the last_record state, starting at the point\n where we stoped causes the duplication of the output. The solution for this is to add at least 1 second to the\n last_record state. The DBT normalization should handle this for the end user, so the duplication issue is not a\n blocker in such cases.\n Main pricipal here is: airbyte is at-least-once delivery, but skipping records is data loss.\n ", "language": "en", "n_whitespaces": 229, "n_words": 130, "vocab_size": 90 }
def test_not_all_records(self, requests_mock, authenticator, config, responses): expected_output = [ {"id": 1, "updated_at": "2018-01-02T00:00:00Z"}, {"id": 2, "updated_at": "2018-02-02T00:00:00Z"}, {"id": 2, "updated_at": "2018-02-02T00:00:00Z"}, # duplicate {"id": 3, "updated_at": "2018-03-02T00:00:00Z"}, {"id": 3, "updated_at": "2018-03-02T00:00:00Z"}, # duplicate {"id": 4, "updated_at": "2019-01-03T00:00:00Z"}, {"id": 4, "updated_at": "2019-01-03T00:00:00Z"}, # duplicate {"id": 5, "updated_at": "2019-02-03T00:00:00Z"}, {"id": 5, "updated_at": "2019-02-03T00:00:00Z"}, # duplicate {"id": 6, "updated_at": "2019-03-03T00:00:00Z"}, ] # INT value of page number where the switch state should be triggered. # in this test case values from: 1 - 4, assuming we want to switch state on this page. ticket_paginate_limit = 2 # This parameter mocks the "per_page" parameter in the API Call result_return_limit = 1 # Create test_stream instance. test_stream = Tickets(authenticator=authenticator, config=config) test_stream.ticket_paginate_limit = ticket_paginate_limit test_stream.result_return_limit = result_return_limit # Mocking Request for response in responses: requests_mock.register_uri( "GET", response["url"], json=response["json"], headers=response.get("headers", {}), ) records = list(test_stream.read_records(sync_mode=SyncMode.full_refresh)) # We're expecting 6 records to return from the tickets_stream assert records == expected_output
50,844
204,704
29
django/core/management/commands/test.py
8
6
def run_from_argv(self, argv): s
Refs #33476 -- Reformatted code with Black.
run_from_argv
9c19aff7c7561e3a82978a272ecdaad40dda5c00
django
test.py
9
3
https://github.com/django/django.git
1
26
0
8
46
Python
{ "docstring": "\n Pre-parse the command line to extract the value of the --testrunner\n option. This allows a test runner to define additional command line\n arguments.\n ", "language": "en", "n_whitespaces": 52, "n_words": 23, "vocab_size": 18 }
def run_from_argv(self, argv): self.test_runner = get_command_line_option(argv, "--testrunner") super().run_from_argv(argv)
18,999
93,612
346
tests/sentry/integrations/slack/notifications/test_new_processing_issues.py
57
20
def test_new_processing_issue(self, mock_func): notification = NewProcessingIssuesActivityNotification( Activity( project=self.project, user=self.user, type=ActivityType.NEW_PROCESSING_ISSUES, data={ "issues": get_issues_data(), "reprocessing_active": True, },
fix(slack): Fix broken url formatting (#36976) Fix the URL format, it should be `<url|text>`.
test_new_processing_issue
e4c6ad69c22692e2999baa26d8bf8f44947cd1c1
sentry
test_new_processing_issues.py
15
27
https://github.com/getsentry/sentry.git
1
95
0
45
208
Python
{ "docstring": "\n Test that a Slack message is sent with the expected payload when an issue is held back in reprocessing\n ", "language": "en", "n_whitespaces": 34, "n_words": 19, "vocab_size": 18 }
def test_new_processing_issue(self, mock_func): notification = NewProcessingIssuesActivityNotification( Activity( project=self.project, user=self.user, type=ActivityType.NEW_PROCESSING_ISSUES, data={ "issues": get_issues_data(), "reprocessing_active": True, }, ) ) with self.tasks(): notification.send() attachment, text = get_attachment() assert ( text == f"Processing issues on <http://testserver/settings/{self.organization.slug}/projects/{self.project.slug}/processing-issues/|{self.project.slug}>" ) assert ( attachment["text"] == f"Some events failed to process in your project {self.project.slug}" ) assert ( attachment["footer"] == f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=new_processing_issues_activity-slack-user|Notification Settings>" )
37,541
158,401
20
d2l/torch.py
11
9
def load_array(data_arrays, batch_size, is_train=True): dataset = data.TensorDataset(*data_arrays)
[PaddlePaddle] Merge master into Paddle branch (#1186) * change 15.2 title in chinese version (#1109) change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘ * 修改部分语义表述 (#1105) * Update r0.17.5 (#1120) * Bump versions in installation * 94行typo: (“bert.mall”)->(“bert.small”) (#1129) * line 313: "bert.mall" -> "bert.small" (#1130) * fix: update language as native reader (#1114) * Fix the translation of "stride" (#1115) * Update index.md (#1118) 修改部分语义表述 * Update self-attention-and-positional-encoding.md (#1133) 依照本书的翻译习惯,将pooling翻译成汇聚 * maybe a comment false (#1149) * maybe a little false * maybe a little false * A minor bug in the rcnn section (Chinese edition) (#1148) * Update bert.md (#1137) 一个笔误 # 假设batch_size=2,num_pred_positions=3 # 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1] * Update calculus.md (#1135) * fix typo in git documentation (#1106) * fix: Update the Chinese translation in lr-scheduler.md (#1136) * Update lr-scheduler.md * Update chapter_optimization/lr-scheduler.md Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * fix translation for kaggle-house-price.md (#1107) * fix translation for kaggle-house-price.md * fix translation for kaggle-house-price.md Signed-off-by: sunhaizhou <[email protected]> * Update weight-decay.md (#1150) * Update weight-decay.md 关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解 关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。 并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释 解释为何会增加复杂性以及为何需要细粒度工具。 * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Fix a spelling error (#1161) * Update gru.md (#1152) The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state. 翻译错误 * Unify the function naming (#1113) Unify naming of the function 'init_xavier()'. * Update mlp-concise.md (#1166) * Update mlp-concise.md 语句不通顺 * Update environment.md 语序异常 * Update config.ini * fix the imprecise description (#1168) Co-authored-by: yuande <yuande> * fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175) * Fix some typos. (#1163) * Update batch-norm.md (#1170) fixing typos u->x in article * Update linear-regression.md (#1090) We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that 原译文把who也直接翻译出来了。 * Update mlp.md (#1117) * Update mlp.md 修改部分语义表述 * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Correct a translation error. (#1091) * Correct a translation error. 
* Update chapter_computer-vision/image-augmentation.md Co-authored-by: Aston Zhang <[email protected]> * Update aws.md (#1121) * Update aws.md * Update chapter_appendix-tools-for-deep-learning/aws.md Co-authored-by: Aston Zhang <[email protected]> * Update image-augmentation.md (#1093) * Update anchor.md (#1088) fix a minor issue in code * Update anchor.md * Update image-augmentation.md * fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087) * Avoid `torch.meshgrid` user warning (#1174) Avoids the following user warning: ```python ~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.) return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] ``` * bump to 2.0.0-beta1 * Update sequence.md * bump beta1 on readme * Add latex code block background to config * BLD: Bump python support version 3.9 (#1183) * BLD: Bump python support version 3.9 * Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4 * BLD: Bump torch and tensorflow * Update Jenkinsfile * Update chapter_installation/index.md * Update chapter_installation/index.md Co-authored-by: Aston Zhang <[email protected]> * Update config.ini * Update INFO.md * Update INFO.md * Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187) * resolve the conflicts * revise from publisher (#1089) * revise from publisher * d2l api * post_latex * revise from publisher * revise ch11 * Delete d2l-Copy1.bib * clear cache * rm d2lbook clear * debug anchor * keep original d2l doc Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: Aston Zhang <[email protected]> * 重复语句 (#1188) Co-authored-by: Aston Zhang <[email protected]> * Improve expression for chapter_preliminaries/pandas.md (#1184) * Update pandas.md * Improve expression * Improve expression * Update chapter_preliminaries/pandas.md Co-authored-by: Aston Zhang <[email protected]> * Improce expression for chapter_preliminaries/linear-algebra.md (#1185) * Improce expression * Improve code comments * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md Co-authored-by: Aston Zhang <[email protected]> * Fix multibox_detection bugs * Update d2l to 0.17.5 version * restore older version * Upgrade pandas * change to python3.8 * Test warning log * relocate warning log * test logs filtering * Update gru.md * Add DeprecationWarning filter * Test warning log * Update attention mechanisms & computational performance * Update multilayer perceptron& linear & convolution networks & computer vision * Update recurrent&optimition&nlp pretraining & nlp applications * ignore warnings * Update index.md * Update linear networks * Update multilayer perceptrons&deep learning computation * Update preliminaries * Check and Add warning filter * Update kaggle-cifar10.md * Update object-detection-dataset.md * Update ssd.md fcn.md * Update hybridize.md * Update hybridize.md Signed-off-by: sunhaizhou <[email protected]> Co-authored-by: zhou201505013 <[email protected]> Co-authored-by: Xinwei Liu <[email protected]> Co-authored-by: Anirudh Dagar <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: hugo_han <[email protected]> 
Co-authored-by: gyro永不抽风 <[email protected]> Co-authored-by: CanChengZheng <[email protected]> Co-authored-by: linlin <[email protected]> Co-authored-by: iuk <[email protected]> Co-authored-by: yoos <[email protected]> Co-authored-by: Mr. Justice Lawrence John Wargrave <[email protected]> Co-authored-by: Chiyuan Fu <[email protected]> Co-authored-by: Sunhuashan <[email protected]> Co-authored-by: Haiker Sun <[email protected]> Co-authored-by: Ming Liu <[email protected]> Co-authored-by: goldmermaid <[email protected]> Co-authored-by: silenceZheng66 <[email protected]> Co-authored-by: Wenchao Yan <[email protected]> Co-authored-by: Kiki2049 <[email protected]> Co-authored-by: Krahets <[email protected]> Co-authored-by: friedmainfunction <[email protected]> Co-authored-by: Jameson <[email protected]> Co-authored-by: P. Yao <[email protected]> Co-authored-by: Yulv-git <[email protected]> Co-authored-by: Liu,Xiao <[email protected]> Co-authored-by: YIN, Gang <[email protected]> Co-authored-by: Joe-HZ <[email protected]> Co-authored-by: lybloveyou <[email protected]> Co-authored-by: VigourJiang <[email protected]> Co-authored-by: zxhd863943427 <[email protected]> Co-authored-by: LYF <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: xiaotinghe <[email protected]> Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Holly-Max <[email protected]> Co-authored-by: HinGwenWoong <[email protected]> Co-authored-by: Shuai Zhang <[email protected]>
load_array
b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2
d2l-zh
torch.py
9
3
https://github.com/d2l-ai/d2l-zh.git
1
34
0
10
53
Python
{ "docstring": "Construct a PyTorch data iterator.\n\n Defined in :numref:`sec_linear_concise`", "language": "en", "n_whitespaces": 10, "n_words": 8, "vocab_size": 8 }
def load_array(data_arrays, batch_size, is_train=True): dataset = data.TensorDataset(*data_arrays) return data.DataLoader(dataset, batch_size, shuffle=is_train)
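As a point of reference, a small usage sketch for the `load_array` helper above, assuming PyTorch is available and `data` refers to `torch.utils.data` as in the d2l module; the synthetic tensors are illustrative only:

import torch
from torch.utils import data

features = torch.randn(100, 2)        # 100 synthetic examples, 2 features each
labels = torch.randn(100, 1)
train_iter = load_array((features, labels), batch_size=10)
X, y = next(iter(train_iter))         # one shuffled mini-batch of 10 examples
print(X.shape, y.shape)               # torch.Size([10, 2]) torch.Size([10, 1])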
4,172
22,092
56
pipenv/patched/pip/_vendor/requests/models.py
13
7
def is_permanent_redirect(self):
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
is_permanent_redirect
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
pipenv
models.py
9
5
https://github.com/pypa/pipenv.git
2
27
0
12
45
Python
{ "docstring": "True if this Response one of the permanent versions of redirect.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
def is_permanent_redirect(self): return "location" in self.headers and self.status_code in ( codes.moved_permanently, codes.permanent_redirect, )
82,535
278,431
329
keras/utils/generic_utils.py
102
7
def _estimate_step_duration(self, current, now): if current: # there are a few special scenarios here: # 1) somebody is calling the progress bar without ever supplying # step 1 # 2) somebody is calling the p
resolve line-too-long in utils
_estimate_step_duration
80ee2fa4e1db2dda14370110830db82be3eb97b7
keras
generic_utils.py
15
13
https://github.com/keras-team/keras.git
5
69
0
68
116
Python
{ "docstring": "Estimate the duration of a single step.\n\n Given the step number `current` and the corresponding time `now` this\n function returns an estimate for how long a single step takes. If this\n is called before one step has been completed (i.e. `current == 0`) then\n zero is given as an estimate. The duration estimate ignores the duration\n of the (assumed to be non-representative) first step for estimates when\n more steps are available (i.e. `current>1`).\n\n Args:\n current: Index of current step.\n now: The current time.\n\n Returns: Estimate of the duration of a single step.\n ", "language": "en", "n_whitespaces": 173, "n_words": 92, "vocab_size": 62 }
def _estimate_step_duration(self, current, now): if current: # there are a few special scenarios here: # 1) somebody is calling the progress bar without ever supplying # step 1 # 2) somebody is calling the progress bar and supplies step one # multiple times, e.g. as part of a finalizing call # in these cases, we just fall back to the simple calculation if self._time_after_first_step is not None and current > 1: time_per_unit = (now - self._time_after_first_step) / ( current - 1 ) else: time_per_unit = (now - self._start) / current if current == 1: self._time_after_first_step = now return time_per_unit else: return 0
35,202
152,956
91
modin/core/dataframe/pandas/dataframe/dataframe.py
34
10
def _propagate_index_objs(self, axis=None): self._filter_empties() if axis is None or axis == 0: cum_row_lengths = np.cumsum([0] + self._row_lengths) if axis is None or axis == 1: cum_col_widths = np.cumsum([0] + self._column
FEAT-#3111: Ensure relabeling Modin Frame does not lose partition shape (#3662) Co-authored-by: Devin Petersohn <[email protected]> Signed-off-by: Naren Krishna <[email protected]>
_propagate_index_objs
3c740dbfcdd69ddc3ab45a42be996e5c61104342
modin
dataframe.py
12
64
https://github.com/modin-project/modin.git
15
373
0
20
107
Python
{ "docstring": "\n Synchronize labels by applying the index object for specific `axis` to the `self._partitions` lazily.\n\n Adds `set_axis` function to call-queue of each partition from `self._partitions`\n to apply new axis.\n\n Parameters\n ----------\n axis : int, default: None\n The axis to apply to. If it's None applies to both axes.\n ", "language": "en", "n_whitespaces": 108, "n_words": 47, "vocab_size": 38 }
def _propagate_index_objs(self, axis=None): self._filter_empties() if axis is None or axis == 0: cum_row_lengths = np.cumsum([0] + self._row_lengths) if axis is None or axis == 1: cum_col_widths = np.cumsum([0] + self._column_widths) if axis is None:
71,784
247,618
1,107
tests/handlers/test_e2e_keys.py
114
23
def test_query_devices_remote_no_sync(self) -> None: remote_user_id = "@test:other" local_user_id = "@test:test" remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" self.hs.get_federation_client().query_client_keys
Add type hints to some tests/handlers files. (#12224)
test_query_devices_remote_no_sync
5dd949bee6158a8b651db9f2ae417a62c8184bfd
synapse
test_e2e_keys.py
21
66
https://github.com/matrix-org/synapse.git
1
244
0
52
423
Python
{ "docstring": "Tests that querying keys for a remote user that we don't share a room\n with returns the cross signing keys correctly.\n ", "language": "en", "n_whitespaces": 35, "n_words": 21, "vocab_size": 18 }
def test_query_devices_remote_no_sync(self) -> None: remote_user_id = "@test:other" local_user_id = "@test:test" remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" self.hs.get_federation_client().query_client_keys = mock.Mock( return_value=defer.succeed( { "device_keys": {remote_user_id: {}}, "master_keys": { remote_user_id: { "user_id": remote_user_id, "usage": ["master"], "keys": {"ed25519:" + remote_master_key: remote_master_key}, }, }, "self_signing_keys": { remote_user_id: { "user_id": remote_user_id, "usage": ["self_signing"], "keys": { "ed25519:" + remote_self_signing_key: remote_self_signing_key }, } }, } ) ) e2e_handler = self.hs.get_e2e_keys_handler() query_result = self.get_success( e2e_handler.query_devices( { "device_keys": {remote_user_id: []}, }, timeout=10, from_user_id=local_user_id, from_device_id="some_device_id", ) ) self.assertEqual(query_result["failures"], {}) self.assertEqual( query_result["master_keys"], { remote_user_id: { "user_id": remote_user_id, "usage": ["master"], "keys": {"ed25519:" + remote_master_key: remote_master_key}, }, }, ) self.assertEqual( query_result["self_signing_keys"], { remote_user_id: { "user_id": remote_user_id, "usage": ["self_signing"], "keys": { "ed25519:" + remote_self_signing_key: remote_self_signing_key }, } }, )
48,956
198,477
38
sympy/core/mul.py
17
6
def _matches_get_other_nodes(dictionary, nodes, node_ind): ind_node = nodes[node_ind] return [ind for ind in dictionary if nodes[ind] == ind_node]
Code cleanup
_matches_get_other_nodes
9d58006fc0a23afcba38f641c9472917c436428a
sympy
mul.py
9
3
https://github.com/sympy/sympy.git
3
31
0
17
47
Python
{ "docstring": "Find other wildcards that may have already been matched.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
def _matches_get_other_nodes(dictionary, nodes, node_ind): ind_node = nodes[node_ind] return [ind for ind in dictionary if nodes[ind] == ind_node]
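For illustration, a tiny worked call of `_matches_get_other_nodes` above; the dictionary and node list are made up, not taken from SymPy's matcher internals:

nodes = ['x', 'y', 'x', 'z']
dictionary = {0: 'a', 2: 'b', 3: 'c'}   # indices already matched to wildcards
# Indices in `dictionary` whose node equals nodes[0] ('x'):
print(_matches_get_other_nodes(dictionary, nodes, 0))  # [0, 2]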
6,372
35,025
121
src/transformers/processing_utils.py
37
11
def save_pretrained(self, save_directory): for attribute_name in self.attributes: attribute = getattr(self, attribute_name) # Include the processor class in the attribute config so this processor can then be reloaded with the # `AutoProcessor` API. if hasattr(attribute, "_set_processor_class"):
PoC for a ProcessorMixin class (#15549) * PoC for a ProcessorMixin class * Documentation * Apply suggestions from code review Co-authored-by: NielsRogge <[email protected]> Co-authored-by: Suraj Patil <[email protected]> Co-authored-by: Patrick von Platen <[email protected]> * Roll out to other processors * Add base feature extractor class in init * Use args and kwargs Co-authored-by: NielsRogge <[email protected]> Co-authored-by: Suraj Patil <[email protected]> Co-authored-by: Patrick von Platen <[email protected]>
save_pretrained
b5c6fdecf0cab6ffe22bee2ca5b8474afba0d813
transformers
processing_utils.py
13
6
https://github.com/huggingface/transformers.git
3
47
0
31
79
Python
{ "docstring": "\n Saves the attributes of this processor (feature extractor, tokenizer...) in the specified directory so that it\n can be reloaded using the [`~ProcessorMixin.from_pretrained`] method.\n\n <Tip>\n\n This class method is simply calling [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] and\n [`~tokenization_utils_base.PreTrainedTokenizer.save_pretrained`]. Please refer to the docstrings of the methods\n above for more information.\n\n </Tip>\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the feature extractor JSON file and the tokenizer files will be saved (directory will\n be created if it does not exist).\n ", "language": "en", "n_whitespaces": 179, "n_words": 74, "vocab_size": 62 }
def save_pretrained(self, save_directory): for attribute_name in self.attributes: attribute = getattr(self, attribute_name) # Include the processor class in the attribute config so this processor can then be reloaded with the # `AutoProcessor` API. if hasattr(attribute, "_set_processor_class"): attribute._set_processor_class(self.__class__.__name__) attribute.save_pretrained(save_directory)
5,284
30,033
102
saleor/permission/models.py
24
9
def _user_has_module_perms(user, app_label): for backend in auth.get_backends(): if not hasattr(backend, "has_module_perms"): continue try: if backend.has_module_perms(user, app_label
Move PermissionsMixin from django auth
_user_has_module_perms
d5ef58653803075849a6a13177e7a6e604aa2f60
saleor
models.py
11
10
https://github.com/saleor/saleor.git
5
48
0
19
80
Python
{ "docstring": "Backend can raise `PermissionDenied` to short-circuit permission checking.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
def _user_has_module_perms(user, app_label): for backend in auth.get_backends(): if not hasattr(backend, "has_module_perms"): continue try: if backend.has_module_perms(user, app_label): return True except PermissionDenied: return False return False
@frappe.whitelist()
14,578
67,615
13
erpnext/stock/doctype/delivery_trip/delivery_trip.py
26
11
def get_default_address(out, name): shipping_addresses = frappe.db.sql( , (name), as_dict=1, ) if shipping_addresses: for out.shipping_address in shipping_addresses: if out.shipping_address.is_shipping_address: return out.shipping_address out.shipping_address = shipping_addresses[0] return out.shipping_address @frappe.whitelis
style: format code with black
get_default_address
494bd9ef78313436f0424b918f200dab8fc7c20b
erpnext
delivery_trip.py
12
21
https://github.com/frappe/erpnext.git
4
59
1
19
101
Python
{ "docstring": "\n\t\t\tSELECT parent,\n\t\t\t\t(SELECT is_shipping_address FROM tabAddress a WHERE a.name=dl.parent) AS is_shipping_address\n\t\t\tFROM\n\t\t\t\t`tabDynamic Link` dl\n\t\t\tWHERE\n\t\t\t\tdl.link_doctype=\"Customer\"\n\t\t\t\tAND dl.link_name=%s\n\t\t\t\tAND dl.parenttype = \"Address\"\n\t\t", "language": "en", "n_whitespaces": 15, "n_words": 23, "vocab_size": 19 }
def get_default_address(out, name): shipping_addresses = frappe.db.sql( , (name), as_dict=1, ) if shipping_addresses: for out.shipping_address in shipping_addresses: if out.shipping_address.is_shipping_address: return out.shipping_address out.shipping_address = shipping_addresses[0] return out.shipping_address @frappe.whitelist()
29,007
129,707
56
rllib/utils/__init__.py
30
8
def force_list(elements=None, to_tuple=False): ctor = list if to_tuple is True: ctor = tuple return ctor() if elements is None else ctor(elements) \ if type(el
[RLlib] Make `policies_to_train` more flexible via callable option. (#20735)
force_list
371fbb17e4120f569e0b6c5efde9a00a097f438e
ray
__init__.py
10
6
https://github.com/ray-project/ray.git
4
57
0
24
87
Python
{ "docstring": "\n Makes sure `elements` is returned as a list, whether `elements` is a single\n item, already a list, or a tuple.\n\n Args:\n elements (Optional[any]): The inputs as single item, list, or tuple to\n be converted into a list/tuple. If None, returns empty list/tuple.\n to_tuple (bool): Whether to use tuple (instead of list).\n\n Returns:\n Union[list,tuple]: All given elements in a list/tuple depending on\n `to_tuple`'s value. If elements is None,\n returns an empty list/tuple.\n ", "language": "en", "n_whitespaces": 141, "n_words": 71, "vocab_size": 47 }
def force_list(elements=None, to_tuple=False): ctor = list if to_tuple is True: ctor = tuple return ctor() if elements is None else ctor(elements) \ if type(elements) in [list, set, tuple] else ctor([elements])
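A few representative calls of the `force_list` utility above, assuming it is in scope; the values are illustrative only:

print(force_list(None))                    # []
print(force_list(5))                       # [5]
print(force_list((1, 2)))                  # [1, 2]
print(force_list([1, 2], to_tuple=True))   # (1, 2)
print(force_list("ab"))                    # ['ab'] -- strings are wrapped, not split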
36,905
157,365
467
ldm/models/diffusion/dpm_solver/dpm_solver.py
159
17
def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device): if order == 3: K = steps // 3 + 1 if steps % 3 == 0: orders = [3, ] * (K - 2) + [2, 1] elif steps % 3 == 1: orders = [3, ] * (K - 1) + [1] else: orders = [3, ] * (K - 1) + [2] elif order == 2: if steps % 2 == 0: K = steps // 2 orders = [2, ] * K else: K = steps // 2 + 1 orders = [2, ] * (K - 1) + [1] elif order == 1: K = 1 orders = [1, ] * steps else: raise ValueError("'order' must be '1' or '2' or '3'.") if skip_type == 'logSNR': # To reproduce the results in DPM-Solver paper timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device) else:
release more models
get_orders_and_timesteps_for_singlestep_solver
ca86da3a30c4e080d4db8c25fca73de843663cb4
stablediffusion
dpm_solver.py
18
27
https://github.com/Stability-AI/stablediffusion.git
8
228
0
66
359
Python
{ "docstring": "\n Get the order of each step for sampling by the singlestep DPM-Solver.\n We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as \"DPM-Solver-fast\".\n Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:\n - If order == 1:\n We take `steps` of DPM-Solver-1 (i.e. DDIM).\n - If order == 2:\n - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.\n - If steps % 2 == 0, we use K steps of DPM-Solver-2.\n - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If order == 3:\n - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.\n - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.\n - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.\n ============================================\n Args:\n order: A `int`. The max order for the solver (2 or 3).\n steps: A `int`. The total number of function evaluations (NFE).\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - 'logSNR': uniform logSNR for the time steps.\n - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n device: A torch device.\n Returns:\n orders: A list of the solver order of each step.\n ", "language": "en", "n_whitespaces": 634, "n_words": 309, "vocab_size": 125 }
def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device): if order == 3: K = steps // 3 + 1 if steps % 3 == 0: orders = [3, ] * (K - 2) + [2, 1] elif steps % 3 == 1: orders = [3, ] * (K - 1) + [1] else: orders = [3, ] * (K - 1) + [2] elif order == 2: if steps % 2 == 0: K = steps // 2 orders = [2, ] * K else: K = steps // 2 + 1 orders = [2, ] * (K - 1) + [1] elif order == 1: K = 1 orders = [1, ] * steps else: raise ValueError("'order' must be '1' or '2' or '3'.") if skip_type == 'logSNR': # To reproduce the results in DPM-Solver paper timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device) else: timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[ torch.cumsum(torch.tensor([0, ] + orders)).to(device)] return timesteps_outer, orders
38,693
160,675
247
numpy/core/tests/test_multiarray.py
129
27
def _aligned_zeros(shape, dtype=float, order="C", align=None): dtype = np.dtype(dtype) if dtype == np.dtype(object): # Can't do this, fall back to standard allocation (which # should always be sufficiently aligned) if align is not None: raise ValueError("object array alignment not supported") return np.zeros(shape, dtype=dtype, order=order) if align is None: align = dtype.alignment if not hasattr(shape, '__len__'): shape = (shape,) size = functools.reduce(operator.mul, shape) * dtype.itemsize buf = np.empty(size + 2*align + 1, np.uint8) ptr = buf.__array_interface__['data'][0] offse
MAINT: Simplify element setting and use it for filling This slightly modifies the behaviour of `arr.fill()` to be `arr.fill(scalar)`, i.e. match `arr1d[0] = scalar`, rather than `arr.fill(np.asarray(scalar))`, which subtely different! (Note that object was already special cased to have the scalar logic.) Otherwise, `PyArray_Pack` is now the actual, full featured, "scalar" assignment logic. It is a bit strange due to that quantity/masked array issue, but there is nothing to be done about it. The simplifications in `PyArray_AssignFromCache` should not cause any change in practice, because non 0-D arrays would have been rejected earlier on in that path. (Basically, it does not need the full `PyArray_Pack` logic, but that is fine, I intially split the two, but consolidated them again.)
_aligned_zeros
ac624d012cc0c3f90da4593e7bb8d9d335fa9696
numpy
test_multiarray.py
12
22
https://github.com/numpy/numpy.git
7
204
0
86
323
Python
{ "docstring": "\n Allocate a new ndarray with aligned memory.\n\n The ndarray is guaranteed *not* aligned to twice the requested alignment.\n Eg, if align=4, guarantees it is not aligned to 8. If align=None uses\n dtype.alignment.", "language": "en", "n_whitespaces": 44, "n_words": 32, "vocab_size": 27 }
def _aligned_zeros(shape, dtype=float, order="C", align=None): dtype = np.dtype(dtype) if dtype == np.dtype(object): # Can't do this, fall back to standard allocation (which # should always be sufficiently aligned) if align is not None: raise ValueError("object array alignment not supported") return np.zeros(shape, dtype=dtype, order=order) if align is None: align = dtype.alignment if not hasattr(shape, '__len__'): shape = (shape,) size = functools.reduce(operator.mul, shape) * dtype.itemsize buf = np.empty(size + 2*align + 1, np.uint8) ptr = buf.__array_interface__['data'][0] offset = ptr % align if offset != 0: offset = align - offset if (ptr % (2*align)) == 0: offset += align # Note: slices producing 0-size arrays do not necessarily change # data pointer --- so we use and allocate size+1 buf = buf[offset:offset+size+1][:-1] buf.fill(0) data = np.ndarray(shape, dtype, buf, order=order) return data
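For context, a minimal usage sketch of the `_aligned_zeros` test helper above, assuming it is defined as shown and NumPy is imported; the shape and alignment value are illustrative:

import numpy as np

# Request a 16-byte-aligned float64 buffer; per the docstring, the data
# pointer is aligned to 16 but deliberately *not* to 32.
a = _aligned_zeros((4, 4), dtype=np.float64, align=16)
ptr = a.__array_interface__['data'][0]
assert ptr % 16 == 0 and ptr % 32 != 0
assert not a.any()   # freshly allocated and zero-filled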
@keras_export('keras.utils.array_to_img', 'keras.preprocessing.image.array_to_img')
79,774
268,942
374
keras/preprocessing/image.py
228
36
def smart_resize(x, size, interpolation='bilinear'): if len(size) != 2: raise ValueError('Expected `size` to be a tuple of 2 integers, ' f'but got: {size}.') img = tf.convert_to_tensor(x) if img.shape.rank is not None: if img.shape.rank < 3 or img.shape.rank > 4: raise ValueError( 'Expected an image array with shape `(height, width, channels)`, ' 'or `(batch_size, height, width, channels)`, but ' f'got input with incorrect rank, of shape {img.shape}.') shape = tf.shape(img) height, width = shape[-3], shape[-2] target_height, target_width = size if img.shape.rank is not None: static_num_channels = img.shape[-1] else: static_num_channels = None crop_height = tf.cast( tf.cast(width * target_height, 'float32') / target_width, 'int32') crop_width = tf.cast( tf.cast(height * target_width, 'float32') / target_height, 'int32') # Set back to input height / width if crop_height / crop_width is not smaller. crop_height = tf.minimum(height, crop_height) crop_width = tf.minimum(width, crop_width) crop_box_hstart = tf.cast( tf.cast(height - crop_height, 'float32') / 2, 'int32') crop_box_w
Copy image utils from keras_preprocessing directly into core keras This is not new code, we are just moving these utilities directly into keras from keras-preprocessing. For the library code, just fixed linting errors. For the test code, had to do more major changes to port from pytest, but hopefully any errors have been caught by the tests themselves. PiperOrigin-RevId: 427274651
smart_resize
373ad97c72ed1ac4b6898e85b2cfd7b016e4b469
keras
image.py
15
43
https://github.com/keras-team/keras.git
11
404
1
124
661
Python
{ "docstring": "Resize images to a target size without aspect ratio distortion.\n\n TensorFlow image datasets typically yield images that have each a different\n size. However, these images need to be batched before they can be\n processed by Keras layers. To be batched, images need to share the same height\n and width.\n\n You could simply do:\n\n ```python\n size = (200, 200)\n ds = ds.map(lambda img: tf.image.resize(img, size))\n ```\n\n However, if you do this, you distort the aspect ratio of your images, since\n in general they do not all have the same aspect ratio as `size`. This is\n fine in many cases, but not always (e.g. for GANs this can be a problem).\n\n Note that passing the argument `preserve_aspect_ratio=True` to `resize`\n will preserve the aspect ratio, but at the cost of no longer respecting the\n provided target size. Because `tf.image.resize` doesn't crop images,\n your output images will still have different sizes.\n\n This calls for:\n\n ```python\n size = (200, 200)\n ds = ds.map(lambda img: smart_resize(img, size))\n ```\n\n Your output images will actually be `(200, 200)`, and will not be distorted.\n Instead, the parts of the image that do not fit within the target size\n get cropped out.\n\n The resizing process is:\n\n 1. Take the largest centered crop of the image that has the same aspect ratio\n as the target size. For instance, if `size=(200, 200)` and the input image has\n size `(340, 500)`, we take a crop of `(340, 340)` centered along the width.\n 2. Resize the cropped image to the target size. In the example above,\n we resize the `(340, 340)` crop to `(200, 200)`.\n\n Args:\n x: Input image or batch of images (as a tensor or NumPy array). Must be in\n format `(height, width, channels)` or `(batch_size, height, width,\n channels)`.\n size: Tuple of `(height, width)` integer. Target size.\n interpolation: String, interpolation to use for resizing. Defaults to\n `'bilinear'`. Supports `bilinear`, `nearest`, `bicubic`, `area`,\n `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`.\n\n Returns:\n Array with shape `(size[0], size[1], channels)`. If the input image was a\n NumPy array, the output is a NumPy array, and if it was a TF tensor,\n the output is a TF tensor.\n ", "language": "en", "n_whitespaces": 419, "n_words": 348, "vocab_size": 194 }
def smart_resize(x, size, interpolation='bilinear'):
  if len(size) != 2:
    raise ValueError('Expected `size` to be a tuple of 2 integers, '
                     f'but got: {size}.')
  img = tf.convert_to_tensor(x)
  if img.shape.rank is not None:
    if img.shape.rank < 3 or img.shape.rank > 4:
      raise ValueError(
          'Expected an image array with shape `(height, width, channels)`, '
          'or `(batch_size, height, width, channels)`, but '
          f'got input with incorrect rank, of shape {img.shape}.')

  shape = tf.shape(img)
  height, width = shape[-3], shape[-2]
  target_height, target_width = size
  if img.shape.rank is not None:
    static_num_channels = img.shape[-1]
  else:
    static_num_channels = None

  crop_height = tf.cast(
      tf.cast(width * target_height, 'float32') / target_width, 'int32')
  crop_width = tf.cast(
      tf.cast(height * target_width, 'float32') / target_height, 'int32')

  # Set back to input height / width if crop_height / crop_width is not smaller.
  crop_height = tf.minimum(height, crop_height)
  crop_width = tf.minimum(width, crop_width)

  crop_box_hstart = tf.cast(
      tf.cast(height - crop_height, 'float32') / 2, 'int32')
  crop_box_wstart = tf.cast(tf.cast(width - crop_width, 'float32') / 2, 'int32')

  if img.shape.rank == 4:
    crop_box_start = tf.stack([0, crop_box_hstart, crop_box_wstart, 0])
    crop_box_size = tf.stack([-1, crop_height, crop_width, -1])
  else:
    crop_box_start = tf.stack([crop_box_hstart, crop_box_wstart, 0])
    crop_box_size = tf.stack([crop_height, crop_width, -1])

  img = tf.slice(img, crop_box_start, crop_box_size)
  img = tf.image.resize(images=img, size=size, method=interpolation)
  # Apparent bug in resize_images_v2 may cause shape to be lost
  if img.shape.rank is not None:
    if img.shape.rank == 4:
      img.set_shape((None, None, None, static_num_channels))
    if img.shape.rank == 3:
      img.set_shape((None, None, static_num_channels))
  if isinstance(x, np.ndarray):
    return img.numpy()
  return img


@keras_export('keras.utils.array_to_img', 'keras.preprocessing.image.array_to_img')
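A minimal usage sketch for the smart_resize record above, not part of the dataset row; the tf.keras.preprocessing.image import path, the input shape, and the target size are assumptions for illustration.

# Hypothetical usage sketch; import path and shapes are assumed, not taken from the record.
import numpy as np
import tensorflow as tf

img = np.random.rand(340, 500, 3)  # a single HWC image with a non-square aspect ratio
out = tf.keras.preprocessing.image.smart_resize(img, size=(200, 200))
print(out.shape)  # (200, 200, 3): centered crop to the target aspect ratio, then bilinear resize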
120,987
337,266
68
src/accelerate/utils.py
25
13
def gather(tensor):
    if AcceleratorState().distributed_type == DistributedType.TPU:
        return _tpu_gather(tensor, name="accelerate.utils.gather")
    elif AcceleratorState().distribut
Basic fixes for DeepSpeed (#264)
gather
bbccd2c3fbaa93ed5984e22fc8bf66eb13fdb82b
accelerate
utils.py
11
9
https://github.com/huggingface/accelerate.git
4
68
0
18
113
Python
{ "docstring": "\n Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices.\n\n Args:\n tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n The data to gather.\n\n Returns:\n The same data structure as :obj:`tensor` with all tensors sent to the proper device.\n ", "language": "en", "n_whitespaces": 75, "n_words": 37, "vocab_size": 29 }
def gather(tensor):
    if AcceleratorState().distributed_type == DistributedType.TPU:
        return _tpu_gather(tensor, name="accelerate.utils.gather")
    elif AcceleratorState().distributed_type in [DistributedType.DEEPSPEED, DistributedType.MULTI_GPU]:
        return _gpu_gather(tensor)
    elif AcceleratorState().distributed_type == DistributedType.MULTI_CPU:
        return _cpu_gather(tensor)
    else:
        return tensor
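A minimal usage sketch for the gather record above, not part of the dataset row; in practice this utility is normally reached through Accelerator.gather, and the per-process tensor contents here are illustrative assumptions.

# Hypothetical usage sketch; assumes a distributed run launched via `accelerate launch`.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
local_preds = torch.tensor([accelerator.process_index])  # one value per process (illustrative)
all_preds = accelerator.gather(local_preds)              # concatenated across all processes
# Under TPU, DeepSpeed/multi-GPU, or multi-CPU setups this should route through the
# backend-specific gather helpers shown in the record's code field.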
6,038
32,946
91
src/transformers/trainer_utils.py
45
11
def speed_metrics(split, start_time, num_samples=None, num_steps=None):
    runtime = time.time() - start_time
    result = {f"{split}_runtime": round(runtime, 4)}
    if num_samples is not None:
        samples_per_second = num_samples / runtime
        result[f"{split}_samples_per_second"] = round(samples_per_second, 3)
    if num_steps is not None:
        steps_per_second = num_steps / runtime
        result[f"{split}_steps_per_second"] = round(steps_per_second, 3)
    return result
Fix docstrings with last version of hf-doc-builder styler (#18581)

* Fix docstrings with last version of hf-doc-builder styler
* Remove empty Parameter block
speed_metrics
c23cbdff4c097d3f3039999827a675cf8f06a32e
transformers
trainer_utils.py
11
10
https://github.com/huggingface/transformers.git
3
86
0
29
145
Python
{ "docstring": "\n Measure and return speed performance metrics.\n\n This function requires a time snapshot `start_time` before the operation to be measured starts and this function\n should be run immediately after the operation to be measured has completed.\n\n Args:\n - split: name to prefix metric (like train, eval, test...)\n - start_time: operation start time\n - num_samples: number of samples processed\n ", "language": "en", "n_whitespaces": 82, "n_words": 57, "vocab_size": 44 }
def speed_metrics(split, start_time, num_samples=None, num_steps=None):
    runtime = time.time() - start_time
    result = {f"{split}_runtime": round(runtime, 4)}
    if num_samples is not None:
        samples_per_second = num_samples / runtime
        result[f"{split}_samples_per_second"] = round(samples_per_second, 3)
    if num_steps is not None:
        steps_per_second = num_steps / runtime
        result[f"{split}_steps_per_second"] = round(steps_per_second, 3)
    return result
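A minimal usage sketch for the speed_metrics record above, not part of the dataset row; the sample and step counts are illustrative assumptions, and the import path follows this record's path field.

# Hypothetical usage sketch; the counts below are illustrative.
import time
from transformers.trainer_utils import speed_metrics

start = time.time()
# ... run an evaluation pass over 1000 samples in 125 steps ...
metrics = speed_metrics("eval", start, num_samples=1000, num_steps=125)
# Expected keys: eval_runtime, eval_samples_per_second, eval_steps_per_second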
40,017
167,440
70
pandas/io/date_converters.py
27
16
def parse_date_fields(year_col, month_col, day_col) -> npt.NDArray[np.object_]:
    warnings.warn(
        ,  # noqa: E501
        FutureWarning,
        stacklevel=find_stack_level(),
    )

    year_col = _maybe_cast(year_col)
TYP: more return annotations for io/* (#47524)

* TYP: more return annotations for io/*
* import future
parse_date_fields
e48c9c3973286e257f6da1966c91806d86b917e0
pandas
date_converters.py
10
19
https://github.com/pandas-dev/pandas.git
1
63
0
23
100
Python
{ "docstring": "\n Parse columns with years, months and days into a single date column.\n\n .. deprecated:: 1.2\n \n Use pd.to_datetime({\"year\": year_col, \"month\": month_col, \"day\": day_col}) instead to get a Pandas Series.\n Use ser = pd.to_datetime({\"year\": year_col, \"month\": month_col, \"day\": day_col}) and\n np.array([s.to_pydatetime() for s in ser]) instead to get a Numpy array.\n", "language": "en", "n_whitespaces": 80, "n_words": 49, "vocab_size": 36 }
def parse_date_fields(year_col, month_col, day_col) -> npt.NDArray[np.object_]:
    warnings.warn(
        ,  # noqa: E501
        FutureWarning,
        stacklevel=find_stack_level(),
    )

    year_col = _maybe_cast(year_col)
    month_col = _maybe_cast(month_col)
    day_col = _maybe_cast(day_col)
    return parsing.try_parse_year_month_day(year_col, month_col, day_col)
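A minimal usage sketch for the parse_date_fields record above, not part of the dataset row; the arrays are illustrative assumptions, the module path follows this record's path field, and the recommended replacement is taken from the docstring in this record.

# Hypothetical usage sketch; applies to pandas versions where pandas.io.date_converters still exists.
import numpy as np
import pandas as pd
from pandas.io import date_converters

years = np.array([2021, 2022])
months = np.array([1, 6])
days = np.array([15, 30])
dates = date_converters.parse_date_fields(years, months, days)  # emits a FutureWarning (deprecated)
# Recommended replacement per the docstring:
ser = pd.to_datetime({"year": years, "month": months, "day": days})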